adapt latest device layer (#129)

This commit is contained in:
Yun Hsiao Wu 2021-02-04 15:08:04 +08:00 committed by GitHub
parent 554f375f46
commit 44c3e39b52
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
14722 changed files with 2964501 additions and 10383 deletions

1
.gitignore vendored
View File

@ -1 +1,2 @@
**/.DS_Store
config.json

View File

@ -2,11 +2,15 @@
set(CC_EXTERNAL_LIBS)
set(CC_EXTERNAL_INCLUDES ${CMAKE_CURRENT_LIST_DIR}/sources)
set(CC_EXTERNAL_PRIVATE_INCLUDES)
set(CC_EXTERNAL_PRIVATE_DEFINITIONS)
list(APPEND CC_EXTERNAL_PRIVATE_DEFINITIONS
TBB_USE_EXCEPTIONS=0 # no rtti for now
)
include(${CMAKE_CURRENT_LIST_DIR}/cmake/CocosExternalConfig.cmake)
include(${CMAKE_CURRENT_LIST_DIR}/sources/CMakeLists.txt)
if(WINDOWS)
include(${CMAKE_CURRENT_LIST_DIR}/win32/CMakeLists.txt)
elseif(ANDROID)
@ -18,4 +22,3 @@ elseif(APPLE)
include(${CMAKE_CURRENT_LIST_DIR}/ios/CMakeLists.txt)
endif()
endif()

View File

@ -49,28 +49,28 @@ set_target_properties(z PROPERTIES
add_library(android_platform STATIC
${CMAKE_ANDROID_NDK}/sources/android/cpufeatures/cpu-features.c
)
target_include_directories(android_platform PUBLIC
target_include_directories(android_platform PUBLIC
${CMAKE_ANDROID_NDK}/sources/android/cpufeatures
${CMAKE_ANDROID_NDK}/sources/android/native_app_glue
)
## Settings from ${CMAKE_ANDROID_NDK}/sources/android/native_app_glue/Android.mk
# set_property(TARGET android_platform APPEND_STRING PROPERTY LINK_FLAGS "-u ANativeActivity_onCreate")
# target_link_libraries(android_platform PUBLIC
# target_link_libraries(android_platform PUBLIC
# android log dl
# )
set(se_libs_name)
if(USE_SE_V8)
add_library(v8_monolith STATIC IMPORTED GLOBAL)
set_target_properties(v8_monolith PROPERTIES
IMPORTED_LOCATION ${platform_spec_path}/v8/libv8_monolith.a
)
if(ANDROID_ABI STREQUAL "arm64-v8a" OR ANDROID_ABI STREQUAL "x86_64")
set_property(TARGET v8_monolith
set_property(TARGET v8_monolith
APPEND PROPERTY INTERFACE_COMPILE_DEFINITIONS V8_COMPRESS_POINTERS
)
endif()
@ -119,6 +119,19 @@ set_target_properties(glslang-default-resource-limits PROPERTIES
)
set(glslang_libs_name glslang OGLCompiler OSDependent SPIRV glslang-default-resource-limits)
add_library(tbb STATIC IMPORTED GLOBAL)
set_target_properties(tbb PROPERTIES
IMPORTED_LOCATION ${platform_spec_path}/libtbb_static.a
)
add_library(tbbmalloc STATIC IMPORTED GLOBAL)
set_target_properties(tbbmalloc PROPERTIES
IMPORTED_LOCATION ${platform_spec_path}/libtbbmalloc_static.a
)
add_library(tbbmalloc_proxy STATIC IMPORTED GLOBAL)
set_target_properties(tbbmalloc_proxy PROPERTIES
IMPORTED_LOCATION ${platform_spec_path}/libtbbmalloc_proxy_static.a
)
set(tbb_libs_name tbbmalloc_proxy tbbmalloc tbb)
list(APPEND CC_EXTERNAL_LIBS
freetype
@ -130,6 +143,7 @@ list(APPEND CC_EXTERNAL_LIBS
z
android_platform
${glslang_libs_name}
${tbb_libs_name}
)

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

BIN
android/x86/libtbb_static.a Normal file

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -80,7 +80,7 @@ set_target_properties(mozglue PROPERTIES
set(se_libs_name)
if(USE_SE_V8)
list(APPEND se_libs_name
list(APPEND se_libs_name
v8_monolith
uv
)
@ -130,7 +130,21 @@ set_target_properties(spirv-cross-glsl PROPERTIES
)
set(spirv-cross_libs_name spirv-cross-core spirv-cross-glsl spirv-cross-msl)
list(APPEND CC_EXTERNAL_INCLUDES
add_library(tbb STATIC IMPORTED GLOBAL)
set_target_properties(tbb PROPERTIES
IMPORTED_LOCATION ${CMAKE_CURRENT_LIST_DIR}/libs/libtbb_static.a
)
add_library(tbbmalloc STATIC IMPORTED GLOBAL)
set_target_properties(tbbmalloc PROPERTIES
IMPORTED_LOCATION ${CMAKE_CURRENT_LIST_DIR}/libs/libtbbmalloc_static.a
)
add_library(tbbmalloc_proxy STATIC IMPORTED GLOBAL)
set_target_properties(tbbmalloc_proxy PROPERTIES
IMPORTED_LOCATION ${CMAKE_CURRENT_LIST_DIR}/libs/libtbbmalloc_proxy_static.a
)
set(tbb_libs_name tbb tbbmalloc tbbmalloc_proxy)
list(APPEND CC_EXTERNAL_INCLUDES
${CMAKE_CURRENT_LIST_DIR}/include
)
@ -144,4 +158,5 @@ list(APPEND CC_EXTERNAL_LIBS
ssl
${glslang_libs_name}
${spirv-cross_libs_name}
${tbb_libs_name}
)

BIN
ios/libs/libtbb_static.a Normal file

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -69,20 +69,10 @@ set_target_properties(inspector PROPERTIES
IMPORTED_LOCATION ${CMAKE_CURRENT_LIST_DIR}/libs/libinspector.a
)
add_library(EGL SHARED IMPORTED GLOBAL)
set_target_properties(EGL PROPERTIES
IMPORTED_LOCATION ${CMAKE_CURRENT_LIST_DIR}/libs/libEGL.dylib
)
add_library(GLESv2 SHARED IMPORTED GLOBAL)
set_target_properties(GLESv2 PROPERTIES
IMPORTED_LOCATION ${CMAKE_CURRENT_LIST_DIR}/libs/libGLESv2.dylib
)
set(se_libs_name)
if(USE_SE_V8)
list(APPEND se_libs_name
list(APPEND se_libs_name
v8_monolith
uv
)
@ -92,7 +82,7 @@ if(USE_SE_V8)
endif()
# if(USE_SOCKETS)
# list(APPEND CC_EXTERNAL_LIBS
# list(APPEND CC_EXTERNAL_LIBS
# websockets
# )
# endif()
@ -133,10 +123,22 @@ set_target_properties(spirv-cross-glsl PROPERTIES
)
set(spirv-cross_libs_name spirv-cross-core spirv-cross-glsl spirv-cross-msl)
add_library(tbb STATIC IMPORTED GLOBAL)
set_target_properties(tbb PROPERTIES
IMPORTED_LOCATION ${CMAKE_CURRENT_LIST_DIR}/libs/libtbb_static.a
)
add_library(tbbmalloc STATIC IMPORTED GLOBAL)
set_target_properties(tbbmalloc PROPERTIES
IMPORTED_LOCATION ${CMAKE_CURRENT_LIST_DIR}/libs/libtbbmalloc_static.a
)
add_library(tbbmalloc_proxy STATIC IMPORTED GLOBAL)
set_target_properties(tbbmalloc_proxy PROPERTIES
IMPORTED_LOCATION ${CMAKE_CURRENT_LIST_DIR}/libs/libtbbmalloc_proxy_static.a
)
set(tbb_libs_name tbb tbbmalloc tbbmalloc_proxy)
list(APPEND CC_EXTERNAL_LIBS
freetype
EGL
GLESv2
jpeg
png
webp
@ -147,6 +149,7 @@ list(APPEND CC_EXTERNAL_LIBS
z
${glslang_libs_name}
${spirv-cross_libs_name}
${tbb_libs_name}
)
list(APPEND CC_EXTERNAL_INCLUDES

Binary file not shown.

Binary file not shown.

Binary file not shown.

BIN
mac/libs/libtbb_static.a Normal file

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -16,7 +16,6 @@ set(CC_EXTERNAL_SROUCES
${CMAKE_CURRENT_LIST_DIR}/unzip/unzip.cpp
${CMAKE_CURRENT_LIST_DIR}/unzip/unzip.h
${CMAKE_CURRENT_LIST_DIR}/ConvertUTF/ConvertUTF.c
${CMAKE_CURRENT_LIST_DIR}/ConvertUTF/ConvertUTF.h
${CMAKE_CURRENT_LIST_DIR}/ConvertUTF/ConvertUTFWrapper.cpp
@ -44,7 +43,3 @@ elseif(APPLE)
include(${CMAKE_CURRENT_LIST_DIR}/SocketRocket/CMakeLists.txt)
endif()
endif()
list(APPEND CC_EXTERNAL_INCLUDES
${CMAKE_CURRENT_LIST_DIR}
)

View File

@ -0,0 +1,85 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2019 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/compressed_tracks.h"
#include "acl/core/error_result.h"
#include "acl/core/iallocator.h"
#include "acl/core/impl/compiler_utils.h"
#include "acl/compression/compression_settings.h"
#include "acl/compression/output_stats.h"
#include "acl/compression/track_array.h"
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
//////////////////////////////////////////////////////////////////////////
// Compresses a track array with uniform sampling.
//
// This compression algorithm is the simplest by far and as such it offers
// the fastest compression and decompression. Every sample is retained and
// every track has the same number of samples playing back at the same
// sample rate. This means that when we sample at a particular time within
// the clip, we can trivially calculate the offsets required to read the
// desired data. All the data is sorted in order to ensure all reads are
// as contiguous as possible for optimal cache locality during decompression.
//
// allocator: The allocator instance to use to allocate and free memory.
// track_list: The track list to compress.
// settings: The compression settings to use.
// out_compressed_tracks: The resulting compressed tracks. The caller owns the returned memory and must free it.
// out_stats: Stat output structure.
//////////////////////////////////////////////////////////////////////////
error_result compress_track_list(iallocator& allocator, const track_array& track_list, const compression_settings& settings,
compressed_tracks*& out_compressed_tracks, output_stats& out_stats);
//////////////////////////////////////////////////////////////////////////
// Compresses a transform track array and using its additive base and uniform sampling.
//
// This compression algorithm is the simplest by far and as such it offers
// the fastest compression and decompression. Every sample is retained and
// every track has the same number of samples playing back at the same
// sample rate. This means that when we sample at a particular time within
// the clip, we can trivially calculate the offsets required to read the
// desired data. All the data is sorted in order to ensure all reads are
// as contiguous as possible for optimal cache locality during decompression.
//
// allocator: The allocator instance to use to allocate and free memory.
// track_list: The track list to compress.
// settings: The compression settings to use.
// out_compressed_tracks: The resulting compressed tracks. The caller owns the returned memory and must free it.
// out_stats: Stat output structure.
//////////////////////////////////////////////////////////////////////////
error_result compress_track_list(iallocator& allocator, const track_array_qvvf& track_list, const compression_settings& settings,
const track_array_qvvf& additive_base_track_list, additive_clip_format8 additive_format,
compressed_tracks*& out_compressed_tracks, output_stats& out_stats);
}
#include "acl/compression/impl/compress.impl.h"
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,122 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2019 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include <cstdint>
#include <cstring>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
////////////////////////////////////////////////////////////////////////////////
// compression_level8 represents how aggressively we attempt to reduce the memory
// footprint. Higher levels will try more permutations and bit rates. The higher
// the level, the slower the compression but the smaller the memory footprint.
enum class compression_level8 : uint8_t
{
lowest = 0, // Same as medium for now
low = 1, // Same as medium for now
medium = 2,
high = 3,
highest = 4,
//lossless = 255, // Not implemented, reserved
};
//////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Returns a string representing the compression level.
// TODO: constexpr
inline const char* get_compression_level_name(compression_level8 level)
{
switch (level)
{
case compression_level8::lowest: return "lowest";
case compression_level8::low: return "low";
case compression_level8::medium: return "medium";
case compression_level8::high: return "high";
case compression_level8::highest: return "highest";
default: return "<Invalid>";
}
}
//////////////////////////////////////////////////////////////////////////
// Returns the compression level from its string representation.
inline bool get_compression_level(const char* level_name, compression_level8& out_level)
{
const char* level_lowest = "Lowest"; // ACL_DEPRECATED Legacy name, keep for backwards compatibility, remove in 3.0
const char* level_lowest_new = "lowest";
if (std::strncmp(level_name, level_lowest, std::strlen(level_lowest)) == 0
|| std::strncmp(level_name, level_lowest_new, std::strlen(level_lowest_new)) == 0)
{
out_level = compression_level8::lowest;
return true;
}
const char* level_low = "Low"; // ACL_DEPRECATED Legacy name, keep for backwards compatibility, remove in 3.0
const char* level_low_new = "low";
if (std::strncmp(level_name, level_low, std::strlen(level_low)) == 0
|| std::strncmp(level_name, level_low_new, std::strlen(level_low_new)) == 0)
{
out_level = compression_level8::low;
return true;
}
const char* level_medium = "Medium"; // ACL_DEPRECATED Legacy name, keep for backwards compatibility, remove in 3.0
const char* level_medium_new = "medium";
if (std::strncmp(level_name, level_medium, std::strlen(level_medium)) == 0
|| std::strncmp(level_name, level_medium_new, std::strlen(level_medium_new)) == 0)
{
out_level = compression_level8::medium;
return true;
}
const char* level_highest = "Highest"; // ACL_DEPRECATED Legacy name, keep for backwards compatibility, remove in 3.0
const char* level_highest_new = "highest";
if (std::strncmp(level_name, level_highest, std::strlen(level_highest)) == 0
|| std::strncmp(level_name, level_highest_new, std::strlen(level_highest_new)) == 0)
{
out_level = compression_level8::highest;
return true;
}
const char* level_high = "High"; // ACL_DEPRECATED Legacy name, keep for backwards compatibility, remove in 3.0
const char* level_high_new = "high";
if (std::strncmp(level_name, level_high, std::strlen(level_high)) == 0
|| std::strncmp(level_name, level_high_new, std::strlen(level_high_new)) == 0)
{
out_level = compression_level8::high;
return true;
}
return false;
}
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,146 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2017 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/error_result.h"
#include "acl/core/hash.h"
#include "acl/core/track_formats.h"
#include "acl/core/track_types.h"
#include "acl/core/range_reduction_types.h"
#include "acl/compression/compression_level.h"
#include "acl/compression/transform_error_metrics.h"
#include <rtm/scalarf.h>
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
//////////////////////////////////////////////////////////////////////////
// Encapsulates all the compression settings related to segmenting.
// Segmenting ensures that large clips are split into smaller segments and
// compressed independently to allow a smaller memory footprint as well as
// faster compression and decompression.
// See also: https://nfrechette.github.io/2016/11/10/anim_compression_uniform_segmenting/
struct segmenting_settings
{
//////////////////////////////////////////////////////////////////////////
// How many samples to try and fit in our segments
// Defaults to '16'
uint32_t ideal_num_samples = 16;
//////////////////////////////////////////////////////////////////////////
// Maximum number of samples per segment
// Defaults to '31'
uint32_t max_num_samples = 31;
//////////////////////////////////////////////////////////////////////////
// Calculates a hash from the internal state to uniquely identify a configuration.
uint32_t get_hash() const;
//////////////////////////////////////////////////////////////////////////
// Checks if everything is valid and if it isn't, returns an error string.
// Returns nullptr if the settings are valid.
error_result is_valid() const;
};
//////////////////////////////////////////////////////////////////////////
// Encapsulates all the compression settings.
struct compression_settings
{
//////////////////////////////////////////////////////////////////////////
// The compression level determines how aggressively we attempt to reduce the memory
// footprint. Higher levels will try more permutations and bit rates. The higher
// the level, the slower the compression but the smaller the memory footprint.
// Transform tracks only.
compression_level8 level = compression_level8::low;
//////////////////////////////////////////////////////////////////////////
// The rotation, translation, and scale formats to use. See functions get_rotation_format(..) and get_vector_format(..)
// Defaults to raw: 'quatf_full' and 'vector3f_full'
// Transform tracks only.
rotation_format8 rotation_format = rotation_format8::quatf_full;
vector_format8 translation_format = vector_format8::vector3f_full;
vector_format8 scale_format = vector_format8::vector3f_full;
//////////////////////////////////////////////////////////////////////////
// Segmenting settings, if used
// Transform tracks only.
segmenting_settings segmenting;
//////////////////////////////////////////////////////////////////////////
// The error metric to use.
// Defaults to 'null', this value must be set manually!
// Transform tracks only.
itransform_error_metric* error_metric = nullptr;
//////////////////////////////////////////////////////////////////////////
// Whether to include the optional metadata for the track list name
// Defaults to 'false'
bool include_track_list_name = false;
//////////////////////////////////////////////////////////////////////////
// Whether to include the optional metadata for track names
// Defaults to 'false'
bool include_track_names = false;
//////////////////////////////////////////////////////////////////////////
// Whether to include the optional metadata for parent track indices
// Transform tracks only
// Defaults to 'false'
bool include_parent_track_indices = false;
//////////////////////////////////////////////////////////////////////////
// Whether to include the optional metadata for track descriptions
// For transforms, also enables the parent track indices metadata
// Defaults to 'false'
bool include_track_descriptions = false;
//////////////////////////////////////////////////////////////////////////
// Calculates a hash from the internal state to uniquely identify a configuration.
uint32_t get_hash() const;
//////////////////////////////////////////////////////////////////////////
// Checks if everything is valid and if it isn't, returns an error string.
error_result is_valid() const;
};
//////////////////////////////////////////////////////////////////////////
// Returns raw compression settings. No compression is performed and
// samples are all retained with full precision.
compression_settings get_raw_compression_settings();
//////////////////////////////////////////////////////////////////////////
// Returns the recommended and default compression settings. These have
// been tested in a wide range of scenarios and perform best overall.
compression_settings get_default_compression_settings();
}
#include "acl/compression/impl/compression_settings.impl.h"
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,54 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2020 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/compression/track_array.h"
#include "acl/core/compressed_tracks.h"
#include "acl/core/error_result.h"
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/iallocator.h"
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
//////////////////////////////////////////////////////////////////////////
// Convert a track array instance into a raw compressed tracks instance.
// This is a lossless process.
//////////////////////////////////////////////////////////////////////////
error_result convert_track_list(iallocator& allocator, const track_array& track_list, compressed_tracks*& out_compressed_tracks);
//////////////////////////////////////////////////////////////////////////
// Convert a compressed tracks instance into a track array instance.
// This is a lossless process if all the metadata is present.
//////////////////////////////////////////////////////////////////////////
error_result convert_track_list(iallocator& allocator, const compressed_tracks& tracks, track_array& out_track_list);
}
#include "acl/compression/impl/convert.impl.h"
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,259 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2020 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/error.h"
#include "acl/core/iallocator.h"
#include "acl/core/impl/compiler_utils.h"
#include "acl/compression/impl/clip_context.h"
#include "acl/compression/impl/segment_context.h"
#include <cstdint>
#include <functional>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
namespace acl_impl
{
inline void get_num_sub_tracks(const SegmentContext& segment,
const std::function<bool(animation_track_type8 group_type, uint32_t bone_index)>& group_filter_action,
uint32_t& out_num_rotation_sub_tracks, uint32_t& out_num_translation_sub_tracks, uint32_t& out_num_scale_sub_tracks)
{
uint32_t num_rotation_sub_tracks = 0;
uint32_t num_translation_sub_tracks = 0;
uint32_t num_scale_sub_tracks = 0;
for (uint32_t bone_index = 0; bone_index < segment.num_bones; ++bone_index)
{
const BoneStreams& bone_stream = segment.bone_streams[bone_index];
if (bone_stream.output_index == k_invalid_track_index)
continue; // Stripped
if (group_filter_action(animation_track_type8::rotation, bone_index))
num_rotation_sub_tracks++;
if (group_filter_action(animation_track_type8::translation, bone_index))
num_translation_sub_tracks++;
if (group_filter_action(animation_track_type8::scale, bone_index))
num_scale_sub_tracks++;
}
out_num_rotation_sub_tracks = num_rotation_sub_tracks;
out_num_translation_sub_tracks = num_translation_sub_tracks;
out_num_scale_sub_tracks = num_scale_sub_tracks;
}
inline void get_num_animated_sub_tracks(const SegmentContext& segment,
uint32_t& out_num_animated_rotation_sub_tracks, uint32_t& out_num_animated_translation_sub_tracks, uint32_t& out_num_animated_scale_sub_tracks)
{
const auto animated_group_filter_action = [&](animation_track_type8 group_type, uint32_t bone_index)
{
const BoneStreams& bone_stream = segment.bone_streams[bone_index];
if (group_type == animation_track_type8::rotation)
return !bone_stream.is_rotation_constant;
else if (group_type == animation_track_type8::translation)
return !bone_stream.is_translation_constant;
else
return !bone_stream.is_scale_constant;
};
get_num_sub_tracks(segment, animated_group_filter_action, out_num_animated_rotation_sub_tracks, out_num_animated_translation_sub_tracks, out_num_animated_scale_sub_tracks);
}
inline animation_track_type8* calculate_sub_track_groups(const SegmentContext& segment, const uint32_t* output_bone_mapping, uint32_t num_output_bones, uint32_t& out_num_groups,
const std::function<bool(animation_track_type8 group_type, uint32_t bone_index)>& group_filter_action)
{
uint32_t num_rotation_sub_tracks = 0;
uint32_t num_translation_sub_tracks = 0;
uint32_t num_scale_sub_tracks = 0;
get_num_sub_tracks(segment, group_filter_action, num_rotation_sub_tracks, num_translation_sub_tracks, num_scale_sub_tracks);
const uint32_t num_rotation_groups = (num_rotation_sub_tracks + 3) / 4;
const uint32_t num_translation_groups = (num_translation_sub_tracks + 3) / 4;
const uint32_t num_scale_groups = (num_scale_sub_tracks + 3) / 4;
const uint32_t num_groups = num_rotation_groups + num_translation_groups + num_scale_groups;
animation_track_type8* sub_track_groups = allocate_type_array<animation_track_type8>(*segment.clip->allocator, num_groups);
std::memset(sub_track_groups, 0xFF, num_groups * sizeof(animation_track_type8));
// Simulate reading in groups of 4
uint32_t num_cached_rotations = 0;
uint32_t num_left_rotations = num_rotation_sub_tracks;
uint32_t num_cached_translations = 0;
uint32_t num_left_translations = num_translation_sub_tracks;
uint32_t num_cached_scales = 0;
uint32_t num_left_scales = num_scale_sub_tracks;
uint32_t current_group_index = 0;
for (uint32_t output_index = 0; output_index < num_output_bones; ++output_index)
{
if ((output_index % 4) == 0)
{
if (num_cached_rotations < 4 && num_left_rotations != 0)
{
sub_track_groups[current_group_index++] = animation_track_type8::rotation;
const uint32_t num_unpacked = std::min<uint32_t>(num_left_rotations, 4);
num_left_rotations -= num_unpacked;
num_cached_rotations += num_unpacked;
}
if (num_cached_translations < 4 && num_left_translations != 0)
{
sub_track_groups[current_group_index++] = animation_track_type8::translation;
const uint32_t num_unpacked = std::min<uint32_t>(num_left_translations, 4);
num_left_translations -= num_unpacked;
num_cached_translations += num_unpacked;
}
if (num_cached_scales < 4 && num_left_scales != 0)
{
sub_track_groups[current_group_index++] = animation_track_type8::scale;
const uint32_t num_unpacked = std::min<uint32_t>(num_left_scales, 4);
num_left_scales -= num_unpacked;
num_cached_scales += num_unpacked;
}
}
const uint32_t bone_index = output_bone_mapping[output_index];
if (group_filter_action(animation_track_type8::rotation, bone_index))
num_cached_rotations--; // Consumed
if (group_filter_action(animation_track_type8::translation, bone_index))
num_cached_translations--; // Consumed
if (group_filter_action(animation_track_type8::scale, bone_index))
num_cached_scales--; // Consumed
}
ACL_ASSERT(current_group_index == num_groups, "Unexpected number of groups written");
out_num_groups = num_groups;
return sub_track_groups;
}
// Iterates over the sub-track groups of a segment and dispatches writer callbacks.
//
// The group layout is computed by calculate_sub_track_groups(..). For every group we
// scan the remaining output bones of the matching track type, forward each bone that
// passes 'group_filter_action' to 'group_entry_action', and once the group is complete
// (full with 4 entries or out of bones) notify 'group_flush_action'.
inline void group_writer(const SegmentContext& segment, const uint32_t* output_bone_mapping, uint32_t num_output_bones,
	const std::function<bool(animation_track_type8 group_type, uint32_t bone_index)>& group_filter_action,
	const std::function<void(animation_track_type8 group_type, uint32_t group_size, uint32_t bone_index)>& group_entry_action,
	const std::function<void(animation_track_type8 group_type, uint32_t group_size)>& group_flush_action)
{
	uint32_t num_groups = 0;
	animation_track_type8* sub_track_groups = calculate_sub_track_groups(segment, output_bone_mapping, num_output_bones, num_groups, group_filter_action);

	// Per track-type cursor into the output bone list; each persists across groups.
	uint32_t next_rotation_output = 0;
	uint32_t next_translation_output = 0;
	uint32_t next_scale_output = 0;

	// Fills the current group (up to 4 entries) starting at 'output_cursor' with
	// every bone that passes the filter, advancing the cursor as it goes.
	const auto fill_group = [&](animation_track_type8 group_type, uint32_t& output_cursor, uint32_t& entries_written)
	{
		while (entries_written < 4 && output_cursor < num_output_bones)
		{
			const uint32_t bone_index = output_bone_mapping[output_cursor];
			if (group_filter_action(group_type, bone_index))
				group_entry_action(group_type, entries_written++, bone_index);

			output_cursor++;
		}
	};

	for (uint32_t group_index = 0; group_index < num_groups; ++group_index)
	{
		const animation_track_type8 group_type = sub_track_groups[group_index];

		uint32_t group_size = 0;
		if (group_type == animation_track_type8::rotation)
			fill_group(group_type, next_rotation_output, group_size);
		else if (group_type == animation_track_type8::translation)
			fill_group(group_type, next_translation_output, group_size);
		else // scale
			fill_group(group_type, next_scale_output, group_size);

		ACL_ASSERT(group_size != 0, "Group cannot be empty");

		// Group full or we ran out of tracks, write it out and move onto to the next group
		group_flush_action(group_type, group_size);
	}

	deallocate_type_array(*segment.clip->allocator, sub_track_groups, num_groups);
}
// Same as group_writer(..) but restricted to animated (non-constant) sub-tracks.
// The caller's filter is only consulted for sub-tracks that are not constant.
inline void animated_group_writer(const SegmentContext& segment, const uint32_t* output_bone_mapping, uint32_t num_output_bones,
	const std::function<bool(animation_track_type8 group_type, uint32_t bone_index)>& group_filter_action,
	const std::function<void(animation_track_type8 group_type, uint32_t group_size, uint32_t bone_index)>& group_entry_action,
	const std::function<void(animation_track_type8 group_type, uint32_t group_size)>& group_flush_action)
{
	// Wrap the caller's filter: a sub-track qualifies when it is animated AND accepted.
	const auto animated_group_filter_action = [&](animation_track_type8 group_type, uint32_t bone_index)
	{
		const BoneStreams& bone_stream = segment.bone_streams[bone_index];

		bool is_constant;
		if (group_type == animation_track_type8::rotation)
			is_constant = bone_stream.is_rotation_constant;
		else if (group_type == animation_track_type8::translation)
			is_constant = bone_stream.is_translation_constant;
		else
			is_constant = bone_stream.is_scale_constant;

		return !is_constant && group_filter_action(group_type, bone_index);
	};

	group_writer(segment, output_bone_mapping, num_output_bones, animated_group_filter_action, group_entry_action, group_flush_action);
}
// Same as group_writer(..) but restricted to constant, non-default sub-tracks
// (the sub-tracks that end up in the constant data section).
inline void constant_group_writer(const SegmentContext& segment, const uint32_t* output_bone_mapping, uint32_t num_output_bones,
	const std::function<void(animation_track_type8 group_type, uint32_t group_size, uint32_t bone_index)>& group_entry_action,
	const std::function<void(animation_track_type8 group_type, uint32_t group_size)>& group_flush_action)
{
	// A sub-track belongs to the constant data when it is constant but not default.
	const auto constant_group_filter_action = [&](animation_track_type8 group_type, uint32_t bone_index)
	{
		const BoneStreams& bone_stream = segment.bone_streams[bone_index];

		bool is_constant;
		bool is_default;
		if (group_type == animation_track_type8::rotation)
		{
			is_constant = bone_stream.is_rotation_constant;
			is_default = bone_stream.is_rotation_default;
		}
		else if (group_type == animation_track_type8::translation)
		{
			is_constant = bone_stream.is_translation_constant;
			is_default = bone_stream.is_translation_default;
		}
		else
		{
			is_constant = bone_stream.is_scale_constant;
			is_default = bone_stream.is_scale_default;
		}

		return is_constant && !is_default;
	};

	group_writer(segment, output_bone_mapping, num_output_bones, constant_group_filter_action, group_entry_action, group_flush_action);
}
}
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,393 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2017 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/additive_utils.h"
#include "acl/core/bitset.h"
#include "acl/core/iallocator.h"
#include "acl/core/iterator.h"
#include "acl/core/error.h"
#include "acl/core/track_formats.h"
#include "acl/core/impl/compiler_utils.h"
#include "acl/compression/compression_settings.h"
#include "acl/compression/impl/segment_context.h"
#include <rtm/quatf.h>
#include <rtm/vector4f.h>
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
namespace acl_impl
{
//////////////////////////////////////////////////////////////////////////
// Simple iterator utility class to allow easy looping
class BoneChainIterator
{
public:
BoneChainIterator(const uint32_t* bone_chain, bitset_description bone_chain_desc, uint32_t bone_index, uint32_t offset)
: m_bone_chain(bone_chain)
, m_bone_chain_desc(bone_chain_desc)
, m_bone_index(bone_index)
, m_offset(offset)
{}
BoneChainIterator& operator++()
{
ACL_ASSERT(m_offset <= m_bone_index, "Cannot increment the iterator, it is no longer valid");
// Skip the current bone
m_offset++;
// Iterate until we find the next bone part of the chain or until we reach the end of the chain
// TODO: Use clz or similar to find the next set bit starting at the current index
while (m_offset < m_bone_index && !bitset_test(m_bone_chain, m_bone_chain_desc, m_offset))
m_offset++;
return *this;
}
uint32_t operator*() const
{
ACL_ASSERT(m_offset <= m_bone_index, "Returned bone index doesn't belong to the bone chain");
ACL_ASSERT(bitset_test(m_bone_chain, m_bone_chain_desc, m_offset), "Returned bone index doesn't belong to the bone chain");
return m_offset;
}
// We only compare the offset in the bone chain. Two iterators on the same bone index
// from two different or equal chains will be equal.
bool operator==(const BoneChainIterator& other) const { return m_offset == other.m_offset; }
bool operator!=(const BoneChainIterator& other) const { return m_offset != other.m_offset; }
private:
const uint32_t* m_bone_chain;
bitset_description m_bone_chain_desc;
uint32_t m_bone_index;
uint32_t m_offset;
};
//////////////////////////////////////////////////////////////////////////
// Simple bone chain container to allow easy looping
//
// A bone chain allows looping over all bones up to a specific bone starting
// at the root bone.
//////////////////////////////////////////////////////////////////////////
struct BoneChain
{
	// Builds the chain ending at 'bone_index'. 'bone_chain' is a bitset where set
	// bits mark the chain members; the root is located by scanning for the first
	// set bit.
	BoneChain(const uint32_t* bone_chain, bitset_description bone_chain_desc, uint32_t bone_index)
		: m_bone_chain(bone_chain)
		, m_bone_chain_desc(bone_chain_desc)
		, m_bone_index(bone_index)
	{
		// We don't know where this bone chain starts, find the root bone
		// TODO: Use clz or similar to find the next set bit starting at the current index
		uint32_t root_index = 0;
		while (!bitset_test(bone_chain, bone_chain_desc, root_index))
			root_index++;
		m_root_index = root_index;
	}

	// Iteration starts at the chain's root bone and ends one past the chain's last bone.
	acl_impl::BoneChainIterator begin() const { return acl_impl::BoneChainIterator(m_bone_chain, m_bone_chain_desc, m_bone_index, m_root_index); }
	acl_impl::BoneChainIterator end() const { return acl_impl::BoneChainIterator(m_bone_chain, m_bone_chain_desc, m_bone_index, m_bone_index + 1); }

	const uint32_t* m_bone_chain;         // Bitset with one bit per bone; set bits are chain members
	bitset_description m_bone_chain_desc; // Describes the bitset size
	uint32_t m_root_index;                // First (root) bone of the chain
	uint32_t m_bone_index;                // Last bone of the chain (inclusive)
};
// Per transform (bone) compression metadata.
struct transform_metadata
{
	const uint32_t* transform_chain; // Bitset marking this bone's ancestor chain; assigned lazily when leaf chains are built
	uint32_t parent_index;           // Parent bone index; k_invalid_track_index for root bones
	float precision;                 // Desired precision, copied from the track description
	float shell_distance;            // Shell distance, copied from the track description
};
// Working context for a whole clip during compression.
//
// Owns the segment list, per-bone ranges/metadata, and the per-leaf bone chain
// bitsets. 'allocator' doubles as the initialization flag.
struct clip_context
{
	SegmentContext* segments;        // Segment array, 'num_segments' entries
	BoneRanges* ranges;              // Per-bone sample ranges, 'num_bones' entries (null until computed)
	transform_metadata* metadata;    // Per-bone metadata, 'num_bones' entries
	uint32_t* leaf_transform_chains; // One bitset per leaf transform marking its ancestor chain
	uint32_t num_segments;
	uint32_t num_bones;
	uint32_t num_samples;
	float sample_rate;
	float duration;
	bool are_rotations_normalized;
	bool are_translations_normalized;
	bool are_scales_normalized;
	bool has_scale;                  // True when at least one bone scale is not the default value
	bool has_additive_base;
	additive_clip_format8 additive_format;
	uint32_t num_leaf_transforms;
	iallocator* allocator = nullptr; // Never null if the context is initialized
	// Stat tracking
	uint32_t decomp_touched_bytes;
	uint32_t decomp_touched_cache_lines;
	//////////////////////////////////////////////////////////////////////////
	// Returns whether initialize_clip_context(..) has run on this instance.
	bool is_initialized() const { return allocator != nullptr; }
	iterator<SegmentContext> segment_iterator() { return iterator<SegmentContext>(segments, num_segments); }
	const_iterator<SegmentContext> segment_iterator() const { return const_iterator<SegmentContext>(segments, num_segments); }
	// Returns the bone chain that ends at 'bone_index'.
	BoneChain get_bone_chain(uint32_t bone_index) const
	{
		ACL_ASSERT(bone_index < num_bones, "Invalid bone index: %u >= %u", bone_index, num_bones);
		const transform_metadata& meta = metadata[bone_index];
		return BoneChain(meta.transform_chain, bitset_description::make_from_num_bits(num_bones), bone_index);
	}
};
// Builds 'out_clip_context' from a raw transform track list.
//
// Creates a single segment spanning the whole clip, copies every sample into
// per-bone track streams (normalizing rotations when required), flags
// constant/default sub-tracks for single-sample clips, and builds one
// ancestor-chain bitset per leaf bone.
// Returns false when any input sample is not finite.
inline bool initialize_clip_context(iallocator& allocator, const track_array_qvvf& track_list, const compression_settings& settings, additive_clip_format8 additive_format, clip_context& out_clip_context)
{
	const uint32_t num_transforms = track_list.get_num_tracks();
	const uint32_t num_samples = track_list.get_num_samples_per_track();
	const float sample_rate = track_list.get_sample_rate();
	ACL_ASSERT(num_transforms > 0, "Track array has no tracks!");
	ACL_ASSERT(num_samples > 0, "Track array has no samples!");
	// Create a single segment with the whole clip
	out_clip_context.segments = allocate_type_array<SegmentContext>(allocator, 1);
	out_clip_context.ranges = nullptr;
	out_clip_context.metadata = allocate_type_array<transform_metadata>(allocator, num_transforms);
	out_clip_context.leaf_transform_chains = nullptr;
	out_clip_context.num_segments = 1;
	out_clip_context.num_bones = num_transforms;
	out_clip_context.num_samples = num_samples;
	out_clip_context.sample_rate = sample_rate;
	out_clip_context.duration = track_list.get_duration();
	out_clip_context.are_rotations_normalized = false;
	out_clip_context.are_translations_normalized = false;
	out_clip_context.are_scales_normalized = false;
	out_clip_context.has_additive_base = additive_format != additive_clip_format8::none;
	out_clip_context.additive_format = additive_format;
	out_clip_context.num_leaf_transforms = 0;
	out_clip_context.allocator = &allocator;
	bool has_scale = false;
	bool are_samples_valid = true;
	const rtm::vector4f default_scale = get_default_scale(additive_format);
	SegmentContext& segment = out_clip_context.segments[0];
	BoneStreams* bone_streams = allocate_type_array<BoneStreams>(allocator, num_transforms);
	for (uint32_t transform_index = 0; transform_index < num_transforms; ++transform_index)
	{
		const track_qvvf& track = track_list[transform_index];
		const track_desc_transformf& desc = track.get_description();
		BoneStreams& bone_stream = bone_streams[transform_index];
		bone_stream.segment = &segment;
		bone_stream.bone_index = transform_index;
		bone_stream.parent_bone_index = desc.parent_index;
		bone_stream.output_index = desc.output_index;
		// Streams start out in full raw precision; later passes convert/quantize them.
		bone_stream.rotations = RotationTrackStream(allocator, num_samples, sizeof(rtm::quatf), sample_rate, rotation_format8::quatf_full);
		bone_stream.translations = TranslationTrackStream(allocator, num_samples, sizeof(rtm::vector4f), sample_rate, vector_format8::vector3f_full);
		bone_stream.scales = ScaleTrackStream(allocator, num_samples, sizeof(rtm::vector4f), sample_rate, vector_format8::vector3f_full);
		for (uint32_t sample_index = 0; sample_index < num_samples; ++sample_index)
		{
			const rtm::qvvf& transform = track[sample_index];
			// If we request raw data and we are already normalized, retain the original value
			// otherwise we normalize for safety
			rtm::quatf rotation;
			if (settings.rotation_format != rotation_format8::quatf_full || !rtm::quat_is_normalized(transform.rotation))
				rotation = rtm::quat_normalize(transform.rotation);
			else
				rotation = transform.rotation;
			// A single non-finite component anywhere invalidates the whole clip.
			are_samples_valid &= rtm::quat_is_finite(rotation);
			are_samples_valid &= rtm::vector_is_finite3(transform.translation);
			are_samples_valid &= rtm::vector_is_finite3(transform.scale);
			bone_stream.rotations.set_raw_sample(sample_index, rotation);
			bone_stream.translations.set_raw_sample(sample_index, transform.translation);
			bone_stream.scales.set_raw_sample(sample_index, transform.scale);
		}
		{
			// Single-sample clips are trivially constant; compare the first sample
			// against identity/zero/default to detect default sub-tracks.
			const rtm::qvvf& first_transform = track[0];
			const rtm::quatf first_rotation = rtm::quat_normalize(first_transform.rotation);
			// If we request raw data, use a 0.0 threshold for safety
			const float constant_rotation_threshold_angle = settings.rotation_format != rotation_format8::quatf_full ? desc.constant_rotation_threshold_angle : 0.0F;
			const float constant_translation_threshold = settings.translation_format != vector_format8::vector3f_full ? desc.constant_translation_threshold : 0.0F;
			const float constant_scale_threshold = settings.scale_format != vector_format8::vector3f_full ? desc.constant_scale_threshold : 0.0F;
			bone_stream.is_rotation_constant = num_samples == 1;
			bone_stream.is_rotation_default = bone_stream.is_rotation_constant && rtm::quat_near_identity(first_rotation, constant_rotation_threshold_angle);
			bone_stream.is_translation_constant = num_samples == 1;
			bone_stream.is_translation_default = bone_stream.is_translation_constant && rtm::vector_all_near_equal3(first_transform.translation, rtm::vector_zero(), constant_translation_threshold);
			bone_stream.is_scale_constant = num_samples == 1;
			bone_stream.is_scale_default = bone_stream.is_scale_constant && rtm::vector_all_near_equal3(first_transform.scale, default_scale, constant_scale_threshold);
		}
		has_scale |= !bone_stream.is_scale_default;
		transform_metadata& metadata = out_clip_context.metadata[transform_index];
		metadata.transform_chain = nullptr; // Populated below once leaf bones are known
		metadata.parent_index = desc.parent_index;
		metadata.precision = desc.precision;
		metadata.shell_distance = desc.shell_distance;
	}
	out_clip_context.has_scale = has_scale;
	out_clip_context.decomp_touched_bytes = 0;
	out_clip_context.decomp_touched_cache_lines = 0;
	segment.bone_streams = bone_streams;
	segment.clip = &out_clip_context;
	segment.ranges = nullptr;
	segment.num_samples = num_samples;
	segment.num_bones = num_transforms;
	segment.clip_sample_offset = 0;
	segment.segment_index = 0;
	segment.distribution = SampleDistribution8::Uniform;
	segment.are_rotations_normalized = false;
	segment.are_translations_normalized = false;
	segment.are_scales_normalized = false;
	segment.animated_pose_rotation_bit_size = 0;
	segment.animated_pose_translation_bit_size = 0;
	segment.animated_pose_scale_bit_size = 0;
	segment.animated_pose_bit_size = 0;
	segment.animated_data_size = 0;
	segment.range_data_size = 0;
	segment.total_header_size = 0;
	// Initialize our hierarchy information
	{
		// Calculate which bones are leaf bones that have no children
		bitset_description bone_bitset_desc = bitset_description::make_from_num_bits(num_transforms);
		uint32_t* is_leaf_bitset = allocate_type_array<uint32_t>(allocator, bone_bitset_desc.get_size());
		bitset_reset(is_leaf_bitset, bone_bitset_desc, false);
		// By default and if we find a child, we'll mark it as non-leaf
		bitset_set_range(is_leaf_bitset, bone_bitset_desc, 0, num_transforms, true);
#if defined(ACL_HAS_ASSERT_CHECKS)
		uint32_t num_root_bones = 0;
#endif
		// Move and validate the input data
		for (uint32_t transform_index = 0; transform_index < num_transforms; ++transform_index)
		{
			const transform_metadata& metadata = out_clip_context.metadata[transform_index];
			const bool is_root = metadata.parent_index == k_invalid_track_index;
			// If we have a parent, mark it as not being a leaf bone (it has at least one child)
			if (!is_root)
				bitset_set(is_leaf_bitset, bone_bitset_desc, metadata.parent_index, false);
#if defined(ACL_HAS_ASSERT_CHECKS)
			if (is_root)
				num_root_bones++;
#endif
		}
		const uint32_t num_leaf_transforms = bitset_count_set_bits(is_leaf_bitset, bone_bitset_desc);
		out_clip_context.num_leaf_transforms = num_leaf_transforms;
		// One full-sized bitset per leaf; each marks every bone on that leaf's root chain.
		uint32_t* leaf_transform_chains = allocate_type_array<uint32_t>(allocator, size_t(num_leaf_transforms) * bone_bitset_desc.get_size());
		out_clip_context.leaf_transform_chains = leaf_transform_chains;
		uint32_t leaf_index = 0;
		for (uint32_t transform_index = 0; transform_index < num_transforms; ++transform_index)
		{
			if (!bitset_test(is_leaf_bitset, bone_bitset_desc, transform_index))
				continue; // Skip non-leaf bones
			uint32_t* bone_chain = leaf_transform_chains + (leaf_index * bone_bitset_desc.get_size());
			bitset_reset(bone_chain, bone_bitset_desc, false);
			// Walk from the leaf up to the root, marking every bone along the way.
			uint32_t chain_bone_index = transform_index;
			while (chain_bone_index != k_invalid_track_index)
			{
				bitset_set(bone_chain, bone_bitset_desc, chain_bone_index, true);
				transform_metadata& metadata = out_clip_context.metadata[chain_bone_index];
				// We assign a bone chain the first time we find a bone that isn't part of one already
				if (metadata.transform_chain == nullptr)
					metadata.transform_chain = bone_chain;
				chain_bone_index = metadata.parent_index;
			}
			leaf_index++;
		}
		ACL_ASSERT(num_root_bones > 0, "No root bone found. The root bones must have a parent index = 0xFFFF");
		ACL_ASSERT(leaf_index == num_leaf_transforms, "Invalid number of leaf bone found");
		deallocate_type_array(allocator, is_leaf_bitset, bone_bitset_desc.get_size());
	}
	return are_samples_valid;
}
// Releases every allocation owned by the clip context.
// Safe to call on an uninitialized context (no-op).
inline void destroy_clip_context(clip_context& context)
{
	if (!context.is_initialized())
		return; // Nothing to free

	iallocator& allocator = *context.allocator;

	// Segments own per-bone stream data, tear them down first.
	for (SegmentContext& segment : context.segment_iterator())
		destroy_segment_context(allocator, segment);

	deallocate_type_array(allocator, context.segments, context.num_segments);
	deallocate_type_array(allocator, context.ranges, context.num_bones);
	deallocate_type_array(allocator, context.metadata, context.num_bones);

	// The leaf chains are one full-sized bitset per leaf transform.
	const bitset_description bone_bitset_desc = bitset_description::make_from_num_bits(context.num_bones);
	deallocate_type_array(allocator, context.leaf_transform_chains, size_t(context.num_leaf_transforms) * bone_bitset_desc.get_size());
}
// Returns the owning clip's 'has_scale' flag for this segment.
constexpr bool segment_context_has_scale(const SegmentContext& segment) { return segment.clip->has_scale; }
// Returns whether the clip that owns these bone streams has scale.
constexpr bool bone_streams_has_scale(const BoneStreams& bone_streams) { return segment_context_has_scale(*bone_streams.segment); }
}
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,156 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2017 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/iallocator.h"
#include "acl/core/track_formats.h"
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/error.h"
#include "acl/compression/impl/clip_context.h"
#include <rtm/quatf.h>
#include <rtm/vector4f.h>
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
namespace acl_impl
{
// Returns true when every rotation in the track is within 'threshold_angle'
// of the track's first rotation.
inline bool is_rotation_track_constant(const RotationTrackStream& rotations, float threshold_angle)
{
	// Calculating the average rotation and comparing every rotation in the track to it
	// to determine if we are within the threshold seems overkill. We can't use the min/max for the range
	// either because neither of those represents a valid rotation. Instead we grab
	// the first rotation, and compare everything else to it.
	const auto sample_to_quat = [](const RotationTrackStream& track, uint32_t sample_index)
	{
		const rtm::vector4f rotation = track.get_raw_sample<rtm::vector4f>(sample_index);

		switch (track.get_rotation_format())
		{
		case rotation_format8::quatf_full:
			return rtm::vector_to_quat(rotation);
		case rotation_format8::quatf_drop_w_full:
		case rotation_format8::quatf_drop_w_variable:
			return rtm::quat_from_positive_w(rotation);
		default:
			ACL_ASSERT(false, "Invalid or unsupported rotation format: %s", get_rotation_format_name(track.get_rotation_format()));
			return rtm::vector_to_quat(rotation);
		}
	};

	// Compare every remaining sample against the inverse of the first: if the
	// delta is near identity, the two rotations match within the threshold.
	const rtm::quatf inv_ref_rotation = rtm::quat_conjugate(sample_to_quat(rotations, 0));

	const uint32_t num_samples = rotations.get_num_samples();
	for (uint32_t sample_index = 1; sample_index < num_samples; ++sample_index)
	{
		const rtm::quatf delta = rtm::quat_normalize(rtm::quat_mul(inv_ref_rotation, sample_to_quat(rotations, sample_index)));
		if (!rtm::quat_near_identity(delta, threshold_angle))
			return false;
	}

	return true;
}
// Compacts constant streams down to a single sample.
//
// For each bone: detects a constant rotation track via is_rotation_track_constant,
// and constant translation/scale tracks via the precomputed ranges; replaces each
// constant stream with a one-sample stream, flags its constant/default status, and
// collapses its range to a zero extent. Finally refreshes the clip's 'has_scale'.
inline void compact_constant_streams(iallocator& allocator, clip_context& context, const track_array_qvvf& track_list, const compression_settings& settings)
{
	ACL_ASSERT(context.num_segments == 1, "context must contain a single segment!");
	SegmentContext& segment = context.segments[0];
	const uint32_t num_bones = context.num_bones;
	const rtm::vector4f default_scale = get_default_scale(context.additive_format);
	uint32_t num_default_bone_scales = 0;
	// When a stream is constant, we only keep the first sample
	for (uint32_t bone_index = 0; bone_index < num_bones; ++bone_index)
	{
		const track_desc_transformf& desc = track_list[bone_index].get_description();
		BoneStreams& bone_stream = segment.bone_streams[bone_index];
		BoneRanges& bone_range = context.ranges[bone_index];
		// We expect all our samples to have the same width of sizeof(rtm::vector4f)
		ACL_ASSERT(bone_stream.rotations.get_sample_size() == sizeof(rtm::vector4f), "Unexpected rotation sample size. %u != %zu", bone_stream.rotations.get_sample_size(), sizeof(rtm::vector4f));
		ACL_ASSERT(bone_stream.translations.get_sample_size() == sizeof(rtm::vector4f), "Unexpected translation sample size. %u != %zu", bone_stream.translations.get_sample_size(), sizeof(rtm::vector4f));
		ACL_ASSERT(bone_stream.scales.get_sample_size() == sizeof(rtm::vector4f), "Unexpected scale sample size. %u != %zu", bone_stream.scales.get_sample_size(), sizeof(rtm::vector4f));
		// If we request raw data, use a 0.0 threshold for safety
		const float constant_rotation_threshold_angle = settings.rotation_format != rotation_format8::quatf_full ? desc.constant_rotation_threshold_angle : 0.0F;
		const float constant_translation_threshold = settings.translation_format != vector_format8::vector3f_full ? desc.constant_translation_threshold : 0.0F;
		const float constant_scale_threshold = settings.scale_format != vector_format8::vector3f_full ? desc.constant_scale_threshold : 0.0F;
		if (is_rotation_track_constant(bone_stream.rotations, constant_rotation_threshold_angle))
		{
			// Replace the rotation stream with a single-sample stream holding the first value.
			RotationTrackStream constant_stream(allocator, 1, bone_stream.rotations.get_sample_size(), bone_stream.rotations.get_sample_rate(), bone_stream.rotations.get_rotation_format());
			rtm::vector4f rotation = bone_stream.rotations.get_raw_sample<rtm::vector4f>(0);
			constant_stream.set_raw_sample(0, rotation);
			bone_stream.rotations = std::move(constant_stream);
			bone_stream.is_rotation_constant = true;
			bone_stream.is_rotation_default = rtm::quat_near_identity(rtm::vector_to_quat(rotation), constant_rotation_threshold_angle);
			bone_range.rotation = TrackStreamRange::from_min_extent(rotation, rtm::vector_zero());
		}
		if (bone_range.translation.is_constant(constant_translation_threshold))
		{
			// Same treatment for a constant translation track.
			TranslationTrackStream constant_stream(allocator, 1, bone_stream.translations.get_sample_size(), bone_stream.translations.get_sample_rate(), bone_stream.translations.get_vector_format());
			rtm::vector4f translation = bone_stream.translations.get_raw_sample<rtm::vector4f>(0);
			constant_stream.set_raw_sample(0, translation);
			bone_stream.translations = std::move(constant_stream);
			bone_stream.is_translation_constant = true;
			bone_stream.is_translation_default = rtm::vector_all_near_equal3(translation, rtm::vector_zero(), constant_translation_threshold);
			bone_range.translation = TrackStreamRange::from_min_extent(translation, rtm::vector_zero());
		}
		if (bone_range.scale.is_constant(constant_scale_threshold))
		{
			// Same treatment for a constant scale track; default scales are also counted
			// so the clip-level 'has_scale' flag can be refreshed below.
			ScaleTrackStream constant_stream(allocator, 1, bone_stream.scales.get_sample_size(), bone_stream.scales.get_sample_rate(), bone_stream.scales.get_vector_format());
			rtm::vector4f scale = bone_stream.scales.get_raw_sample<rtm::vector4f>(0);
			constant_stream.set_raw_sample(0, scale);
			bone_stream.scales = std::move(constant_stream);
			bone_stream.is_scale_constant = true;
			bone_stream.is_scale_default = rtm::vector_all_near_equal3(scale, default_scale, constant_scale_threshold);
			bone_range.scale = TrackStreamRange::from_min_extent(scale, rtm::vector_zero());
			// NOTE(review): only bones reaching this branch (constant scale range) are counted;
			// confirm bones flagged scale-default during initialization always land here too.
			num_default_bone_scales += bone_stream.is_scale_default ? 1 : 0;
		}
	}
	// The clip has scale unless every bone's scale is the default value.
	context.has_scale = num_default_bone_scales != num_bones;
}
}
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,707 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2020 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
// Included only once from compress.h
#include "acl/core/buffer_tag.h"
#include "acl/core/compressed_tracks.h"
#include "acl/core/compressed_tracks_version.h"
#include "acl/core/error.h"
#include "acl/core/error_result.h"
#include "acl/core/floating_point_exceptions.h"
#include "acl/core/iallocator.h"
#include "acl/core/scope_profiler.h"
#include "acl/compression/compression_settings.h"
#include "acl/compression/output_stats.h"
#include "acl/compression/track_array.h"
#include "acl/compression/impl/clip_context.h"
#include "acl/compression/impl/constant_track_impl.h"
#include "acl/compression/impl/normalize_track_impl.h"
#include "acl/compression/impl/quantize_track_impl.h"
#include "acl/compression/impl/track_list_context.h"
#include "acl/compression/impl/track_range_impl.h"
#include "acl/compression/impl/write_compression_stats_impl.h"
#include "acl/compression/impl/write_track_data_impl.h"
#include "acl/compression/impl/track_stream.h"
#include "acl/compression/impl/convert_rotation_streams.h"
#include "acl/compression/impl/compact_constant_streams.h"
#include "acl/compression/impl/normalize_streams.h"
#include "acl/compression/impl/quantize_streams.h"
#include "acl/compression/impl/segment_streams.h"
#include "acl/compression/impl/write_segment_data.h"
#include "acl/compression/impl/write_stats.h"
#include "acl/compression/impl/write_stream_bitsets.h"
#include "acl/compression/impl/write_stream_data.h"
#include "acl/compression/impl/write_track_metadata.h"
#include <cstdint>
namespace acl
{
namespace acl_impl
{
inline error_result compress_scalar_track_list(iallocator& allocator, const track_array& track_list, const compression_settings& settings, compressed_tracks*& out_compressed_tracks, output_stats& out_stats)
{
(void)out_stats;
#if defined(SJSON_CPP_WRITER)
scope_profiler compression_time;
#endif
track_list_context context;
if (!initialize_context(allocator, track_list, context))
return error_result("Some samples are not finite");
extract_track_ranges(context);
extract_constant_tracks(context);
normalize_tracks(context);
quantize_tracks(context);
// Done transforming our input tracks, time to pack them into their final form
const uint32_t per_track_metadata_size = write_track_metadata(context, nullptr);
const uint32_t constant_values_size = write_track_constant_values(context, nullptr);
const uint32_t range_values_size = write_track_range_values(context, nullptr);
const uint32_t animated_num_bits = write_track_animated_values(context, nullptr);
const uint32_t animated_values_size = (animated_num_bits + 7) / 8; // Round up to nearest byte
const uint32_t num_bits_per_frame = context.num_samples != 0 ? (animated_num_bits / context.num_samples) : 0;
uint32_t buffer_size = 0;
buffer_size += sizeof(raw_buffer_header); // Header
buffer_size += sizeof(tracks_header); // Header
buffer_size += sizeof(scalar_tracks_header); // Header
ACL_ASSERT(is_aligned_to(buffer_size, alignof(track_metadata)), "Invalid alignment");
buffer_size += per_track_metadata_size; // Per track metadata
buffer_size = align_to(buffer_size, 4); // Align constant values
buffer_size += constant_values_size; // Constant values
ACL_ASSERT(is_aligned_to(buffer_size, 4), "Invalid alignment");
buffer_size += range_values_size; // Range values
ACL_ASSERT(is_aligned_to(buffer_size, 4), "Invalid alignment");
buffer_size += animated_values_size; // Animated values
// Optional metadata
const uint32_t metadata_start_offset = align_to(buffer_size, 4);
const uint32_t metadata_track_list_name_size = settings.include_track_list_name ? write_track_list_name(track_list, nullptr) : 0;
const uint32_t metadata_track_names_size = settings.include_track_names ? write_track_names(track_list, context.track_output_indices, context.num_output_tracks, nullptr) : 0;
const uint32_t metadata_track_descriptions_size = settings.include_track_descriptions ? write_track_descriptions(track_list, context.track_output_indices, context.num_output_tracks, nullptr) : 0;
uint32_t metadata_size = 0;
metadata_size += metadata_track_list_name_size;
metadata_size = align_to(metadata_size, 4);
metadata_size += metadata_track_names_size;
metadata_size = align_to(metadata_size, 4);
metadata_size += metadata_track_descriptions_size;
if (metadata_size != 0)
{
buffer_size = align_to(buffer_size, 4);
buffer_size += metadata_size;
buffer_size = align_to(buffer_size, 4);
buffer_size += sizeof(optional_metadata_header);
}
else
buffer_size += 15; // Ensure we have sufficient padding for unaligned 16 byte loads
uint8_t* buffer = allocate_type_array_aligned<uint8_t>(allocator, buffer_size, alignof(compressed_tracks));
std::memset(buffer, 0, buffer_size);
uint8_t* buffer_start = buffer;
out_compressed_tracks = reinterpret_cast<compressed_tracks*>(buffer);
raw_buffer_header* buffer_header = safe_ptr_cast<raw_buffer_header>(buffer);
buffer += sizeof(raw_buffer_header);
tracks_header* header = safe_ptr_cast<tracks_header>(buffer);
buffer += sizeof(tracks_header);
// Write our primary header
header->tag = static_cast<uint32_t>(buffer_tag32::compressed_tracks);
header->version = compressed_tracks_version16::latest;
header->algorithm_type = algorithm_type8::uniformly_sampled;
header->track_type = track_list.get_track_type();
header->num_tracks = context.num_tracks;
header->num_samples = context.num_samples;
header->sample_rate = context.sample_rate;
header->set_has_metadata(metadata_size != 0);
// Write our scalar tracks header
scalar_tracks_header* scalars_header = safe_ptr_cast<scalar_tracks_header>(buffer);
buffer += sizeof(scalar_tracks_header);
scalars_header->num_bits_per_frame = num_bits_per_frame;
const uint8_t* packed_data_start_offset = buffer - sizeof(scalar_tracks_header); // Relative to our header
scalars_header->metadata_per_track = buffer - packed_data_start_offset;
buffer += per_track_metadata_size;
buffer = align_to(buffer, 4);
scalars_header->track_constant_values = buffer - packed_data_start_offset;
buffer += constant_values_size;
scalars_header->track_range_values = buffer - packed_data_start_offset;
buffer += range_values_size;
scalars_header->track_animated_values = buffer - packed_data_start_offset;
buffer += animated_values_size;
if (metadata_size != 0)
{
buffer = align_to(buffer, 4);
buffer += metadata_size;
buffer = align_to(buffer, 4);
buffer += sizeof(optional_metadata_header);
}
else
buffer += 15;
(void)buffer_start; // Avoid VS2017 bug, it falsely reports this variable as unused even when asserts are enabled
ACL_ASSERT((buffer_start + buffer_size) == buffer, "Buffer size and pointer mismatch");
// Write our compressed data
track_metadata* per_track_metadata = scalars_header->get_track_metadata();
write_track_metadata(context, per_track_metadata);
float* constant_values = scalars_header->get_track_constant_values();
write_track_constant_values(context, constant_values);
float* range_values = scalars_header->get_track_range_values();
write_track_range_values(context, range_values);
uint8_t* animated_values = scalars_header->get_track_animated_values();
write_track_animated_values(context, animated_values);
// Optional metadata header is last
uint32_t writter_metadata_track_list_name_size = 0;
uint32_t written_metadata_track_names_size = 0;
uint32_t written_metadata_track_descriptions_size = 0;
if (metadata_size != 0)
{
optional_metadata_header* metadada_header = reinterpret_cast<optional_metadata_header*>(buffer_start + buffer_size - sizeof(optional_metadata_header));
uint32_t metadata_offset = metadata_start_offset;
if (settings.include_track_list_name)
{
metadada_header->track_list_name = metadata_offset;
writter_metadata_track_list_name_size = write_track_list_name(track_list, metadada_header->get_track_list_name(*out_compressed_tracks));
metadata_offset += writter_metadata_track_list_name_size;
}
else
metadada_header->track_list_name = invalid_ptr_offset();
if (settings.include_track_names)
{
metadata_offset = align_to(metadata_offset, 4);
metadada_header->track_name_offsets = metadata_offset;
written_metadata_track_names_size = write_track_names(track_list, context.track_output_indices, context.num_output_tracks, metadada_header->get_track_name_offsets(*out_compressed_tracks));
metadata_offset += written_metadata_track_names_size;
}
else
metadada_header->track_name_offsets = invalid_ptr_offset();
metadada_header->parent_track_indices = invalid_ptr_offset(); // Not supported for scalar tracks
if (settings.include_track_descriptions)
{
metadata_offset = align_to(metadata_offset, 4);
metadada_header->track_descriptions = metadata_offset;
written_metadata_track_descriptions_size = write_track_descriptions(track_list, context.track_output_indices, context.num_output_tracks, metadada_header->get_track_descriptions(*out_compressed_tracks));
metadata_offset += written_metadata_track_descriptions_size;
}
else
metadada_header->track_descriptions = invalid_ptr_offset();
}
ACL_ASSERT(writter_metadata_track_list_name_size == metadata_track_list_name_size, "Wrote too little or too much data");
ACL_ASSERT(written_metadata_track_names_size == metadata_track_names_size, "Wrote too little or too much data");
ACL_ASSERT(written_metadata_track_descriptions_size == metadata_track_descriptions_size, "Wrote too little or too much data");
// Finish the raw buffer header
buffer_header->size = buffer_size;
buffer_header->hash = hash32(safe_ptr_cast<const uint8_t>(header), buffer_size - sizeof(raw_buffer_header)); // Hash everything but the raw buffer header
#if defined(ACL_HAS_ASSERT_CHECKS)
if (metadata_size == 0)
{
for (const uint8_t* padding = buffer - 15; padding < buffer; ++padding)
ACL_ASSERT(*padding == 0, "Padding was overwritten");
}
#endif
#if defined(SJSON_CPP_WRITER)
compression_time.stop();
if (out_stats.logging != stat_logging::none)
write_compression_stats(context, *out_compressed_tracks, compression_time, out_stats);
#endif
return error_result();
}
// Compresses a list of transform (rotation/translation/scale) tracks into a single
// contiguous 'compressed_tracks' binary buffer using the uniformly sampled algorithm.
//
// allocator                 - allocator used for all temporary and final allocations
// track_list                - the raw QVV tracks to compress
// settings                  - compression settings (taken by value; mutated locally below)
// additive_base_track_list  - optional base tracks when compressing an additive clip (may be null)
// additive_format           - how the clip combines with its additive base
// out_compressed_tracks     - receives a pointer into the allocated compressed buffer on success
// out_stats                 - optional stat logging sink
//
// Returns an empty error_result on success, or a description of the first failure.
inline error_result compress_transform_track_list(iallocator& allocator, const track_array_qvvf& track_list, compression_settings settings, const track_array_qvvf* additive_base_track_list, additive_clip_format8 additive_format,
compressed_tracks*& out_compressed_tracks, output_stats& out_stats)
{
error_result result = settings.is_valid();
if (result.any())
return result;
#if defined(SJSON_CPP_WRITER)
scope_profiler compression_time;
#endif
// If every track retains full precision, we disable segmenting since it provides no benefit
if (!is_rotation_format_variable(settings.rotation_format) && !is_vector_format_variable(settings.translation_format) && !is_vector_format_variable(settings.scale_format))
{
settings.segmenting.ideal_num_samples = 0xFFFFFFFF;
settings.segmenting.max_num_samples = 0xFFFFFFFF;
}
// If we want the optional track descriptions, make sure to include the parent track indices
if (settings.include_track_descriptions)
settings.include_parent_track_indices = true;
// Variable bit rate tracks need range reduction
// Full precision tracks do not need range reduction since samples are stored raw
range_reduction_flags8 range_reduction = range_reduction_flags8::none;
if (is_rotation_format_variable(settings.rotation_format))
range_reduction |= range_reduction_flags8::rotations;
if (is_vector_format_variable(settings.translation_format))
range_reduction |= range_reduction_flags8::translations;
if (is_vector_format_variable(settings.scale_format))
range_reduction |= range_reduction_flags8::scales;
// If we have no additive base, our additive format is always none
if (additive_base_track_list == nullptr || additive_base_track_list->is_empty())
additive_format = additive_clip_format8::none;
clip_context raw_clip_context;
if (!initialize_clip_context(allocator, track_list, settings, additive_format, raw_clip_context))
return error_result("Some samples are not finite");
clip_context lossy_clip_context;
initialize_clip_context(allocator, track_list, settings, additive_format, lossy_clip_context);
const bool is_additive = additive_format != additive_clip_format8::none;
clip_context additive_base_clip_context;
// NOTE(review): on this early return, raw_clip_context and lossy_clip_context are not
// destroyed before exiting — possible leak; confirm whether destroy_clip_context is safe
// to call on these paths and whether initialize_clip_context allocates on failure.
if (is_additive && !initialize_clip_context(allocator, *additive_base_track_list, settings, additive_format, additive_base_clip_context))
return error_result("Some base samples are not finite");
// Convert our rotations if we need to
convert_rotation_streams(allocator, lossy_clip_context, settings.rotation_format);
// Extract our clip ranges now, we need it for compacting the constant streams
extract_clip_bone_ranges(allocator, lossy_clip_context);
// Compact and collapse the constant streams
compact_constant_streams(allocator, lossy_clip_context, track_list, settings);
uint32_t clip_range_data_size = 0;
if (range_reduction != range_reduction_flags8::none)
{
// Normalize our samples into the clip wide ranges per bone
normalize_clip_streams(lossy_clip_context, range_reduction);
clip_range_data_size = get_stream_range_data_size(lossy_clip_context, range_reduction, settings.rotation_format);
}
segment_streams(allocator, lossy_clip_context, settings.segmenting);
// If we have a single segment, skip segment range reduction since it won't help
if (range_reduction != range_reduction_flags8::none && lossy_clip_context.num_segments > 1)
{
// Extract and fixup our segment wide ranges per bone
extract_segment_bone_ranges(allocator, lossy_clip_context);
// Normalize our samples into the segment wide ranges per bone
normalize_segment_streams(lossy_clip_context, range_reduction);
}
quantize_streams(allocator, lossy_clip_context, settings, raw_clip_context, additive_base_clip_context, out_stats);
uint32_t num_output_bones = 0;
uint32_t* output_bone_mapping = create_output_track_mapping(allocator, track_list, num_output_bones);
const uint32_t constant_data_size = get_constant_data_size(lossy_clip_context);
calculate_animated_data_size(lossy_clip_context, output_bone_mapping, num_output_bones);
uint32_t num_animated_variable_sub_tracks_padded = 0;
const uint32_t format_per_track_data_size = get_format_per_track_data_size(lossy_clip_context, settings.rotation_format, settings.translation_format, settings.scale_format, &num_animated_variable_sub_tracks_padded);
// Keep an animated group only if at least one of its sub-tracks is not constant.
auto animated_group_filter_action = [&](animation_track_type8 group_type, uint32_t bone_index)
{
const BoneStreams& bone_stream = lossy_clip_context.segments[0].bone_streams[bone_index];
if (group_type == animation_track_type8::rotation)
return !bone_stream.is_rotation_constant;
else if (group_type == animation_track_type8::translation)
return !bone_stream.is_translation_constant;
else
return !bone_stream.is_scale_constant;
};
uint32_t num_animated_groups = 0;
animation_track_type8* animated_sub_track_groups = calculate_sub_track_groups(lossy_clip_context.segments[0], output_bone_mapping, num_output_bones, num_animated_groups, animated_group_filter_action);
const uint32_t animated_group_types_size = sizeof(animation_track_type8) * (num_animated_groups + 1); // Includes terminator
const uint32_t num_tracks_per_bone = lossy_clip_context.has_scale ? 3 : 2;
const uint32_t num_tracks = uint32_t(num_output_bones) * num_tracks_per_bone;
const bitset_description bitset_desc = bitset_description::make_from_num_bits(num_tracks);
// Adding an extra index at the end to delimit things, the index is always invalid: 0xFFFFFFFF
const uint32_t segment_start_indices_size = lossy_clip_context.num_segments > 1 ? (sizeof(uint32_t) * (lossy_clip_context.num_segments + 1)) : 0;
const uint32_t segment_headers_size = sizeof(segment_header) * lossy_clip_context.num_segments;
// First pass: compute the total buffer size. The alignment/ordering here MUST match
// the offset computations and the write pass below, byte for byte.
uint32_t buffer_size = 0;
// Per clip data
buffer_size += sizeof(raw_buffer_header); // Header
buffer_size += sizeof(tracks_header); // Header
buffer_size += sizeof(transform_tracks_header); // Header
const uint32_t clip_header_size = buffer_size;
buffer_size = align_to(buffer_size, 4); // Align segment start indices
buffer_size += segment_start_indices_size; // Segment start indices
buffer_size = align_to(buffer_size, 4); // Align segment headers
buffer_size += segment_headers_size; // Segment headers
buffer_size = align_to(buffer_size, 4); // Align bitsets
const uint32_t clip_segment_header_size = buffer_size - clip_header_size;
buffer_size += bitset_desc.get_num_bytes(); // Default tracks bitset
buffer_size += bitset_desc.get_num_bytes(); // Constant tracks bitset
buffer_size = align_to(buffer_size, 4); // Align constant track data
buffer_size += constant_data_size; // Constant track data
buffer_size = align_to(buffer_size, 4); // Align range data
buffer_size += clip_range_data_size; // Range data
buffer_size += animated_group_types_size; // Our animated group types
const uint32_t clip_data_size = buffer_size - clip_segment_header_size - clip_header_size;
if (are_all_enum_flags_set(out_stats.logging, stat_logging::detailed))
{
constexpr uint32_t k_cache_line_byte_size = 64;
lossy_clip_context.decomp_touched_bytes = clip_header_size + clip_data_size;
lossy_clip_context.decomp_touched_bytes += sizeof(uint32_t) * 4; // We touch at most 4 segment start indices
lossy_clip_context.decomp_touched_bytes += sizeof(segment_header) * 2; // We touch at most 2 segment headers
lossy_clip_context.decomp_touched_cache_lines = align_to(clip_header_size, k_cache_line_byte_size) / k_cache_line_byte_size;
lossy_clip_context.decomp_touched_cache_lines += align_to(clip_data_size, k_cache_line_byte_size) / k_cache_line_byte_size;
lossy_clip_context.decomp_touched_cache_lines += 1; // All 4 segment start indices should fit in a cache line
lossy_clip_context.decomp_touched_cache_lines += 1; // Both segment headers should fit in a cache line
}
// Per segment data
for (SegmentContext& segment : lossy_clip_context.segment_iterator())
{
const uint32_t header_start = buffer_size;
buffer_size += format_per_track_data_size; // Format per track data
// TODO: Alignment only necessary with 16bit per component (segment constant tracks), need to fix scalar decoding path
buffer_size = align_to(buffer_size, 2); // Align range data
buffer_size += segment.range_data_size; // Range data
const uint32_t header_end = buffer_size;
// TODO: Variable bit rate doesn't need alignment
buffer_size = align_to(buffer_size, 4); // Align animated data
buffer_size += segment.animated_data_size; // Animated track data
segment.segment_data_size = buffer_size - header_start;
segment.total_header_size = header_end - header_start;
}
const uint32_t segment_data_size = buffer_size - clip_data_size - clip_segment_header_size - clip_header_size;
// Optional metadata
// Sizing calls (null destination) measure how many bytes each metadata section needs.
const uint32_t metadata_start_offset = align_to(buffer_size, 4);
const uint32_t metadata_track_list_name_size = settings.include_track_list_name ? write_track_list_name(track_list, nullptr) : 0;
const uint32_t metadata_track_names_size = settings.include_track_names ? write_track_names(track_list, output_bone_mapping, num_output_bones, nullptr) : 0;
const uint32_t metadata_parent_track_indices_size = settings.include_parent_track_indices ? write_parent_track_indices(track_list, output_bone_mapping, num_output_bones, nullptr) : 0;
const uint32_t metadata_track_descriptions_size = settings.include_track_descriptions ? write_track_descriptions(track_list, output_bone_mapping, num_output_bones, nullptr) : 0;
uint32_t metadata_size = 0;
metadata_size += metadata_track_list_name_size;
metadata_size = align_to(metadata_size, 4);
metadata_size += metadata_track_names_size;
metadata_size = align_to(metadata_size, 4);
metadata_size += metadata_parent_track_indices_size;
metadata_size = align_to(metadata_size, 4);
metadata_size += metadata_track_descriptions_size;
if (metadata_size != 0)
{
buffer_size = align_to(buffer_size, 4);
buffer_size += metadata_size;
buffer_size = align_to(buffer_size, 4);
buffer_size += sizeof(optional_metadata_header);
}
else
buffer_size += 15; // Ensure we have sufficient padding for unaligned 16 byte loads
// Second pass: allocate and fill the buffer. Zero-fill so padding bytes are deterministic
// (the padding assert below depends on this).
uint8_t* buffer = allocate_type_array_aligned<uint8_t>(allocator, buffer_size, alignof(compressed_tracks));
std::memset(buffer, 0, buffer_size);
uint8_t* buffer_start = buffer;
out_compressed_tracks = reinterpret_cast<compressed_tracks*>(buffer);
raw_buffer_header* buffer_header = safe_ptr_cast<raw_buffer_header>(buffer);
buffer += sizeof(raw_buffer_header);
tracks_header* header = safe_ptr_cast<tracks_header>(buffer);
buffer += sizeof(tracks_header);
// Write our primary header
header->tag = static_cast<uint32_t>(buffer_tag32::compressed_tracks);
header->version = compressed_tracks_version16::latest;
header->algorithm_type = algorithm_type8::uniformly_sampled;
header->track_type = track_list.get_track_type();
header->num_tracks = num_output_bones;
header->num_samples = track_list.get_num_samples_per_track();
header->sample_rate = track_list.get_sample_rate();
header->set_rotation_format(settings.rotation_format);
header->set_translation_format(settings.translation_format);
header->set_scale_format(settings.scale_format);
header->set_has_scale(lossy_clip_context.has_scale);
// Our default scale is 1.0 if we have no additive base or if we don't use 'additive1', otherwise it is 0.0
header->set_default_scale(!is_additive || additive_format != additive_clip_format8::additive1 ? 1 : 0);
header->set_has_metadata(metadata_size != 0);
// Write our transform tracks header
transform_tracks_header* transforms_header = safe_ptr_cast<transform_tracks_header>(buffer);
buffer += sizeof(transform_tracks_header);
transforms_header->num_segments = lossy_clip_context.num_segments;
transforms_header->num_animated_variable_sub_tracks = num_animated_variable_sub_tracks_padded;
get_num_constant_samples(lossy_clip_context, transforms_header->num_constant_rotation_samples, transforms_header->num_constant_translation_samples, transforms_header->num_constant_scale_samples);
get_num_animated_sub_tracks(lossy_clip_context.segments[0],
transforms_header->num_animated_rotation_sub_tracks, transforms_header->num_animated_translation_sub_tracks, transforms_header->num_animated_scale_sub_tracks);
// These offsets mirror the alignment sequence used when sizing the buffer above.
const uint32_t segment_start_indices_offset = align_to<uint32_t>(sizeof(transform_tracks_header), 4); // Relative to the start of our transform_tracks_header
transforms_header->segment_headers_offset = align_to(segment_start_indices_offset + segment_start_indices_size, 4);
transforms_header->default_tracks_bitset_offset = align_to(transforms_header->segment_headers_offset + segment_headers_size, 4);
transforms_header->constant_tracks_bitset_offset = transforms_header->default_tracks_bitset_offset + bitset_desc.get_num_bytes();
transforms_header->constant_track_data_offset = align_to(transforms_header->constant_tracks_bitset_offset + bitset_desc.get_num_bytes(), 4);
transforms_header->clip_range_data_offset = align_to(transforms_header->constant_track_data_offset + constant_data_size, 4);
transforms_header->animated_group_types_offset = transforms_header->clip_range_data_offset + clip_range_data_size;
uint32_t written_segment_start_indices_size = 0;
if (lossy_clip_context.num_segments > 1)
written_segment_start_indices_size = write_segment_start_indices(lossy_clip_context, transforms_header->get_segment_start_indices());
const uint32_t segment_data_start_offset = transforms_header->animated_group_types_offset + animated_group_types_size;
const uint32_t written_segment_headers_size = write_segment_headers(lossy_clip_context, settings, transforms_header->get_segment_headers(), segment_data_start_offset);
uint32_t written_bitset_size = 0;
written_bitset_size += write_default_track_bitset(lossy_clip_context, transforms_header->get_default_tracks_bitset(), bitset_desc, output_bone_mapping, num_output_bones);
written_bitset_size += write_constant_track_bitset(lossy_clip_context, transforms_header->get_constant_tracks_bitset(), bitset_desc, output_bone_mapping, num_output_bones);
uint32_t written_constant_data_size = 0;
if (constant_data_size != 0)
written_constant_data_size = write_constant_track_data(lossy_clip_context, settings.rotation_format, transforms_header->get_constant_track_data(), constant_data_size, output_bone_mapping, num_output_bones);
else
transforms_header->constant_track_data_offset = invalid_ptr_offset();
uint32_t written_clip_range_data_size = 0;
if (range_reduction != range_reduction_flags8::none)
written_clip_range_data_size = write_clip_range_data(lossy_clip_context, range_reduction, transforms_header->get_clip_range_data(), clip_range_data_size, output_bone_mapping, num_output_bones);
else
transforms_header->clip_range_data_offset = invalid_ptr_offset();
const uint32_t written_animated_group_types_size = write_animated_group_types(animated_sub_track_groups, num_animated_groups, transforms_header->get_animated_group_types(), animated_group_types_size);
const uint32_t written_segment_data_size = write_segment_data(lossy_clip_context, settings, range_reduction, *transforms_header, output_bone_mapping, num_output_bones);
// Optional metadata header is last
// NOTE(review): 'writter_' and 'metadada_' below are misspellings of 'written_'/'metadata_'
// carried in the local identifiers; harmless, but worth renaming in a follow-up.
uint32_t writter_metadata_track_list_name_size = 0;
uint32_t written_metadata_track_names_size = 0;
uint32_t written_metadata_parent_track_indices_size = 0;
uint32_t written_metadata_track_descriptions_size = 0;
if (metadata_size != 0)
{
optional_metadata_header* metadada_header = reinterpret_cast<optional_metadata_header*>(buffer_start + buffer_size - sizeof(optional_metadata_header));
uint32_t metadata_offset = metadata_start_offset; // Relative to the start of our compressed_tracks
if (settings.include_track_list_name)
{
metadada_header->track_list_name = metadata_offset;
writter_metadata_track_list_name_size = write_track_list_name(track_list, metadada_header->get_track_list_name(*out_compressed_tracks));
metadata_offset += writter_metadata_track_list_name_size;
}
else
metadada_header->track_list_name = invalid_ptr_offset();
if (settings.include_track_names)
{
metadata_offset = align_to(metadata_offset, 4);
metadada_header->track_name_offsets = metadata_offset;
written_metadata_track_names_size = write_track_names(track_list, output_bone_mapping, num_output_bones, metadada_header->get_track_name_offsets(*out_compressed_tracks));
metadata_offset += written_metadata_track_names_size;
}
else
metadada_header->track_name_offsets = invalid_ptr_offset();
if (settings.include_parent_track_indices)
{
metadata_offset = align_to(metadata_offset, 4);
metadada_header->parent_track_indices = metadata_offset;
written_metadata_parent_track_indices_size = write_parent_track_indices(track_list, output_bone_mapping, num_output_bones, metadada_header->get_parent_track_indices(*out_compressed_tracks));
metadata_offset += written_metadata_parent_track_indices_size;
}
else
metadada_header->parent_track_indices = invalid_ptr_offset();
if (settings.include_track_descriptions)
{
metadata_offset = align_to(metadata_offset, 4);
metadada_header->track_descriptions = metadata_offset;
written_metadata_track_descriptions_size = write_track_descriptions(track_list, output_bone_mapping, num_output_bones, metadada_header->get_track_descriptions(*out_compressed_tracks));
metadata_offset += written_metadata_track_descriptions_size;
}
else
metadada_header->track_descriptions = invalid_ptr_offset();
}
#if defined(ACL_HAS_ASSERT_CHECKS)
{
// Make sure we wrote the right amount of data
// Replays the sizing pass's alignment sequence; any divergence trips the asserts below.
buffer = align_to(buffer, 4); // Align segment start indices
buffer += written_segment_start_indices_size;
buffer = align_to(buffer, 4); // Align segment headers
buffer += written_segment_headers_size;
buffer = align_to(buffer, 4); // Align bitsets
buffer += written_bitset_size;
buffer = align_to(buffer, 4); // Align constant track data
buffer += written_constant_data_size;
buffer = align_to(buffer, 4); // Align range data
buffer += written_clip_range_data_size;
buffer += written_animated_group_types_size;
buffer += written_segment_data_size;
if (metadata_size != 0)
{
buffer = align_to(buffer, 4);
buffer += metadata_size;
buffer = align_to(buffer, 4);
buffer += sizeof(optional_metadata_header);
}
else
buffer += 15; // Ensure we have sufficient padding for unaligned 16 byte loads
(void)buffer_start; // Avoid VS2017 bug, it falsely reports this variable as unused even when asserts are enabled
ACL_ASSERT(written_segment_start_indices_size == segment_start_indices_size, "Wrote too little or too much data");
ACL_ASSERT(written_segment_headers_size == segment_headers_size, "Wrote too little or too much data");
ACL_ASSERT(written_segment_data_size == segment_data_size, "Wrote too little or too much data");
ACL_ASSERT(written_bitset_size == (bitset_desc.get_num_bytes() * 2), "Wrote too little or too much data");
ACL_ASSERT(written_constant_data_size == constant_data_size, "Wrote too little or too much data");
ACL_ASSERT(written_clip_range_data_size == clip_range_data_size, "Wrote too little or too much data");
ACL_ASSERT(written_animated_group_types_size == animated_group_types_size, "Wrote too little or too much data");
ACL_ASSERT(writter_metadata_track_list_name_size == metadata_track_list_name_size, "Wrote too little or too much data");
ACL_ASSERT(written_metadata_track_names_size == metadata_track_names_size, "Wrote too little or too much data");
ACL_ASSERT(written_metadata_parent_track_indices_size == metadata_parent_track_indices_size, "Wrote too little or too much data");
ACL_ASSERT(written_metadata_track_descriptions_size == metadata_track_descriptions_size, "Wrote too little or too much data");
ACL_ASSERT(uint32_t(buffer - buffer_start) == buffer_size, "Wrote too little or too much data");
if (metadata_size == 0)
{
// Relies on the earlier memset: the trailing 15 padding bytes must still be zero.
for (const uint8_t* padding = buffer - 15; padding < buffer; ++padding)
ACL_ASSERT(*padding == 0, "Padding was overwritten");
}
}
#else
(void)written_segment_start_indices_size;
(void)written_segment_headers_size;
(void)written_bitset_size;
(void)written_constant_data_size;
(void)written_clip_range_data_size;
(void)written_segment_data_size;
(void)written_animated_group_types_size;
(void)segment_data_size;
(void)buffer_start;
#endif
// Finish the raw buffer header
buffer_header->size = buffer_size;
buffer_header->hash = hash32(safe_ptr_cast<const uint8_t>(header), buffer_size - sizeof(raw_buffer_header)); // Hash everything but the raw buffer header
#if defined(SJSON_CPP_WRITER)
compression_time.stop();
if (out_stats.logging != stat_logging::none)
write_stats(allocator, track_list, lossy_clip_context, *out_compressed_tracks, settings, range_reduction, raw_clip_context, additive_base_clip_context, compression_time, out_stats);
#endif
// Success path cleanup of all temporary working data.
deallocate_type_array(allocator, animated_sub_track_groups, num_animated_groups);
deallocate_type_array(allocator, output_bone_mapping, num_output_bones);
destroy_clip_context(lossy_clip_context);
destroy_clip_context(raw_clip_context);
destroy_clip_context(additive_base_clip_context);
return error_result();
}
}
// Compresses a generic track list, dispatching on its track category:
// transform tracks take the transform path (with no additive base), everything
// else takes the scalar path. Returns an empty error_result on success.
inline error_result compress_track_list(iallocator& allocator, const track_array& track_list, const compression_settings& settings, compressed_tracks*& out_compressed_tracks, output_stats& out_stats)
{
	using namespace acl_impl;

	// Reject invalid input up front.
	const error_result validation = track_list.is_valid();
	if (validation.any())
		return validation;

	// Disable floating point exceptions during compression because we leverage all SIMD lanes
	// and we might intentionally divide by zero, etc.
	scope_disable_fp_exceptions fp_off;

	const bool is_transform_list = track_list.get_track_category() == track_category8::transformf;
	if (is_transform_list)
		return compress_transform_track_list(allocator, track_array_cast<track_array_qvvf>(track_list), settings, nullptr, additive_clip_format8::none, out_compressed_tracks, out_stats);

	return compress_scalar_track_list(allocator, track_list, settings, out_compressed_tracks, out_stats);
}
// Compresses a transform track list relative to an additive base track list.
// Both lists are validated first; the base list is only checked when an additive
// format is actually in use. Returns an empty error_result on success.
inline error_result compress_track_list(iallocator& allocator, const track_array_qvvf& track_list, const compression_settings& settings, const track_array_qvvf& additive_base_track_list, additive_clip_format8 additive_format, compressed_tracks*& out_compressed_tracks, output_stats& out_stats)
{
	using namespace acl_impl;

	const error_result track_list_check = track_list.is_valid();
	if (track_list_check.any())
		return track_list_check;

	// The base only matters when the clip is additive.
	if (additive_format != additive_clip_format8::none)
	{
		const error_result base_check = additive_base_track_list.is_valid();
		if (base_check.any())
			return base_check;
	}

	// Disable floating point exceptions during compression because we leverage all SIMD lanes
	// and we might intentionally divide by zero, etc.
	scope_disable_fp_exceptions fp_off;

	return compress_transform_track_list(allocator, track_list, settings, &additive_base_track_list, additive_format, out_compressed_tracks, out_stats);
}
}

View File

@ -0,0 +1,99 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2020 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
// Included only once from compression_settings.h
#include "acl/core/error_result.h"
#include "acl/core/hash.h"
#include "acl/core/track_formats.h"
#include <cstdint>
namespace acl
{
// Returns a hash of the segmenting settings, folding both sample-count
// thresholds into a single value.
inline uint32_t segmenting_settings::get_hash() const
{
	uint32_t result = hash_combine(0U, hash32(ideal_num_samples));
	result = hash_combine(result, hash32(max_num_samples));
	return result;
}
// Validates the segmenting settings: the ideal segment length must be at least
// 8 samples and must not exceed the maximum segment length.
inline error_result segmenting_settings::is_valid() const
{
	if (ideal_num_samples < 8)
		return error_result("ideal_num_samples must be greater or equal to 8");

	if (max_num_samples < ideal_num_samples)
		return error_result("ideal_num_samples must be smaller or equal to max_num_samples");

	return error_result();
}
// Returns a hash of every compression setting, in a fixed order so equal
// settings always hash identically.
inline uint32_t compression_settings::get_hash() const
{
	uint32_t result = hash_combine(0U, hash32(level));
	result = hash_combine(result, hash32(rotation_format));
	result = hash_combine(result, hash32(translation_format));
	result = hash_combine(result, hash32(scale_format));
	result = hash_combine(result, segmenting.get_hash());

	// The error metric is optional; only fold it in when one is set.
	if (error_metric != nullptr)
		result = hash_combine(result, error_metric->get_hash());

	result = hash_combine(result, hash32(include_track_list_name));
	result = hash_combine(result, hash32(include_track_names));
	result = hash_combine(result, hash32(include_parent_track_indices));
	result = hash_combine(result, hash32(include_track_descriptions));
	return result;
}
// Validates the compression settings: an error metric is mandatory, and the
// segmenting settings must themselves be valid.
inline error_result compression_settings::is_valid() const
{
	const bool has_error_metric = error_metric != nullptr;
	if (!has_error_metric)
		return error_result("error_metric cannot be NULL");

	// Defer the remaining checks to the segmenting settings.
	return segmenting.is_valid();
}
// Returns settings suitable for raw storage: the default-constructed
// compression_settings value.
inline compression_settings get_raw_compression_settings()
{
	compression_settings raw_settings;
	return raw_settings;
}
// Returns the recommended general-purpose settings: medium compression level
// with variable bit-rate formats for rotation, translation, and scale.
inline compression_settings get_default_compression_settings()
{
	compression_settings defaults;
	defaults.level = compression_level8::medium;
	defaults.rotation_format = rotation_format8::quatf_drop_w_variable;
	defaults.translation_format = vector_format8::vector3f_variable;
	defaults.scale_format = vector_format8::vector3f_variable;
	return defaults;
}
}

View File

@ -0,0 +1,78 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2019 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/bitset.h"
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/iallocator.h"
#include "acl/core/track_desc.h"
#include "acl/compression/impl/track_list_context.h"
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
namespace acl_impl
{
inline bool is_scalarf_track_constant(const track& track_, const track_range& range)
{
const track_desc_scalarf& desc = track_.get_description<track_desc_scalarf>();
return range.is_constant(desc.precision);
}
// Builds the constant-track bitset for the provided track list context.
// Bit N is set when track N holds a constant value (within the precision
// specified by its description).
inline void extract_constant_tracks(track_list_context& context)
{
	ACL_ASSERT(context.is_valid(), "Invalid context");

	const bitset_description bitset_desc = bitset_description::make_from_num_bits(context.num_tracks);

	context.constant_tracks_bitset = allocate_type_array<uint32_t>(*context.allocator, bitset_desc.get_size());
	bitset_reset(context.constant_tracks_bitset, bitset_desc, false);

	const uint32_t num_tracks = context.num_tracks;
	for (uint32_t track_index = 0; track_index < num_tracks; ++track_index)
	{
		const track& cur_track = context.track_list[track_index];
		const track_range& cur_range = context.range_list[track_index];

		bool is_constant = false;
		switch (cur_range.category)
		{
		case track_category8::scalarf:
			is_constant = is_scalarf_track_constant(cur_track, cur_range);
			break;
		default:
			ACL_ASSERT(false, "Invalid track category");
			break;
		}

		bitset_set(context.constant_tracks_bitset, bitset_desc, track_index, is_constant);
	}
}
}
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,236 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2020 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
// Included only once from convert.h
#include "acl/compression/compress.h"
#include "acl/compression/track_array.h"
#include "acl/core/compressed_tracks.h"
#include "acl/core/error_result.h"
#include "acl/core/iallocator.h"
#include "acl/core/track_formats.h"
#include "acl/core/impl/debug_track_writer.h"
#include "acl/decompression/decompress.h"
#include <cstdint>
namespace acl
{
namespace acl_impl
{
// Decompression settings used when sampling data that was compressed with the
// raw (full precision) formats. Statically overrides the base
// decompression_settings so the compiler can strip the normalization path.
struct raw_sampling_decompression_settings final : public decompression_settings
{
// Disable normalization. This is only safe if we know the input data is already normalized!
static constexpr bool normalize_rotations() { return false; }
};
}
// Compresses a raw track list so it can later be converted back.
// Raw compression settings are used and every piece of metadata is retained,
// which allows the original track list to be fully reconstructed.
// Returns the error result produced by compress_track_list.
inline error_result convert_track_list(iallocator& allocator, const track_array& track_list, compressed_tracks*& out_compressed_tracks)
{
	qvvf_transform_error_metric error_metric;

	compression_settings settings = get_raw_compression_settings();
	settings.error_metric = &error_metric;

	// Retain every piece of metadata so the conversion is reversible
	settings.include_track_list_name = true;
	settings.include_track_names = true;
	settings.include_parent_track_indices = true;
	settings.include_track_descriptions = true;

	output_stats stats;
	return compress_track_list(allocator, track_list, settings, out_compressed_tracks, stats);
}
inline error_result convert_track_list(iallocator& allocator, const compressed_tracks& tracks, track_array& out_track_list)
{
error_result error = tracks.is_valid(false);
if (error.any())
return error;
const uint32_t num_tracks = tracks.get_num_tracks();
const track_type8 track_type = tracks.get_track_type();
const uint32_t num_samples = tracks.get_num_samples_per_track();
const float sample_rate = tracks.get_sample_rate();
const float duration = tracks.get_duration();
track_array result(allocator, num_tracks);
result.set_name(string(allocator, tracks.get_name()));
// Setup our track metadata and allocate memory
bool success = true;
for (uint32_t track_index = 0; track_index < num_tracks; ++track_index)
{
track& track_ = result[track_index];
track_desc_scalarf desc_scalar;
track_desc_transformf desc_transform;
bool got_description = false;
switch (track_type)
{
case track_type8::float1f:
got_description = tracks.get_track_description(track_index, desc_scalar);
track_ = track_float1f::make_reserve(desc_scalar, allocator, num_samples, sample_rate);
break;
case track_type8::float2f:
got_description = tracks.get_track_description(track_index, desc_scalar);
track_ = track_float2f::make_reserve(desc_scalar, allocator, num_samples, sample_rate);
break;
case track_type8::float3f:
got_description = tracks.get_track_description(track_index, desc_scalar);
track_ = track_float3f::make_reserve(desc_scalar, allocator, num_samples, sample_rate);
break;
case track_type8::float4f:
got_description = tracks.get_track_description(track_index, desc_scalar);
track_ = track_float4f::make_reserve(desc_scalar, allocator, num_samples, sample_rate);
break;
case track_type8::vector4f:
got_description = tracks.get_track_description(track_index, desc_scalar);
track_ = track_vector4f::make_reserve(desc_scalar, allocator, num_samples, sample_rate);
break;
case track_type8::qvvf:
got_description = tracks.get_track_description(track_index, desc_transform);
track_ = track_qvvf::make_reserve(desc_transform, allocator, num_samples, sample_rate);
break;
default:
ACL_ASSERT(false, "Unexpected track type");
break;
}
success &= got_description;
track_.set_name(string(allocator, tracks.get_track_name(track_index)));
}
if (!success)
return error_result("Metadata was missing from the input");
// Disable floating point exceptions since decompression assumes it
scope_disable_fp_exceptions fp_off;
// Decompress and populate our track data
const acl_impl::tracks_header& header = acl_impl::get_tracks_header(tracks);
if (header.get_rotation_format() == rotation_format8::quatf_full && header.get_translation_format() == vector_format8::vector3f_full && header.get_scale_format() == vector_format8::vector3f_full)
{
// Our input data uses full precision, retain it
decompression_context<acl_impl::raw_sampling_decompression_settings> context;
context.initialize(tracks);
acl_impl::debug_track_writer writer(allocator, track_type, num_tracks);
for (uint32_t sample_index = 0; sample_index < num_samples; ++sample_index)
{
const float sample_time = rtm::scalar_min(float(sample_index) / sample_rate, duration);
// Round to nearest to land directly on a sample
context.seek(sample_time, sample_rounding_policy::nearest);
context.decompress_tracks(writer);
for (uint32_t track_index = 0; track_index < num_tracks; ++track_index)
{
track& track_ = result[track_index];
switch (track_type)
{
case track_type8::float1f:
*reinterpret_cast<float*>(track_[sample_index]) = writer.read_float1(track_index);
break;
case track_type8::float2f:
rtm::vector_store2(writer.read_float2(track_index), reinterpret_cast<rtm::float2f*>(track_[sample_index]));
break;
case track_type8::float3f:
rtm::vector_store3(writer.read_float3(track_index), reinterpret_cast<rtm::float3f*>(track_[sample_index]));
break;
case track_type8::float4f:
rtm::vector_store(writer.read_float4(track_index), reinterpret_cast<rtm::float4f*>(track_[sample_index]));
break;
case track_type8::vector4f:
*reinterpret_cast<rtm::vector4f*>(track_[sample_index]) = writer.read_vector4(track_index);
break;
case track_type8::qvvf:
*reinterpret_cast<rtm::qvvf*>(track_[sample_index]) = writer.read_qvv(track_index);
break;
default:
ACL_ASSERT(false, "Unexpected track type");
break;
}
}
}
}
else
{
decompression_context<decompression_settings> context;
context.initialize(tracks);
acl_impl::debug_track_writer writer(allocator, track_type, num_tracks);
for (uint32_t sample_index = 0; sample_index < num_samples; ++sample_index)
{
const float sample_time = rtm::scalar_min(float(sample_index) / sample_rate, duration);
// Round to nearest to land directly on a sample
context.seek(sample_time, sample_rounding_policy::nearest);
context.decompress_tracks(writer);
for (uint32_t track_index = 0; track_index < num_tracks; ++track_index)
{
track& track_ = result[track_index];
switch (track_type)
{
case track_type8::float1f:
*reinterpret_cast<float*>(track_[sample_index]) = writer.read_float1(track_index);
break;
case track_type8::float2f:
rtm::vector_store2(writer.read_float2(track_index), reinterpret_cast<rtm::float2f*>(track_[sample_index]));
break;
case track_type8::float3f:
rtm::vector_store3(writer.read_float3(track_index), reinterpret_cast<rtm::float3f*>(track_[sample_index]));
break;
case track_type8::float4f:
rtm::vector_store(writer.read_float4(track_index), reinterpret_cast<rtm::float4f*>(track_[sample_index]));
break;
case track_type8::vector4f:
*reinterpret_cast<rtm::vector4f*>(track_[sample_index]) = writer.read_vector4(track_index);
break;
case track_type8::qvvf:
*reinterpret_cast<rtm::qvvf*>(track_[sample_index]) = writer.read_qvv(track_index);
break;
default:
ACL_ASSERT(false, "Unexpected track type");
break;
}
}
}
}
out_track_list = std::move(result);
return error_result();
}
}

View File

@ -0,0 +1,111 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2017 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/iallocator.h"
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/error.h"
#include "acl/core/track_formats.h"
#include "acl/compression/impl/clip_context.h"
#include <rtm/quatf.h>
#include <rtm/vector4f.h>
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
namespace acl_impl
{
// Converts a full precision quaternion rotation sample (stored in a vector4)
// into the high precision variant of the requested output format.
// Only quatf_full and quatf_drop_w_full are produced here; variable
// quantization happens later in the pipeline.
inline rtm::vector4f RTM_SIMD_CALL convert_rotation(rtm::vector4f_arg0 rotation, rotation_format8 from, rotation_format8 to)
{
	ACL_ASSERT(from == rotation_format8::quatf_full, "Source rotation format must be a full precision quaternion");
	(void)from;

	const rotation_format8 high_precision_format = get_rotation_variant(to) == rotation_variant8::quat ? rotation_format8::quatf_full : rotation_format8::quatf_drop_w_full;

	// Already in the source format, nothing to do
	if (high_precision_format == rotation_format8::quatf_full)
		return rotation;

	// Drop W, we just ensure it is positive and write it back, the W component can be ignored afterwards
	if (high_precision_format == rotation_format8::quatf_drop_w_full)
		return rtm::quat_to_vector(rtm::quat_ensure_positive_w(rtm::vector_to_quat(rotation)));

	ACL_ASSERT(false, "Invalid or unsupported rotation format: %s", get_rotation_format_name(to));
	return rotation;
}
// Converts every bone's rotation stream in the segment from quatf_full to the
// high precision variant of the requested rotation format, in place.
// Sample size is kept at sizeof(rtm::quatf) regardless of the target format.
inline void convert_rotation_streams(iallocator& allocator, SegmentContext& segment, rotation_format8 rotation_format)
{
	const rotation_format8 high_precision_format = get_rotation_variant(rotation_format) == rotation_variant8::quat ? rotation_format8::quatf_full : rotation_format8::quatf_drop_w_full;
	const bool should_drop_w = high_precision_format == rotation_format8::quatf_drop_w_full;

	for (BoneStreams& bone_stream : segment.bone_iterator())
	{
		// We convert our rotation stream in place. We assume that the original format is quatf_full stored as rtm::quatf
		// For all other formats, we keep the same sample size and either keep Quat_32 or use rtm::vector4f
		ACL_ASSERT(bone_stream.rotations.get_sample_size() == sizeof(rtm::quatf), "Unexpected rotation sample size. %u != %zu", bone_stream.rotations.get_sample_size(), sizeof(rtm::quatf));

		const uint32_t num_samples = bone_stream.rotations.get_num_samples();
		const float sample_rate = bone_stream.rotations.get_sample_rate();

		RotationTrackStream converted_stream(allocator, num_samples, sizeof(rtm::quatf), sample_rate, high_precision_format);

		for (uint32_t sample_index = 0; sample_index < num_samples; ++sample_index)
		{
			rtm::quatf rotation = bone_stream.rotations.get_raw_sample<rtm::quatf>(sample_index);

			if (should_drop_w)
			{
				// Drop W, we just ensure it is positive and write it back, the W component can be ignored afterwards
				rotation = rtm::quat_ensure_positive_w(rotation);
			}
			else
			{
				// quatf_full: original format, nothing to do; anything else is unsupported
				ACL_ASSERT(high_precision_format == rotation_format8::quatf_full, "Invalid or unsupported rotation format: %s", get_rotation_format_name(high_precision_format));
			}

			converted_stream.set_raw_sample(sample_index, rotation);
		}

		bone_stream.rotations = std::move(converted_stream);
	}
}
// Applies the per-segment rotation stream conversion to every segment in the clip.
inline void convert_rotation_streams(iallocator& allocator, clip_context& context, rotation_format8 rotation_format)
{
	for (SegmentContext& segment_context : context.segment_iterator())
	{
		convert_rotation_streams(allocator, segment_context, rotation_format);
	}
}
}
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,401 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2017 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/iallocator.h"
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/error.h"
#include "acl/core/enum_utils.h"
#include "acl/core/track_formats.h"
#include "acl/core/track_types.h"
#include "acl/core/range_reduction_types.h"
#include "acl/compression/impl/clip_context.h"
#include <rtm/vector4f.h>
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
namespace acl_impl
{
// Computes the axis-aligned min/max range across every sample of a track stream.
// When is_vector4 is false, the W component of the result is forced to zero
// since it carries no meaningful data for vector3 tracks.
inline TrackStreamRange calculate_track_range(const TrackStream& stream, bool is_vector4)
{
	const uint32_t num_samples = stream.get_num_samples();

	rtm::vector4f range_min = rtm::vector_set(1e10F);
	rtm::vector4f range_max = rtm::vector_set(-1e10F);

	for (uint32_t sample_index = 0; sample_index < num_samples; ++sample_index)
	{
		const rtm::vector4f sample = stream.get_raw_sample<rtm::vector4f>(sample_index);

		range_min = rtm::vector_min(range_min, sample);
		range_max = rtm::vector_max(range_max, sample);
	}

	// Set the 4th component to zero if we don't need it
	if (!is_vector4)
	{
		range_min = rtm::vector_set_w(range_min, 0.0F);
		range_max = rtm::vector_set_w(range_max, 0.0F);
	}

	return TrackStreamRange::from_min_max(range_min, range_max);
}
// Computes the rotation/translation/scale ranges of every bone in the segment
// and writes them into the caller-provided output array.
inline void extract_bone_ranges_impl(const SegmentContext& segment, BoneRanges* bone_ranges)
{
	const bool has_scale = segment_context_has_scale(segment);
	const uint32_t num_bones = segment.num_bones;

	for (uint32_t bone_index = 0; bone_index < num_bones; ++bone_index)
	{
		const BoneStreams& bone_stream = segment.bone_streams[bone_index];
		BoneRanges& bone_range = bone_ranges[bone_index];

		// Rotations use all 4 components; translations and scales are vector3
		bone_range.rotation = calculate_track_range(bone_stream.rotations, true);
		bone_range.translation = calculate_track_range(bone_stream.translations, false);
		bone_range.scale = has_scale ? calculate_track_range(bone_stream.scales, false) : TrackStreamRange();
	}
}
// Allocates and computes the per-bone ranges over the whole clip.
// The clip must not have been segmented yet (a single segment is required).
inline void extract_clip_bone_ranges(iallocator& allocator, clip_context& context)
{
	ACL_ASSERT(context.num_segments == 1, "context must contain a single segment!");

	context.ranges = allocate_type_array<BoneRanges>(allocator, context.num_bones);
	acl_impl::extract_bone_ranges_impl(context.segments[0], context.ranges);
}
// Computes per-segment bone ranges and pads them so that, after the min/extent
// are quantized on k_segment_range_reduction_num_bits_per_component bits, the
// stored range still fully encompasses the true min/max of every sample.
// Only animated tracks whose component was normalized at the clip level are
// fixed up; other tracks keep their raw computed range.
inline void extract_segment_bone_ranges(iallocator& allocator, clip_context& context)
{
const rtm::vector4f one = rtm::vector_set(1.0F);
const rtm::vector4f zero = rtm::vector_zero();
// Largest representable quantized component value (e.g. 255 for 8 bits)
const float max_range_value_flt = float((1 << k_segment_range_reduction_num_bits_per_component) - 1);
const rtm::vector4f max_range_value = rtm::vector_set(max_range_value_flt);
const rtm::vector4f inv_max_range_value = rtm::vector_set(1.0F / max_range_value_flt);
// Segment ranges are always normalized and live between [0.0 ... 1.0]
auto fixup_range = [&](const TrackStreamRange& range)
{
// In our compressed format, we store the minimum value of the track range quantized on 8 bits.
// To get the best accuracy, we pick the value closest to the true minimum that is slightly lower.
// This is to ensure that we encompass the lowest value even after quantization.
const rtm::vector4f range_min = range.get_min();
const rtm::vector4f scaled_min = rtm::vector_mul(range_min, max_range_value);
// Candidate 0: floor of the scaled minimum; candidate 1: one quantization step lower
const rtm::vector4f quantized_min0 = rtm::vector_clamp(rtm::vector_floor(scaled_min), zero, max_range_value);
const rtm::vector4f quantized_min1 = rtm::vector_max(rtm::vector_sub(quantized_min0, one), zero);
const rtm::vector4f padded_range_min0 = rtm::vector_mul(quantized_min0, inv_max_range_value);
const rtm::vector4f padded_range_min1 = rtm::vector_mul(quantized_min1, inv_max_range_value);
// Check if min0 is below or equal to our original range minimum value, if it is, it is good
// enough to use otherwise min1 is guaranteed to be lower.
const rtm::mask4f is_min0_lower_mask = rtm::vector_less_equal(padded_range_min0, range_min);
const rtm::vector4f padded_range_min = rtm::vector_select(is_min0_lower_mask, padded_range_min0, padded_range_min1);
// The story is different for the extent. We do not store the max, instead we use the extent
// for performance reasons: a single mul/add is required to reconstruct the original value.
// Now that our minimum value changed, our extent also changed.
// We want to pick the extent value that brings us closest to our original max value while
// being slightly larger to encompass it.
const rtm::vector4f range_max = range.get_max();
const rtm::vector4f range_extent = rtm::vector_sub(range_max, padded_range_min);
const rtm::vector4f scaled_extent = rtm::vector_mul(range_extent, max_range_value);
// Candidate 0: ceil of the scaled extent; candidate 1: one quantization step higher
const rtm::vector4f quantized_extent0 = rtm::vector_clamp(rtm::vector_ceil(scaled_extent), zero, max_range_value);
const rtm::vector4f quantized_extent1 = rtm::vector_min(rtm::vector_add(quantized_extent0, one), max_range_value);
const rtm::vector4f padded_range_extent0 = rtm::vector_mul(quantized_extent0, inv_max_range_value);
const rtm::vector4f padded_range_extent1 = rtm::vector_mul(quantized_extent1, inv_max_range_value);
// Check if extent0 is above or equal to our original range maximum value, if it is, it is good
// enough to use otherwise extent1 is guaranteed to be higher.
const rtm::mask4f is_extent0_higher_mask = rtm::vector_greater_equal(padded_range_extent0, range_max);
const rtm::vector4f padded_range_extent = rtm::vector_select(is_extent0_higher_mask, padded_range_extent0, padded_range_extent1);
return TrackStreamRange::from_min_extent(padded_range_min, padded_range_extent);
};
for (SegmentContext& segment : context.segment_iterator())
{
// Compute the raw per-bone ranges for this segment, then pad the animated,
// normalized components for quantization as described above
segment.ranges = allocate_type_array<BoneRanges>(allocator, segment.num_bones);
acl_impl::extract_bone_ranges_impl(segment, segment.ranges);
for (uint32_t bone_index = 0; bone_index < segment.num_bones; ++bone_index)
{
const BoneStreams& bone_stream = segment.bone_streams[bone_index];
BoneRanges& bone_range = segment.ranges[bone_index];
if (!bone_stream.is_rotation_constant && context.are_rotations_normalized)
bone_range.rotation = fixup_range(bone_range.rotation);
if (!bone_stream.is_translation_constant && context.are_translations_normalized)
bone_range.translation = fixup_range(bone_range.translation);
if (!bone_stream.is_scale_constant && context.are_scales_normalized)
bone_range.scale = fixup_range(bone_range.scale);
}
}
}
// Normalizes a sample into [0.0 .. 1.0] within the provided range:
// normalized = (value - range min) / range extent.
// Components whose range extent is near zero yield 0.0 to avoid dividing by zero.
inline rtm::vector4f RTM_SIMD_CALL normalize_sample(rtm::vector4f_arg0 sample, const TrackStreamRange& range)
{
	const rtm::vector4f range_min = range.get_min();
	const rtm::vector4f range_extent = range.get_extent();
	const rtm::mask4f is_range_zero_mask = rtm::vector_less_than(range_extent, rtm::vector_set(0.000000001F));

	rtm::vector4f result = rtm::vector_div(rtm::vector_sub(sample, range_min), range_extent);

	// Clamp the top end because the division might be imprecise
	result = rtm::vector_min(result, rtm::vector_set(1.0F));

	// Where the range was empty, the track value is constant within the range; output zero
	return rtm::vector_select(is_range_zero_mask, rtm::vector_zero(), result);
}
// Normalizes every animated rotation sample into [0.0 .. 1.0] in place, using
// the provided per-bone ranges: normalized = (value - range min) / range extent.
// Components with a near-zero range extent are set to 0.0 (constant value).
// Constant/default rotation tracks are skipped.
inline void normalize_rotation_streams(BoneStreams* bone_streams, const BoneRanges* bone_ranges, uint32_t num_bones)
{
const rtm::vector4f one = rtm::vector_set(1.0F);
const rtm::vector4f zero = rtm::vector_zero();
for (uint32_t bone_index = 0; bone_index < num_bones; ++bone_index)
{
BoneStreams& bone_stream = bone_streams[bone_index];
const BoneRanges& bone_range = bone_ranges[bone_index];
// We expect all our samples to have the same width of sizeof(rtm::vector4f)
ACL_ASSERT(bone_stream.rotations.get_sample_size() == sizeof(rtm::vector4f), "Unexpected rotation sample size. %u != %zu", bone_stream.rotations.get_sample_size(), sizeof(rtm::vector4f));
// Constant or default tracks are not normalized
if (bone_stream.is_rotation_constant)
continue;
const uint32_t num_samples = bone_stream.rotations.get_num_samples();
const rtm::vector4f range_min = bone_range.rotation.get_min();
const rtm::vector4f range_extent = bone_range.rotation.get_extent();
// Near-zero extent means the component is constant; avoid dividing by zero below
const rtm::mask4f is_range_zero_mask = rtm::vector_less_than(range_extent, rtm::vector_set(0.000000001F));
for (uint32_t sample_index = 0; sample_index < num_samples; ++sample_index)
{
// normalized value is between [0.0 .. 1.0]
// value = (normalized value * range extent) + range min
// normalized value = (value - range min) / range extent
const rtm::vector4f rotation = bone_stream.rotations.get_raw_sample<rtm::vector4f>(sample_index);
rtm::vector4f normalized_rotation = rtm::vector_div(rtm::vector_sub(rotation, range_min), range_extent);
// Clamp because the division might be imprecise
normalized_rotation = rtm::vector_min(normalized_rotation, one);
normalized_rotation = rtm::vector_select(is_range_zero_mask, zero, normalized_rotation);
#if defined(ACL_HAS_ASSERT_CHECKS)
// Full quaternions validate all 4 components; drop-W variants only validate
// XYZ since the W component is ignored after conversion
switch (bone_stream.rotations.get_rotation_format())
{
case rotation_format8::quatf_full:
ACL_ASSERT(rtm::vector_all_greater_equal(normalized_rotation, zero) && rtm::vector_all_less_equal(normalized_rotation, one), "Invalid normalized rotation. 0.0 <= [%f, %f, %f, %f] <= 1.0", (float)rtm::vector_get_x(normalized_rotation), (float)rtm::vector_get_y(normalized_rotation), (float)rtm::vector_get_z(normalized_rotation), (float)rtm::vector_get_w(normalized_rotation));
break;
case rotation_format8::quatf_drop_w_full:
case rotation_format8::quatf_drop_w_variable:
ACL_ASSERT(rtm::vector_all_greater_equal3(normalized_rotation, zero) && rtm::vector_all_less_equal3(normalized_rotation, one), "Invalid normalized rotation. 0.0 <= [%f, %f, %f] <= 1.0", (float)rtm::vector_get_x(normalized_rotation), (float)rtm::vector_get_y(normalized_rotation), (float)rtm::vector_get_z(normalized_rotation));
break;
}
#endif
bone_stream.rotations.set_raw_sample(sample_index, normalized_rotation);
}
}
}
// Normalizes every animated translation sample into [0.0 .. 1.0] in place using
// the provided per-bone ranges: normalized = (value - range min) / range extent.
// Components with a near-zero range extent are set to 0.0 (constant value).
// Constant/default translation tracks are skipped.
inline void normalize_translation_streams(BoneStreams* bone_streams, const BoneRanges* bone_ranges, uint32_t num_bones)
{
	const rtm::vector4f one = rtm::vector_set(1.0F);
	const rtm::vector4f zero = rtm::vector_zero();

	for (uint32_t bone_index = 0; bone_index < num_bones; ++bone_index)
	{
		BoneStreams& bone_stream = bone_streams[bone_index];

		// We expect all our samples to have the same width of sizeof(rtm::vector4f)
		ACL_ASSERT(bone_stream.translations.get_sample_size() == sizeof(rtm::vector4f), "Unexpected translation sample size. %u != %zu", bone_stream.translations.get_sample_size(), sizeof(rtm::vector4f));

		// Constant or default tracks are not normalized
		if (bone_stream.is_translation_constant)
			continue;

		const TrackStreamRange& range = bone_ranges[bone_index].translation;
		const rtm::vector4f range_min = range.get_min();
		const rtm::vector4f range_extent = range.get_extent();
		const rtm::mask4f is_range_zero_mask = rtm::vector_less_than(range_extent, rtm::vector_set(0.000000001F));

		const uint32_t num_samples = bone_stream.translations.get_num_samples();
		for (uint32_t sample_index = 0; sample_index < num_samples; ++sample_index)
		{
			const rtm::vector4f translation = bone_stream.translations.get_raw_sample<rtm::vector4f>(sample_index);

			// normalized value = (value - range min) / range extent, clamped at the
			// top end because the division might be imprecise
			rtm::vector4f normalized_translation = rtm::vector_min(rtm::vector_div(rtm::vector_sub(translation, range_min), range_extent), one);
			normalized_translation = rtm::vector_select(is_range_zero_mask, zero, normalized_translation);

			ACL_ASSERT(rtm::vector_all_greater_equal3(normalized_translation, zero) && rtm::vector_all_less_equal3(normalized_translation, one), "Invalid normalized translation. 0.0 <= [%f, %f, %f] <= 1.0", (float)rtm::vector_get_x(normalized_translation), (float)rtm::vector_get_y(normalized_translation), (float)rtm::vector_get_z(normalized_translation));

			bone_stream.translations.set_raw_sample(sample_index, normalized_translation);
		}
	}
}
// Normalizes every animated scale sample into [0.0 .. 1.0] in place using the
// provided per-bone ranges: normalized = (value - range min) / range extent.
// Components with a near-zero range extent are set to 0.0 (constant value).
// Constant/default scale tracks are skipped.
inline void normalize_scale_streams(BoneStreams* bone_streams, const BoneRanges* bone_ranges, uint32_t num_bones)
{
	const rtm::vector4f one = rtm::vector_set(1.0F);
	const rtm::vector4f zero = rtm::vector_zero();

	for (uint32_t bone_index = 0; bone_index < num_bones; ++bone_index)
	{
		BoneStreams& bone_stream = bone_streams[bone_index];

		// We expect all our samples to have the same width of sizeof(rtm::vector4f)
		ACL_ASSERT(bone_stream.scales.get_sample_size() == sizeof(rtm::vector4f), "Unexpected scale sample size. %u != %zu", bone_stream.scales.get_sample_size(), sizeof(rtm::vector4f));

		// Constant or default tracks are not normalized
		if (bone_stream.is_scale_constant)
			continue;

		const TrackStreamRange& range = bone_ranges[bone_index].scale;
		const rtm::vector4f range_min = range.get_min();
		const rtm::vector4f range_extent = range.get_extent();
		const rtm::mask4f is_range_zero_mask = rtm::vector_less_than(range_extent, rtm::vector_set(0.000000001F));

		const uint32_t num_samples = bone_stream.scales.get_num_samples();
		for (uint32_t sample_index = 0; sample_index < num_samples; ++sample_index)
		{
			const rtm::vector4f scale = bone_stream.scales.get_raw_sample<rtm::vector4f>(sample_index);

			// normalized value = (value - range min) / range extent, clamped at the
			// top end because the division might be imprecise
			rtm::vector4f normalized_scale = rtm::vector_min(rtm::vector_div(rtm::vector_sub(scale, range_min), range_extent), one);
			normalized_scale = rtm::vector_select(is_range_zero_mask, zero, normalized_scale);

			ACL_ASSERT(rtm::vector_all_greater_equal3(normalized_scale, zero) && rtm::vector_all_less_equal3(normalized_scale, one), "Invalid normalized scale. 0.0 <= [%f, %f, %f] <= 1.0", (float)rtm::vector_get_x(normalized_scale), (float)rtm::vector_get_y(normalized_scale), (float)rtm::vector_get_z(normalized_scale));

			bone_stream.scales.set_raw_sample(sample_index, normalized_scale);
		}
	}
}
// Normalizes the clip-wide streams for each enabled range reduction flag and
// records on the context which components ended up normalized.
// The clip must not have been segmented yet (a single segment is required).
inline void normalize_clip_streams(clip_context& context, range_reduction_flags8 range_reduction)
{
	ACL_ASSERT(context.num_segments == 1, "context must contain a single segment!");

	SegmentContext& segment = context.segments[0];
	const bool has_scale = segment_context_has_scale(segment);

	if (are_any_enum_flags_set(range_reduction, range_reduction_flags8::rotations))
	{
		normalize_rotation_streams(segment.bone_streams, context.ranges, segment.num_bones);
		context.are_rotations_normalized = true;
	}

	if (are_any_enum_flags_set(range_reduction, range_reduction_flags8::translations))
	{
		normalize_translation_streams(segment.bone_streams, context.ranges, segment.num_bones);
		context.are_translations_normalized = true;
	}

	// Scale is only normalized when the clip actually contains scale data
	if (has_scale && are_any_enum_flags_set(range_reduction, range_reduction_flags8::scales))
	{
		normalize_scale_streams(segment.bone_streams, context.ranges, segment.num_bones);
		context.are_scales_normalized = true;
	}
}
// Normalizes each segment's streams according to the enabled range reduction
// flags, then computes segment.range_data_size: the number of bytes required
// to store the quantized per-track segment range data.
// NOTE(review): the `* 6` factor is presumably 3 components each for the range
// minimum and extent — confirm against the range data packing code.
inline void normalize_segment_streams(clip_context& context, range_reduction_flags8 range_reduction)
{
for (SegmentContext& segment : context.segment_iterator())
{
if (are_any_enum_flags_set(range_reduction, range_reduction_flags8::rotations))
{
normalize_rotation_streams(segment.bone_streams, segment.ranges, segment.num_bones)
}
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,102 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2019 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include "acl/compression/impl/track_list_context.h"
#include <rtm/mask4i.h>
#include <rtm/vector4f.h>
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
namespace acl_impl
{
// Normalizes every sample of a scalarf track in place so that each component
// lies in [0.0, 1.0] relative to the track's range: (value - min) / extent.
// Components whose extent is (near) zero are forced to 0.0.
inline void normalize_scalarf_track(track& mut_track, const scalarf_range& range)
{
using namespace rtm;
const vector4f one = rtm::vector_set(1.0F);
const vector4f zero = vector_zero();
// Tracks are processed as vector4f samples; unused lanes are don't-care here.
track_vector4f& typed_track = track_cast<track_vector4f>(mut_track);
const uint32_t num_samples = mut_track.get_num_samples();
const vector4f range_min = range.get_min();
const vector4f range_extent = range.get_extent();
// Mark near-zero-extent (constant) components; their normalized value is zeroed below
// to avoid dividing by (almost) zero producing garbage.
const mask4f is_range_zero_mask = vector_less_than(range_extent, rtm::vector_set(0.000000001F));
for (uint32_t sample_index = 0; sample_index < num_samples; ++sample_index)
{
// normalized value is between [0.0 .. 1.0]
// value = (normalized value * range extent) + range min
// normalized value = (value - range min) / range extent
const vector4f sample = typed_track[sample_index];
vector4f normalized_sample = vector_div(vector_sub(sample, range_min), range_extent);
// Clamp because the division might be imprecise
normalized_sample = vector_min(normalized_sample, one);
// Constant components: force the normalized value to exactly 0.0.
normalized_sample = vector_select(is_range_zero_mask, zero, normalized_sample);
ACL_ASSERT(vector_all_greater_equal(normalized_sample, zero) && vector_all_less_equal(normalized_sample, one), "Invalid normalized value. 0.0 <= [%f, %f, %f, %f] <= 1.0", (float)vector_get_x(normalized_sample), (float)vector_get_y(normalized_sample), (float)vector_get_z(normalized_sample), (float)vector_get_w(normalized_sample));
typed_track[sample_index] = normalized_sample;
}
}
// Normalizes every animated (non-constant) track in the list against its
// per-track range. Constant tracks are left untouched.
inline void normalize_tracks(track_list_context& context)
{
    ACL_ASSERT(context.is_valid(), "Invalid context");

    const uint32_t num_tracks = context.num_tracks;
    for (uint32_t track_index = 0; track_index < num_tracks; ++track_index)
    {
        // Constant tracks don't need to be modified
        if (context.is_constant(track_index))
            continue;

        const track_range& range = context.range_list[track_index];
        track& mut_track = context.track_list[track_index];

        switch (range.category)
        {
        case track_category8::scalarf:
            normalize_scalarf_track(mut_track, range.range.scalarf);
            break;
        default:
            ACL_ASSERT(false, "Invalid track category");
            break;
        }
    }
}
}
}
ACL_IMPL_FILE_PRAGMA_POP

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,187 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2019 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/track_types.h"
#include "acl/core/variable_bit_rates.h"
#include "acl/compression/impl/track_list_context.h"
#include <rtm/mask4i.h>
#include <rtm/vector4f.h>
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
namespace acl_impl
{
// Precomputed scale factors for quantizing a normalized [0, 1] value onto a
// num_bits unsigned integer grid and for mapping it back.
struct quantization_scales
{
// Largest representable quantized value: (2^num_bits) - 1, splatted to all lanes.
rtm::vector4f max_value;
// Reciprocal of max_value, splatted to all lanes.
rtm::vector4f inv_max_value;
// num_bits must be in [1, 30]; 31+ would overflow the (1 << num_bits) below.
explicit quantization_scales(uint32_t num_bits)
{
ACL_ASSERT(num_bits > 0, "Cannot decay with 0 bits");
ACL_ASSERT(num_bits < 31, "Attempting to decay on too many bits");
const float max_value_ = rtm::scalar_safe_to_float((1 << num_bits) - 1);
max_value = rtm::vector_set(max_value_);
inv_max_value = rtm::vector_set(1.0F / max_value_);
}
};
// Decays the input value through quantization by packing and unpacking a normalized input value
inline rtm::vector4f RTM_SIMD_CALL decay_vector4_uXX(rtm::vector4f_arg0 value, const quantization_scales& scales)
{
    using namespace rtm;
    ACL_ASSERT(vector_all_greater_equal(value, vector_zero()) && vector_all_less_equal(value, rtm::vector_set(1.0F)), "Expected normalized unsigned input value: %f, %f, %f, %f", (float)vector_get_x(value), (float)vector_get_y(value), (float)vector_get_z(value), (float)vector_get_w(value));

    // Snap onto the integer grid, then map back into [0.0, 1.0] to measure
    // what the value will look like after a quantize/dequantize round trip.
    const vector4f quantized = vector_round_symmetric(vector_mul(value, scales.max_value));
    return vector_mul(quantized, scales.inv_max_value);
}

// Packs a normalized input value through quantization
inline rtm::vector4f RTM_SIMD_CALL pack_vector4_uXX(rtm::vector4f_arg0 value, const quantization_scales& scales)
{
    using namespace rtm;
    ACL_ASSERT(vector_all_greater_equal(value, vector_zero()) && vector_all_less_equal(value, rtm::vector_set(1.0F)), "Expected normalized unsigned input value: %f, %f, %f, %f", (float)vector_get_x(value), (float)vector_get_y(value), (float)vector_get_z(value), (float)vector_get_w(value));

    // Same grid snap as decay_vector4_uXX but without the unpack step.
    return vector_round_symmetric(vector_mul(value, scales.max_value));
}
// Finds the lowest bit rate that keeps a scalarf track within its precision
// target, records it in context.bit_rate_list, and rewrites the (normalized)
// track samples as quantized values — or restores the raw samples if no bit
// rate is accurate enough.
inline void quantize_scalarf_track(track_list_context& context, uint32_t track_index)
{
using namespace rtm;
const track& ref_track = (*context.reference_list)[track_index];
track_vector4f& mut_track = track_cast<track_vector4f>(context.track_list[track_index]);
const vector4f precision = vector_load1(&mut_track.get_description().precision);
const uint32_t ref_element_size = ref_track.get_sample_size();
const uint32_t num_samples = mut_track.get_num_samples();
const scalarf_range& range = context.range_list[track_index].range.scalarf;
const vector4f range_min = range.get_min();
const vector4f range_extent = range.get_extent();
const vector4f zero = vector_zero();
// Build a lane mask covering only the components the reference track actually
// stores (ref_element_size bytes), so unused vector4f lanes are ignored when
// measuring error. The memcpy writes that many bytes of all-true mask bits.
const mask4f all_true_mask = mask_set(true, true, true, true);
mask4f sample_mask = mask_set(false, false, false, false);
std::memcpy(&sample_mask, &all_true_mask, ref_element_size);
vector4f raw_sample = zero;
uint8_t best_bit_rate = k_highest_bit_rate; // Default to raw if we fail to find something better
// Walk bit rates from most precise to least, keeping the lowest one that
// stays within the precision target for every sample.
// First we look for the best bit rate possible that keeps us within our precision target
for (uint8_t bit_rate = k_highest_bit_rate - 1; bit_rate != 0; --bit_rate) // Skip the raw bit rate and the constant bit rate
{
const uint32_t num_bits_at_bit_rate = get_num_bits_at_bit_rate(bit_rate);
const quantization_scales scales(num_bits_at_bit_rate);
bool is_error_to_high = false;
for (uint32_t sample_index = 0; sample_index < num_samples; ++sample_index)
{
// Load only the stored bytes of the reference sample; remaining lanes stay zero.
std::memcpy(&raw_sample, ref_track[sample_index], ref_element_size);
const vector4f normalized_sample = mut_track[sample_index];
// Decay our value through quantization
const vector4f decayed_normalized_sample = decay_vector4_uXX(normalized_sample, scales);
// Undo normalization
const vector4f decayed_sample = vector_mul_add(decayed_normalized_sample, range_extent, range_min);
// Error against the raw sample, restricted to the meaningful lanes.
const vector4f delta = vector_abs(vector_sub(raw_sample, decayed_sample));
const vector4f masked_delta = vector_select(sample_mask, delta, zero);
if (!vector_all_less_equal(masked_delta, precision))
{
is_error_to_high = true;
break;
}
}
if (is_error_to_high)
break; // Our error is too high, use the previous bit rate
// We were accurate enough, this is the best bit rate so far
best_bit_rate = bit_rate;
}
context.bit_rate_list[track_index].scalar.value = best_bit_rate;
// Done, update our track with the final result
if (best_bit_rate == k_highest_bit_rate)
{
// We can't quantize this track, keep it raw
for (uint32_t sample_index = 0; sample_index < num_samples; ++sample_index)
std::memcpy(&mut_track[sample_index], ref_track[sample_index], ref_element_size);
}
else
{
// Use the selected bit rate to quantize our track
const uint32_t num_bits_at_bit_rate = get_num_bits_at_bit_rate(best_bit_rate);
const quantization_scales scales(num_bits_at_bit_rate);
for (uint32_t sample_index = 0; sample_index < num_samples; ++sample_index)
mut_track[sample_index] = pack_vector4_uXX(mut_track[sample_index], scales);
}
}
// Allocates the per-track bit rate list and quantizes every animated
// (non-constant) track in the list. Constant tracks are left untouched.
inline void quantize_tracks(track_list_context& context)
{
    ACL_ASSERT(context.is_valid(), "Invalid context");

    context.bit_rate_list = allocate_type_array<track_bit_rate>(*context.allocator, context.num_tracks);

    const uint32_t num_tracks = context.num_tracks;
    for (uint32_t track_index = 0; track_index < num_tracks; ++track_index)
    {
        // Constant tracks don't need to be modified
        if (context.is_constant(track_index))
            continue;

        const track_range& range = context.range_list[track_index];

        switch (range.category)
        {
        case track_category8::scalarf:
            quantize_scalarf_track(context, track_index);
            break;
        default:
            ACL_ASSERT(false, "Invalid track category");
            break;
        }
    }
}
}
}
ACL_IMPL_FILE_PRAGMA_POP

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,97 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2017 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/iallocator.h"
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/error.h"
#include "acl/core/hash.h"
#include "acl/core/iterator.h"
#include "acl/compression/impl/track_stream.h"
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
namespace acl_impl
{
struct clip_context;
//////////////////////////////////////////////////////////////////////////
// The sample distribution.
//////////////////////////////////////////////////////////////////////////
// One byte wide; stored per segment (see SegmentContext::distribution).
enum class SampleDistribution8 : uint8_t
{
// Samples are uniform, use the whole clip to determine the interpolation alpha.
Uniform,
// Samples are not uniform, use each track to determine the interpolation alpha.
Variable,
};
// Working state for one segment of a clip during compression.
struct SegmentContext
{
// Owning clip context.
clip_context* clip;
// Per-bone sample streams for this segment; num_bones entries.
BoneStreams* bone_streams;
// Per-bone value ranges for this segment; num_bones entries (may be null before range extraction).
BoneRanges* ranges;
// Number of samples per track in this segment.
uint32_t num_samples;
uint32_t num_bones;
// Index of this segment's first sample within the whole clip.
uint32_t clip_sample_offset;
uint32_t segment_index;
SampleDistribution8 distribution;
// Set once the corresponding streams have been normalized against this segment's ranges.
bool are_rotations_normalized;
bool are_translations_normalized;
bool are_scales_normalized;
// Stat tracking
// Sizes below are in bits ("bit_size") or bytes ("data_size"/"header_size"), per their names.
uint32_t animated_pose_rotation_bit_size;
uint32_t animated_pose_translation_bit_size;
uint32_t animated_pose_scale_bit_size;
uint32_t animated_pose_bit_size;
uint32_t animated_data_size;
uint32_t range_data_size;
uint32_t segment_data_size;
uint32_t total_header_size;
//////////////////////////////////////////////////////////////////////////
// Iteration helpers over the per-bone streams.
iterator<BoneStreams> bone_iterator() { return iterator<BoneStreams>(bone_streams, num_bones); }
const_iterator<BoneStreams> const_bone_iterator() const { return const_iterator<BoneStreams>(bone_streams, num_bones); }
};
// Releases the per-bone arrays owned by a segment. The segment struct itself
// is not freed here; its owner is responsible for that.
inline void destroy_segment_context(iallocator& allocator, SegmentContext& segment)
{
    // The two arrays are independent allocations; free both.
    deallocate_type_array(allocator, segment.ranges, segment.num_bones);
    deallocate_type_array(allocator, segment.bone_streams, segment.num_bones);
}
}
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,187 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2017 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/iallocator.h"
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/error.h"
#include "acl/compression/compression_settings.h"
#include "acl/compression/impl/clip_context.h"
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
namespace acl_impl
{
// Splits the clip's single segment into multiple segments of roughly
// settings.ideal_num_samples samples each (never exceeding
// settings.max_num_samples), copying the per-bone streams into each new
// segment and destroying the original segment. Does nothing when the clip
// already fits within settings.max_num_samples.
inline void segment_streams(iallocator& allocator, clip_context& clip, const segmenting_settings& settings)
{
    ACL_ASSERT(clip.num_segments == 1, "clip_context must have a single segment.");
    ACL_ASSERT(settings.ideal_num_samples <= settings.max_num_samples, "Invalid num samples for segmenting settings. %u > %u", settings.ideal_num_samples, settings.max_num_samples);

    if (clip.num_samples <= settings.max_num_samples)
        return; // Already fits in a single segment, nothing to do

    //////////////////////////////////////////////////////////////////////////
    // This algorithm is simple in nature. Its primary aim is to avoid having
    // the last segment being partial if multiple segments are present.
    // The extra samples from the last segment will be redistributed evenly
    // starting with the first segment.
    // As such, in order to quickly find which segment contains a particular sample
    // you can simply divide the number of samples by the number of segments to get
    // the floored value of number of samples per segment. This guarantees an accurate estimate.
    // You can then query the segment start index by dividing the desired sample index
    // with the floored value. If the sample isn't in the current segment, it will live in one of its neighbors.
    // TODO: Can we provide a tighter guarantee?
    //////////////////////////////////////////////////////////////////////////

    uint32_t num_segments = (clip.num_samples + settings.ideal_num_samples - 1) / settings.ideal_num_samples;
    const uint32_t max_num_samples = num_segments * settings.ideal_num_samples;
    const uint32_t original_num_segments = num_segments;

    uint32_t* num_samples_per_segment = allocate_type_array<uint32_t>(allocator, num_segments);
    std::fill(num_samples_per_segment, num_samples_per_segment + num_segments, settings.ideal_num_samples);

    // Number of samples that land in the last, partial segment
    const uint32_t num_leftover_samples = settings.ideal_num_samples - (max_num_samples - clip.num_samples);
    if (num_leftover_samples != 0)
        num_samples_per_segment[num_segments - 1] = num_leftover_samples;

    // How many extra samples each non-last segment can absorb before hitting max_num_samples
    const uint32_t slack = settings.max_num_samples - settings.ideal_num_samples;
    if ((num_segments - 1) * slack >= num_leftover_samples)
    {
        // Enough segments to distribute the leftover samples of the last segment
        // round-robin over the earlier segments until the last one is empty.
        while (num_samples_per_segment[num_segments - 1] != 0)
        {
            for (uint32_t segment_index = 0; segment_index < num_segments - 1 && num_samples_per_segment[num_segments - 1] != 0; ++segment_index)
            {
                num_samples_per_segment[segment_index]++;
                num_samples_per_segment[num_segments - 1]--;
            }
        }

        num_segments--; // The last segment is now empty, drop it
    }

    ACL_ASSERT(num_segments != 1, "Expected a number of segments greater than 1.");

    // Swap in the new segment array; the old single segment is destroyed at the end.
    SegmentContext* clip_segment = clip.segments;
    clip.segments = allocate_type_array<SegmentContext>(allocator, num_segments);
    clip.num_segments = num_segments;

    uint32_t clip_sample_index = 0;
    for (uint32_t segment_index = 0; segment_index < num_segments; ++segment_index)
    {
        const uint32_t num_samples_in_segment = num_samples_per_segment[segment_index];

        SegmentContext& segment = clip.segments[segment_index];
        segment.clip = &clip;
        segment.bone_streams = allocate_type_array<BoneStreams>(allocator, clip.num_bones);
        segment.ranges = nullptr;
        segment.num_bones = clip.num_bones;
        segment.num_samples = num_samples_in_segment;
        segment.clip_sample_offset = clip_sample_index;
        segment.segment_index = segment_index;
        segment.distribution = SampleDistribution8::Uniform;
        segment.are_rotations_normalized = false;
        segment.are_translations_normalized = false;
        segment.are_scales_normalized = false;
        segment.animated_pose_rotation_bit_size = 0;
        segment.animated_pose_translation_bit_size = 0;
        segment.animated_pose_scale_bit_size = 0;
        segment.animated_pose_bit_size = 0;
        segment.animated_data_size = 0;
        segment.range_data_size = 0;
        segment.segment_data_size = 0; // Fix: was previously left uninitialized
        segment.total_header_size = 0;

        // Copy each bone's streams into the new segment: constant streams are
        // duplicated whole, animated streams are sliced to this segment's window.
        for (uint32_t bone_index = 0; bone_index < clip.num_bones; ++bone_index)
        {
            const BoneStreams& clip_bone_stream = clip_segment->bone_streams[bone_index];

            BoneStreams& segment_bone_stream = segment.bone_streams[bone_index];
            segment_bone_stream.segment = &segment;
            segment_bone_stream.bone_index = bone_index;
            segment_bone_stream.parent_bone_index = clip_bone_stream.parent_bone_index;
            segment_bone_stream.output_index = clip_bone_stream.output_index;

            if (clip_bone_stream.is_rotation_constant)
            {
                segment_bone_stream.rotations = clip_bone_stream.rotations.duplicate();
            }
            else
            {
                const uint32_t sample_size = clip_bone_stream.rotations.get_sample_size();
                RotationTrackStream rotations(allocator, num_samples_in_segment, sample_size, clip_bone_stream.rotations.get_sample_rate(), clip_bone_stream.rotations.get_rotation_format(), clip_bone_stream.rotations.get_bit_rate());
                std::memcpy(rotations.get_raw_sample_ptr(0), clip_bone_stream.rotations.get_raw_sample_ptr(clip_sample_index), size_t(num_samples_in_segment) * sample_size);

                segment_bone_stream.rotations = std::move(rotations);
            }

            if (clip_bone_stream.is_translation_constant)
            {
                segment_bone_stream.translations = clip_bone_stream.translations.duplicate();
            }
            else
            {
                const uint32_t sample_size = clip_bone_stream.translations.get_sample_size();
                TranslationTrackStream translations(allocator, num_samples_in_segment, sample_size, clip_bone_stream.translations.get_sample_rate(), clip_bone_stream.translations.get_vector_format(), clip_bone_stream.translations.get_bit_rate());
                std::memcpy(translations.get_raw_sample_ptr(0), clip_bone_stream.translations.get_raw_sample_ptr(clip_sample_index), size_t(num_samples_in_segment) * sample_size);

                segment_bone_stream.translations = std::move(translations);
            }

            if (clip_bone_stream.is_scale_constant)
            {
                segment_bone_stream.scales = clip_bone_stream.scales.duplicate();
            }
            else
            {
                const uint32_t sample_size = clip_bone_stream.scales.get_sample_size();
                ScaleTrackStream scales(allocator, num_samples_in_segment, sample_size, clip_bone_stream.scales.get_sample_rate(), clip_bone_stream.scales.get_vector_format(), clip_bone_stream.scales.get_bit_rate());
                std::memcpy(scales.get_raw_sample_ptr(0), clip_bone_stream.scales.get_raw_sample_ptr(clip_sample_index), size_t(num_samples_in_segment) * sample_size);

                segment_bone_stream.scales = std::move(scales);
            }

            segment_bone_stream.is_rotation_constant = clip_bone_stream.is_rotation_constant;
            segment_bone_stream.is_rotation_default = clip_bone_stream.is_rotation_default;
            segment_bone_stream.is_translation_constant = clip_bone_stream.is_translation_constant;
            segment_bone_stream.is_translation_default = clip_bone_stream.is_translation_default;
            segment_bone_stream.is_scale_constant = clip_bone_stream.is_scale_constant;
            segment_bone_stream.is_scale_default = clip_bone_stream.is_scale_default;
        }

        clip_sample_index += num_samples_in_segment;
    }

    deallocate_type_array(allocator, num_samples_per_segment, original_num_segments);
    destroy_segment_context(allocator, *clip_segment);
    deallocate_type_array(allocator, clip_segment, 1);
}
}
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,353 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2020 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
// Included only once from track.h
#include "acl/core/iallocator.h"
#include "acl/core/string.h"
#include "acl/core/track_desc.h"
#include "acl/core/track_traits.h"
#include "acl/core/track_types.h"
#include <cstdint>
namespace acl
{
// Constructs an empty track: no allocator, no sample data, zero samples.
inline track::track() noexcept
: m_allocator(nullptr)
, m_data(nullptr)
, m_num_samples(0)
, m_stride(0)
, m_data_size(0)
, m_sample_rate(0.0F)
, m_type(track_type8::float1f)
, m_category(track_category8::scalarf)
, m_sample_size(0)
, m_desc()
, m_name()
{}
// Move constructor: takes over the other track's state and nulls out its
// allocator/data pointers so its destructor won't free the buffer.
inline track::track(track&& other) noexcept
: m_allocator(other.m_allocator)
, m_data(other.m_data)
, m_num_samples(other.m_num_samples)
, m_stride(other.m_stride)
, m_data_size(other.m_data_size)
, m_sample_rate(other.m_sample_rate)
, m_type(other.m_type)
, m_category(other.m_category)
, m_sample_size(other.m_sample_size)
, m_desc(other.m_desc)
, m_name(std::move(other.m_name))
{
other.m_allocator = nullptr;
other.m_data = nullptr;
}
// Frees the sample buffer only when this track owns it (see is_owner()).
inline track::~track()
{
if (is_owner())
{
// We own the memory, free it
m_allocator->deallocate(m_data, m_data_size);
}
}
// Move assignment implemented as member-wise swap: the moved-from track ends
// up holding our previous state and releases any owned buffer when destroyed.
inline track& track::operator=(track&& other) noexcept
{
std::swap(m_allocator, other.m_allocator);
std::swap(m_data, other.m_data);
std::swap(m_num_samples, other.m_num_samples);
std::swap(m_stride, other.m_stride);
std::swap(m_data_size, other.m_data_size);
std::swap(m_sample_rate, other.m_sample_rate);
std::swap(m_type, other.m_type);
std::swap(m_category, other.m_category)
, std::swap(m_sample_size, other.m_sample_size);
std::swap(m_desc, other.m_desc);
std::swap(m_name, other.m_name);
return *this;
}
// Returns a raw pointer to the sample at 'index' (stride-based addressing).
inline void* track::operator[](uint32_t index)
{
ACL_ASSERT(index < m_num_samples, "Invalid sample index. %u >= %u", index, m_num_samples);
return m_data + (index * m_stride);
}
// Const overload of the raw sample accessor.
inline const void* track::operator[](uint32_t index) const
{
ACL_ASSERT(index < m_num_samples, "Invalid sample index. %u >= %u", index, m_num_samples);
return m_data + (index * m_stride);
}
// Returns the output index from whichever description matches our category.
inline uint32_t track::get_output_index() const
{
switch (m_category)
{
// 'default' deliberately falls through to the scalar case.
default:
case track_category8::scalarf: return m_desc.scalar.output_index;
case track_category8::transformf: return m_desc.transform.output_index;
}
}
// Typed accessors for the description union; each specialization asserts the
// track's category matches the requested description type.
template<>
inline track_desc_scalarf& track::get_description()
{
ACL_ASSERT(track_desc_scalarf::category == m_category, "Unexpected track category");
return m_desc.scalar;
}
template<>
inline track_desc_transformf& track::get_description()
{
ACL_ASSERT(track_desc_transformf::category == m_category, "Unexpected track category");
return m_desc.transform;
}
template<>
inline const track_desc_scalarf& track::get_description() const
{
ACL_ASSERT(track_desc_scalarf::category == m_category, "Unexpected track category");
return m_desc.scalar;
}
template<>
inline const track_desc_transformf& track::get_description() const
{
ACL_ASSERT(track_desc_transformf::category == m_category, "Unexpected track category");
return m_desc.transform;
}
// Returns a deep copy of this track; the copy owns its own sample buffer.
inline track track::get_copy(iallocator& allocator) const
{
track track_;
get_copy_impl(allocator, track_);
return track_;
}
// Returns a non-owning view of this track's sample buffer.
inline track track::get_ref() const
{
track track_;
get_ref_impl(track_);
return track_;
}
// Validates the track's metadata. An empty track (no sample data) is valid.
// Returns an empty error_result on success, otherwise a description of the
// first problem found.
inline error_result track::is_valid() const
{
    // No data means an empty, default-constructed track — that is fine.
    if (m_data == nullptr)
        return error_result();

    if (m_num_samples == 0xFFFFFFFFU)
        return error_result("Too many samples");

    const bool sample_rate_ok = m_sample_rate > 0.0F && rtm::scalar_is_finite(m_sample_rate);
    if (!sample_rate_ok)
        return error_result("Invalid sample rate");

    // Delegate to the description matching our category.
    if (m_category == track_category8::scalarf)
        return m_desc.scalar.is_valid();

    if (m_category == track_category8::transformf)
        return m_desc.transform.is_valid();

    return error_result("Invalid category");
}
// Constructs an empty track of the given type/category (no sample data).
inline track::track(track_type8 type, track_category8 category) noexcept
: m_allocator(nullptr)
, m_data(nullptr)
, m_num_samples(0)
, m_stride(0)
, m_data_size(0)
, m_sample_rate(0.0F)
, m_type(type)
, m_category(category)
, m_sample_size(0)
, m_desc()
, m_name()
{}
// Constructs a fully-specified track. When 'allocator' is non-null the track
// owns 'data' and frees it in the destructor; when null it is a reference.
inline track::track(iallocator* allocator, uint8_t* data, uint32_t num_samples, uint32_t stride, size_t data_size, float sample_rate, track_type8 type, track_category8 category, uint8_t sample_size) noexcept
: m_allocator(allocator)
, m_data(data)
, m_num_samples(num_samples)
, m_stride(stride)
, m_data_size(data_size)
, m_sample_rate(sample_rate)
, m_type(type)
, m_category(category)
, m_sample_size(sample_size)
, m_desc()
, m_name()
{}
// Deep-copies this track into out_track: allocates a new sample buffer with
// 'allocator', copies the bytes, and duplicates the name.
inline void track::get_copy_impl(iallocator& allocator, track& out_track) const
{
out_track.m_allocator = &allocator;
out_track.m_data = reinterpret_cast<uint8_t*>(allocator.allocate(m_data_size));
out_track.m_num_samples = m_num_samples;
out_track.m_stride = m_stride;
out_track.m_data_size = m_data_size;
out_track.m_sample_rate = m_sample_rate;
out_track.m_type = m_type;
out_track.m_category = m_category;
out_track.m_sample_size = m_sample_size;
out_track.m_desc = m_desc;
out_track.m_name = m_name.get_copy(allocator);
std::memcpy(out_track.m_data, m_data, m_data_size);
}
// Makes out_track a non-owning view: shares our sample buffer (null allocator
// means the destructor will not free it) while copying the metadata.
inline void track::get_ref_impl(track& out_track) const
{
out_track.m_allocator = nullptr;
out_track.m_data = m_data;
out_track.m_num_samples = m_num_samples;
out_track.m_stride = m_stride;
out_track.m_data_size = m_data_size;
out_track.m_sample_rate = m_sample_rate;
out_track.m_type = m_type;
out_track.m_category = m_category;
out_track.m_sample_size = m_sample_size;
out_track.m_desc = m_desc;
out_track.m_name = m_name.get_copy();
}
// Typed sample accessor: reinterprets the stride-addressed bytes as sample_type.
template<track_type8 track_type_>
inline typename track_typed<track_type_>::sample_type& track_typed<track_type_>::operator[](uint32_t index)
{
ACL_ASSERT(index < m_num_samples, "Invalid sample index. %u >= %u", index, m_num_samples);
return *reinterpret_cast<sample_type*>(m_data + (index * m_stride));
}
// Const overload of the typed sample accessor.
template<track_type8 track_type_>
inline const typename track_typed<track_type_>::sample_type& track_typed<track_type_>::operator[](uint32_t index) const
{
ACL_ASSERT(index < m_num_samples, "Invalid sample index. %u >= %u", index, m_num_samples);
return *reinterpret_cast<const sample_type*>(m_data + (index * m_stride));
}
// Forwards to the base-class accessor for this track type's description type.
template<track_type8 track_type_>
inline typename track_typed<track_type_>::desc_type& track_typed<track_type_>::get_description()
{
return track::get_description<desc_type>();
}
template<track_type8 track_type_>
inline const typename track_typed<track_type_>::desc_type& track_typed<track_type_>::get_description() const
{
return track::get_description<desc_type>();
}
// Typed deep copy; see track::get_copy_impl.
template<track_type8 track_type_>
inline track_typed<track_type_> track_typed<track_type_>::get_copy(iallocator& allocator) const
{
track_typed track_;
track::get_copy_impl(allocator, track_);
return track_;
}
// Typed non-owning view; see track::get_ref_impl.
template<track_type8 track_type_>
inline track_typed<track_type_> track_typed<track_type_>::get_ref() const
{
track_typed track_;
track::get_ref_impl(track_);
return track_;
}
template<track_type8 track_type_>
inline track_typed<track_type_> track_typed<track_type_>::make_copy(const typename track_typed<track_type_>::desc_type& desc, iallocator& allocator, const sample_type* data, uint32_t num_samples, float sample_rate, uint32_t stride)
{
	// Copies 'num_samples' samples from a possibly strided source buffer into a
	// tightly packed, owned allocation. The resulting track owns its memory
	// (the allocator is stored) and its stride is sizeof(sample_type).
	const size_t buffer_size = size_t(num_samples) * sizeof(sample_type);
	sample_type* packed_samples = reinterpret_cast<sample_type*>(allocator.allocate(buffer_size));
	// Gather sample-by-sample so the source stride is dropped.
	const uint8_t* src_bytes = reinterpret_cast<const uint8_t*>(data);
	for (uint32_t sample_index = 0; sample_index < num_samples; ++sample_index)
	{
		const sample_type* src_sample = reinterpret_cast<const sample_type*>(src_bytes + (sample_index * stride));
		packed_samples[sample_index] = *src_sample;
	}
	return track_typed<track_type_>(&allocator, reinterpret_cast<uint8_t*>(packed_samples), num_samples, sizeof(sample_type), buffer_size, sample_rate, desc);
}
template<track_type8 track_type_>
inline track_typed<track_type_> track_typed<track_type_>::make_reserve(const typename track_typed<track_type_>::desc_type& desc, iallocator& allocator, uint32_t num_samples, float sample_rate)
{
	// Allocates uninitialized, tightly packed storage for 'num_samples' samples.
	// The caller is expected to fill the samples in afterwards via operator[].
	const size_t buffer_size = size_t(num_samples) * sizeof(sample_type);
	uint8_t* storage = reinterpret_cast<uint8_t*>(allocator.allocate(buffer_size));
	return track_typed<track_type_>(&allocator, storage, num_samples, sizeof(sample_type), buffer_size, sample_rate, desc);
}
template<track_type8 track_type_>
inline track_typed<track_type_> track_typed<track_type_>::make_owner(const typename track_typed<track_type_>::desc_type& desc, iallocator& allocator, sample_type* data, uint32_t num_samples, float sample_rate, uint32_t stride)
{
	// Wraps an existing buffer and transfers ownership: the allocator is stored,
	// unlike make_ref which passes nullptr. The caller's stride is preserved.
	const size_t buffer_size = size_t(num_samples) * stride;
	return track_typed<track_type_>(&allocator, reinterpret_cast<uint8_t*>(data), num_samples, stride, buffer_size, sample_rate, desc);
}
template<track_type8 track_type_>
inline track_typed<track_type_> track_typed<track_type_>::make_ref(const typename track_typed<track_type_>::desc_type& desc, sample_type* data, uint32_t num_samples, float sample_rate, uint32_t stride)
{
	// Non-owning variant: no allocator is stored (nullptr), so the track never
	// frees 'data'. The buffer must outlive the returned track.
	const size_t buffer_size = size_t(num_samples) * stride;
	return track_typed<track_type_>(nullptr, reinterpret_cast<uint8_t*>(data), num_samples, stride, buffer_size, sample_rate, desc);
}
// Internal constructor: forwards everything to the type-erased base together
// with this specialization's compile-time 'type'/'category' tags and sample
// size, then stores the typed description in its untyped holder.
// A null 'allocator' marks the track as non-owning (see make_ref).
template<track_type8 track_type_>
inline track_typed<track_type_>::track_typed(iallocator* allocator, uint8_t* data, uint32_t num_samples, uint32_t stride, size_t data_size, float sample_rate, const typename track_typed<track_type_>::desc_type& desc) noexcept
: track(allocator, data, num_samples, stride, data_size, sample_rate, type, category, sizeof(sample_type))
{
m_desc = track_desc_untyped(desc);
}
// Checked downcast from a type-erased track reference to a typed track.
// In debug builds, asserts that the runtime type matches the requested typed
// track (empty tracks are allowed through). Release builds cast unchecked.
template<typename track_type>
inline track_type& track_cast(track& track_)
{
ACL_ASSERT(track_type::type == track_.get_type() || track_.is_empty(), "Unexpected track type");
return static_cast<track_type&>(track_);
}
// Const overload of the checked reference downcast; same contract as above:
// the runtime type must match (or the track must be empty) in debug builds.
template<typename track_type>
inline const track_type& track_cast(const track& track_)
{
ACL_ASSERT(track_type::type == track_.get_type() || track_.is_empty(), "Unexpected track type");
return static_cast<const track_type&>(track_);
}
template<typename track_type>
inline track_type* track_cast(track* track_)
{
	// Pointer flavor of the checked downcast: instead of asserting, a null input
	// or a type mismatch (on a non-empty track) yields nullptr.
	if (track_ == nullptr)
		return nullptr;
	if (track_type::type != track_->get_type() && !track_->is_empty())
		return nullptr;
	return static_cast<track_type*>(track_);
}
template<typename track_type>
inline const track_type* track_cast(const track* track_)
{
	// Const pointer flavor: null input or a type mismatch on a non-empty track
	// returns nullptr rather than asserting.
	if (track_ == nullptr)
		return nullptr;
	if (track_type::type != track_->get_type() && !track_->is_empty())
		return nullptr;
	return static_cast<const track_type*>(track_);
}
}

View File

@ -0,0 +1,416 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2020 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
// Included only once from track_array.h
#include "acl/core/error_result.h"
#include "acl/core/iallocator.h"
#include "acl/core/interpolation_utils.h"
#include "acl/core/track_types.h"
#include "acl/core/track_writer.h"
#include <cstdint>
namespace acl
{
// Default constructor: builds an empty, non-owning array (no allocator, no
// tracks). Safe to destroy or move-assign into.
inline track_array::track_array() noexcept
: m_allocator(nullptr)
, m_tracks(nullptr)
, m_num_tracks(0)
, m_name()
{}
// Allocating constructor: creates 'num_tracks' default-constructed (empty)
// tracks owned by this array; they are freed by the destructor via 'allocator'.
inline track_array::track_array(iallocator& allocator, uint32_t num_tracks)
: m_allocator(&allocator)
, m_tracks(allocate_type_array<track>(allocator, num_tracks))
, m_num_tracks(num_tracks)
, m_name()
{}
// Move constructor: steals the other array's allocator, track buffer and name.
// The source keeps its pointers but is disarmed by nulling its allocator, which
// is what the destructor checks before freeing.
inline track_array::track_array(track_array&& other) noexcept
: m_allocator(other.m_allocator)
, m_tracks(other.m_tracks)
, m_num_tracks(other.m_num_tracks)
, m_name(std::move(other.m_name))
{
other.m_allocator = nullptr; // Make sure we don't free our data since we no longer own it
}
inline track_array::~track_array()
{
	// Only free when we own the memory; moved-from arrays carry a null allocator.
	if (m_allocator == nullptr)
		return;
	deallocate_type_array(*m_allocator, m_tracks, m_num_tracks);
}
inline track_array& track_array::operator=(track_array&& other) noexcept
{
	// Swap-based move assignment: 'other' inherits our previous state and will
	// release it when destroyed (member swap order is irrelevant here).
	std::swap(m_name, other.m_name);
	std::swap(m_num_tracks, other.m_num_tracks);
	std::swap(m_tracks, other.m_tracks);
	std::swap(m_allocator, other.m_allocator);
	return *this;
}
inline track& track_array::operator[](uint32_t index)
{
	// Debug-only bounds check; release builds index unchecked.
	ACL_ASSERT(index < m_num_tracks, "Invalid track index. %u >= %u", index, m_num_tracks);
	return *(m_tracks + index);
}
inline const track& track_array::operator[](uint32_t index) const
{
	// Const accessor; same debug-only bounds check as the mutable overload.
	ACL_ASSERT(index < m_num_tracks, "Invalid track index. %u >= %u", index, m_num_tracks);
	return *(m_tracks + index);
}
// Validates the whole array. Checks, in order:
//   1. every track matches the array-wide type, sample count and sample rate,
//      and is itself valid; transform tracks must have an in-range parent index;
//   2. output indices are in range and unique;
//   3. the set of used output indices is contiguous starting at 0.
// Returns an empty error_result on success, or the first failure found.
inline error_result track_array::is_valid() const
{
// Array-wide reference values are taken from the first track (via the getters).
const track_type8 type = get_track_type();
const uint32_t num_samples = get_num_samples_per_track();
const float sample_rate = get_sample_rate();
// Pass 1: per-track validation against the array-wide settings.
for (uint32_t track_index = 0; track_index < m_num_tracks; ++track_index)
{
const track& track_ = m_tracks[track_index];
if (track_.get_type() != type)
return error_result("Tracks must all have the same type within an array");
if (track_.get_num_samples() != num_samples)
return error_result("Track array requires the same number of samples in every track");
if (track_.get_sample_rate() != sample_rate)
return error_result("Track array requires the same sample rate in every track");
const error_result result = track_.is_valid();
if (result.any())
return result;
// Transform tracks form a hierarchy: a parent index must either be the
// sentinel or reference another track in this array.
if (track_.get_category() == track_category8::transformf)
{
const track_desc_transformf& desc = track_.get_description<track_desc_transformf>();
if (desc.parent_index != k_invalid_track_index && desc.parent_index >= m_num_tracks)
return error_result("Invalid parent_index. It must be 'k_invalid_track_index' or a valid track index");
}
}
// Validate output indices
// Pass 2: range and uniqueness (O(n^2) pairwise scan — fine for validation).
uint32_t num_outputs = 0;
for (uint32_t track_index = 0; track_index < m_num_tracks; ++track_index)
{
const track& track_ = m_tracks[track_index];
const uint32_t output_index = track_.get_output_index();
if (output_index != k_invalid_track_index && output_index >= m_num_tracks)
return error_result("The output_index must be 'k_invalid_track_index' or less than the number of bones");
if (output_index != k_invalid_track_index)
{
// Only compare against later tracks; earlier pairs were already checked.
for (uint32_t track_index2 = track_index + 1; track_index2 < m_num_tracks; ++track_index2)
{
const track& track2_ = m_tracks[track_index2];
const uint32_t output_index2 = track2_.get_output_index();
if (output_index == output_index2)
return error_result("Duplicate output_index found");
}
num_outputs++;
}
}
// Pass 3: the 'num_outputs' used indices must be exactly [0, num_outputs).
for (uint32_t output_index = 0; output_index < num_outputs; ++output_index)
{
bool found = false;
for (uint32_t track_index = 0; track_index < m_num_tracks; ++track_index)
{
const track& track_ = m_tracks[track_index];
const uint32_t output_index_ = track_.get_output_index();
if (output_index == output_index_)
{
found = true;
break;
}
}
if (!found)
return error_result("Output indices are not contiguous");
}
// Empty error_result means success.
return error_result();
}
// Samples every track in the array at 'sample_time' and forwards each
// interpolated value to 'writer'. All tracks in an array share one type, so a
// single switch dispatches per type; each value is linearly interpolated
// between the two key frames bracketing 'sample_time' using the policy-derived
// interpolation alpha. qvvf rotations use quaternion lerp (not slerp).
template<class track_writer_type>
inline void track_array::sample_tracks(float sample_time, sample_rounding_policy rounding_policy, track_writer_type& writer) const
{
static_assert(std::is_base_of<track_writer, track_writer_type>::value, "track_writer_type must derive from track_writer");
ACL_ASSERT(is_valid().empty(), "Invalid track array");
const uint32_t num_samples = get_num_samples_per_track();
const float sample_rate = get_sample_rate();
const track_type8 track_type = get_track_type();
// Clamp for safety, the caller should normally handle this but in practice, it often isn't the case
const float duration = get_duration();
sample_time = rtm::scalar_clamp(sample_time, 0.0F, duration);
// Resolve the bracketing key frames and lerp alpha once for the whole array.
uint32_t key_frame0;
uint32_t key_frame1;
float interpolation_alpha;
find_linear_interpolation_samples_with_sample_rate(num_samples, sample_rate, sample_time, rounding_policy, key_frame0, key_frame1, interpolation_alpha);
switch (track_type)
{
case track_type8::float1f:
for (uint32_t track_index = 0; track_index < m_num_tracks; ++track_index)
{
const track_float1f& track__ = track_cast<track_float1f>(m_tracks[track_index]);
// Scalars are lerped through rtm's scalarf path.
const rtm::scalarf value0 = rtm::scalar_load(&track__[key_frame0]);
const rtm::scalarf value1 = rtm::scalar_load(&track__[key_frame1]);
const rtm::scalarf value = rtm::scalar_lerp(value0, value1, rtm::scalar_set(interpolation_alpha));
writer.write_float1(track_index, value);
}
break;
case track_type8::float2f:
for (uint32_t track_index = 0; track_index < m_num_tracks; ++track_index)
{
const track_float2f& track__ = track_cast<track_float2f>(m_tracks[track_index]);
// Only x/y are meaningful; vector_load2 loads 2 floats.
const rtm::vector4f value0 = rtm::vector_load2(&track__[key_frame0]);
const rtm::vector4f value1 = rtm::vector_load2(&track__[key_frame1]);
const rtm::vector4f value = rtm::vector_lerp(value0, value1, interpolation_alpha);
writer.write_float2(track_index, value);
}
break;
case track_type8::float3f:
for (uint32_t track_index = 0; track_index < m_num_tracks; ++track_index)
{
const track_float3f& track__ = track_cast<track_float3f>(m_tracks[track_index]);
const rtm::vector4f value0 = rtm::vector_load3(&track__[key_frame0]);
const rtm::vector4f value1 = rtm::vector_load3(&track__[key_frame1]);
const rtm::vector4f value = rtm::vector_lerp(value0, value1, interpolation_alpha);
writer.write_float3(track_index, value);
}
break;
case track_type8::float4f:
for (uint32_t track_index = 0; track_index < m_num_tracks; ++track_index)
{
const track_float4f& track__ = track_cast<track_float4f>(m_tracks[track_index]);
const rtm::vector4f value0 = rtm::vector_load(&track__[key_frame0]);
const rtm::vector4f value1 = rtm::vector_load(&track__[key_frame1]);
const rtm::vector4f value = rtm::vector_lerp(value0, value1, interpolation_alpha);
writer.write_float4(track_index, value);
}
break;
case track_type8::vector4f:
for (uint32_t track_index = 0; track_index < m_num_tracks; ++track_index)
{
// vector4f samples are stored as rtm::vector4f directly, no load needed.
const track_vector4f& track__ = track_cast<track_vector4f>(m_tracks[track_index]);
const rtm::vector4f value0 = track__[key_frame0];
const rtm::vector4f value1 = track__[key_frame1];
const rtm::vector4f value = rtm::vector_lerp(value0, value1, interpolation_alpha);
writer.write_vector4(track_index, value);
}
break;
case track_type8::qvvf:
for (uint32_t track_index = 0; track_index < m_num_tracks; ++track_index)
{
const track_qvvf& track__ = track_cast<track_qvvf>(m_tracks[track_index]);
const rtm::qvvf& value0 = track__[key_frame0];
const rtm::qvvf& value1 = track__[key_frame1];
// Rotation uses quat_lerp; translation/scale use component-wise vector lerp.
const rtm::quatf rotation = rtm::quat_lerp(value0.rotation, value1.rotation, interpolation_alpha);
const rtm::vector4f translation = rtm::vector_lerp(value0.translation, value1.translation, interpolation_alpha);
const rtm::vector4f scale = rtm::vector_lerp(value0.scale, value1.scale, interpolation_alpha);
writer.write_rotation(track_index, rotation);
writer.write_translation(track_index, translation);
writer.write_scale(track_index, scale);
}
break;
default:
ACL_ASSERT(false, "Invalid track type");
break;
}
}
// Samples a single track at 'sample_time' and forwards the interpolated value
// to 'writer'. Unlike sample_tracks, the sample count/rate/duration come from
// the individual track being sampled. Same per-type lerp behavior as
// sample_tracks (qvvf rotations use quat_lerp).
template<class track_writer_type>
inline void track_array::sample_track(uint32_t track_index, float sample_time, sample_rounding_policy rounding_policy, track_writer_type& writer) const
{
static_assert(std::is_base_of<track_writer, track_writer_type>::value, "track_writer_type must derive from track_writer");
ACL_ASSERT(is_valid().empty(), "Invalid track array");
ACL_ASSERT(track_index < m_num_tracks, "Invalid track index");
const track& track_ = m_tracks[track_index];
const uint32_t num_samples = track_.get_num_samples();
const float sample_rate = track_.get_sample_rate();
// Clamp for safety, the caller should normally handle this but in practice, it often isn't the case
const float duration = calculate_duration(num_samples, sample_rate);
sample_time = rtm::scalar_clamp(sample_time, 0.0F, duration);
// Resolve the bracketing key frames and the interpolation alpha.
uint32_t key_frame0;
uint32_t key_frame1;
float interpolation_alpha;
find_linear_interpolation_samples_with_sample_rate(num_samples, sample_rate, sample_time, rounding_policy, key_frame0, key_frame1, interpolation_alpha);
// Dispatch on the track's runtime type and lerp between the two key frames.
switch (track_.get_type())
{
case track_type8::float1f:
{
const track_float1f& track__ = track_cast<track_float1f>(track_);
const rtm::scalarf value0 = rtm::scalar_load(&track__[key_frame0]);
const rtm::scalarf value1 = rtm::scalar_load(&track__[key_frame1]);
const rtm::scalarf value = rtm::scalar_lerp(value0, value1, rtm::scalar_set(interpolation_alpha));
writer.write_float1(track_index, value);
break;
}
case track_type8::float2f:
{
const track_float2f& track__ = track_cast<track_float2f>(track_);
const rtm::vector4f value0 = rtm::vector_load2(&track__[key_frame0]);
const rtm::vector4f value1 = rtm::vector_load2(&track__[key_frame1]);
const rtm::vector4f value = rtm::vector_lerp(value0, value1, interpolation_alpha);
writer.write_float2(track_index, value);
break;
}
case track_type8::float3f:
{
const track_float3f& track__ = track_cast<track_float3f>(track_);
const rtm::vector4f value0 = rtm::vector_load3(&track__[key_frame0]);
const rtm::vector4f value1 = rtm::vector_load3(&track__[key_frame1]);
const rtm::vector4f value = rtm::vector_lerp(value0, value1, interpolation_alpha);
writer.write_float3(track_index, value);
break;
}
case track_type8::float4f:
{
const track_float4f& track__ = track_cast<track_float4f>(track_);
const rtm::vector4f value0 = rtm::vector_load(&track__[key_frame0]);
const rtm::vector4f value1 = rtm::vector_load(&track__[key_frame1]);
const rtm::vector4f value = rtm::vector_lerp(value0, value1, interpolation_alpha);
writer.write_float4(track_index, value);
break;
}
case track_type8::vector4f:
{
// vector4f samples are stored as rtm::vector4f directly, no load needed.
const track_vector4f& track__ = track_cast<track_vector4f>(track_);
const rtm::vector4f value0 = track__[key_frame0];
const rtm::vector4f value1 = track__[key_frame1];
const rtm::vector4f value = rtm::vector_lerp(value0, value1, interpolation_alpha);
writer.write_vector4(track_index, value);
break;
}
case track_type8::qvvf:
{
const track_qvvf& track__ = track_cast<track_qvvf>(track_);
const rtm::qvvf& value0 = track__[key_frame0];
const rtm::qvvf& value1 = track__[key_frame1];
// Rotation uses quat_lerp; translation/scale use component-wise vector lerp.
const rtm::quatf rotation = rtm::quat_lerp(value0.rotation, value1.rotation, interpolation_alpha);
const rtm::vector4f translation = rtm::vector_lerp(value0.translation, value1.translation, interpolation_alpha);
const rtm::vector4f scale = rtm::vector_lerp(value0.scale, value1.scale, interpolation_alpha);
writer.write_rotation(track_index, rotation);
writer.write_translation(track_index, translation);
writer.write_scale(track_index, scale);
break;
}
default:
ACL_ASSERT(false, "Invalid track type");
break;
}
}
inline uint32_t track_array::get_raw_size() const
{
	// Returns the total raw data size in bytes across all tracks. qvvf tracks
	// are counted as 10 floats per sample (4 rotation, 3 translation, 3 scale)
	// rather than their in-memory sample size.
	const uint32_t num_samples = get_num_samples_per_track();
	const track_type8 track_type = get_track_type();
	uint32_t total_size = 0;
	for (uint32_t track_index = 0; track_index < m_num_tracks; ++track_index)
	{
		const track& track_ = m_tracks[track_index];
		if (track_type == track_type8::qvvf)
			total_size += num_samples * 10 * sizeof(float); // 4 rotation floats, 3 translation floats, 3 scale floats
		else
			total_size += num_samples * track_.get_sample_size();
	}
	return total_size;
}
template<track_type8 track_type_>
inline typename track_array_typed<track_type_>::track_member_type& track_array_typed<track_type_>::operator[](uint32_t index)
{
	// Fetch the type-erased track, then downcast to this array's concrete track type.
	ACL_ASSERT(index < m_num_tracks, "Invalid track index. %u >= %u", index, m_num_tracks);
	track& untyped = m_tracks[index];
	return track_cast<track_member_type>(untyped);
}
template<track_type8 track_type_>
inline const typename track_array_typed<track_type_>::track_member_type& track_array_typed<track_type_>::operator[](uint32_t index) const
{
	// Const accessor: same bounds check and typed downcast as the mutable overload.
	ACL_ASSERT(index < m_num_tracks, "Invalid track index. %u >= %u", index, m_num_tracks);
	const track& untyped = m_tracks[index];
	return track_cast<track_member_type>(untyped);
}
// Checked downcast from a type-erased track_array reference to a typed array.
// Debug builds assert the array's track type matches (empty arrays allowed);
// release builds cast unchecked.
template<typename track_array_type>
inline track_array_type& track_array_cast(track_array& track_array_)
{
ACL_ASSERT(track_array_type::type == track_array_.get_track_type() || track_array_.is_empty(), "Unexpected track type");
return static_cast<track_array_type&>(track_array_);
}
// Const overload of the checked array downcast; same contract as above.
template<typename track_array_type>
inline const track_array_type& track_array_cast(const track_array& track_array_)
{
ACL_ASSERT(track_array_type::type == track_array_.get_track_type() || track_array_.is_empty(), "Unexpected track type");
return static_cast<const track_array_type&>(track_array_);
}
template<typename track_array_type>
inline track_array_type* track_array_cast(track_array* track_array_)
{
	// Pointer flavor: null input or a track-type mismatch on a non-empty array
	// yields nullptr instead of asserting.
	if (track_array_ == nullptr)
		return nullptr;
	if (track_array_type::type != track_array_->get_track_type() && !track_array_->is_empty())
		return nullptr;
	return static_cast<track_array_type*>(track_array_);
}
template<typename track_array_type>
inline const track_array_type* track_array_cast(const track_array* track_array_)
{
	// Const pointer flavor of the checked array downcast.
	if (track_array_ == nullptr)
		return nullptr;
	if (track_array_type::type != track_array_->get_track_type() && !track_array_->is_empty())
		return nullptr;
	return static_cast<const track_array_type*>(track_array_);
}
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,649 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2020 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
// Included only once from track_error.h
#include "acl/core/compressed_tracks.h"
#include "acl/core/error.h"
#include "acl/core/error_result.h"
#include "acl/core/iallocator.h"
#include "acl/core/impl/debug_track_writer.h"
#include "acl/compression/track_array.h"
#include "acl/compression/transform_error_metrics.h"
#include "acl/compression/impl/track_list_context.h"
#include "acl/decompression/decompress.h"
#include <rtm/scalarf.h>
#include <rtm/vector4f.h>
#include <cstdint>
#include <functional>
namespace acl
{
namespace acl_impl
{
// Computes the per-component absolute error between a raw and a lossy sample
// of a scalar track. Unused SIMD lanes are zeroed (via vector_mix with
// vector_zero) so they cannot contaminate a later max-component reduction.
// Returns the error as a vector4f; unsupported types assert and return zero.
inline rtm::vector4f RTM_SIMD_CALL get_scalar_track_error(track_type8 track_type, uint32_t raw_track_index, uint32_t lossy_track_index, const debug_track_writer& raw_tracks_writer, const debug_track_writer& lossy_tracks_writer)
{
rtm::vector4f error;
switch (track_type)
{
case track_type8::float1f:
{
// Single float: splat |raw - lossy| into every lane.
const float raw_value = raw_tracks_writer.read_float1(raw_track_index);
const float lossy_value = lossy_tracks_writer.read_float1(lossy_track_index);
error = rtm::vector_set(rtm::scalar_abs(raw_value - lossy_value));
break;
}
case track_type8::float2f:
{
const rtm::vector4f raw_value = raw_tracks_writer.read_float2(raw_track_index);
const rtm::vector4f lossy_value = lossy_tracks_writer.read_float2(lossy_track_index);
error = rtm::vector_abs(rtm::vector_sub(raw_value, lossy_value));
// Keep x/y, zero the unused z/w lanes.
error = rtm::vector_mix<rtm::mix4::x, rtm::mix4::y, rtm::mix4::c, rtm::mix4::d>(error, rtm::vector_zero());
break;
}
case track_type8::float3f:
{
const rtm::vector4f raw_value = raw_tracks_writer.read_float3(raw_track_index);
const rtm::vector4f lossy_value = lossy_tracks_writer.read_float3(lossy_track_index);
error = rtm::vector_abs(rtm::vector_sub(raw_value, lossy_value));
// Keep x/y/z, zero the unused w lane.
error = rtm::vector_mix<rtm::mix4::x, rtm::mix4::y, rtm::mix4::z, rtm::mix4::d>(error, rtm::vector_zero());
break;
}
case track_type8::float4f:
{
// All four lanes are meaningful; no masking needed.
const rtm::vector4f raw_value = raw_tracks_writer.read_float4(raw_track_index);
const rtm::vector4f lossy_value = lossy_tracks_writer.read_float4(lossy_track_index);
error = rtm::vector_abs(rtm::vector_sub(raw_value, lossy_value));
break;
}
case track_type8::vector4f:
{
const rtm::vector4f raw_value = raw_tracks_writer.read_vector4(raw_track_index);
const rtm::vector4f lossy_value = lossy_tracks_writer.read_vector4(lossy_track_index);
error = rtm::vector_abs(rtm::vector_sub(raw_value, lossy_value));
break;
}
default:
// Transform (qvvf) tracks are handled by the transform error path, not here.
ACL_ASSERT(false, "Unsupported track type");
error = rtm::vector_zero();
break;
}
return error;
}
// Argument bundle for the error-measurement helpers below. The std::function
// callbacks abstract over where the samples come from (raw track arrays,
// compressed clips, decompression contexts), so one implementation serves all
// comparisons. Optional members may be left defaulted/empty.
struct calculate_track_error_args
{
// Scalar and transforms
uint32_t num_samples = 0;
uint32_t num_tracks = 0;
float duration = 0.0F;
float sample_rate = 0.0F;
track_type8 track_type = track_type8::float1f;
// Samplers for the two track sets being compared (e.g. raw vs lossy).
std::function<void(float sample_time, sample_rounding_policy rounding_policy, debug_track_writer& track_writer)> sample_tracks0;
std::function<void(float sample_time, sample_rounding_policy rounding_policy, debug_track_writer& track_writer)> sample_tracks1;
// Transforms only
const itransform_error_metric* error_metric = nullptr;
std::function<uint32_t(uint32_t track_index)> get_parent_index;
std::function<float(uint32_t track_index)> get_shell_distance;
// Optional
// Additive base clip info; used only when sample_tracks_base is provided.
uint32_t base_num_samples = 0;
float base_duration = 0.0F;
std::function<void(float sample_time, sample_rounding_policy rounding_policy, debug_track_writer& track_writer)> sample_tracks_base;
// Maps a track index to its output index (k_invalid_track_index = stripped).
std::function<uint32_t(uint32_t track_index)> get_output_index;
// Remaps writer1 output into writer_remapped; when empty, a plain copy is used.
std::function<void(debug_track_writer& track_writer0, debug_track_writer& track_writer1, debug_track_writer& track_writer_remapped)> remap_output;
};
// Measures the worst absolute error between two scalar track sets by sampling
// both at every key frame (nearest rounding, so interpolation loss is excluded)
// and keeping the max component-wise difference. Returns the worst error with
// the track index and sample time where it occurred; a default track_error is
// returned when there is nothing to measure.
inline track_error calculate_scalar_track_error(iallocator& allocator, const calculate_track_error_args& args)
{
const uint32_t num_samples = args.num_samples;
if (args.num_samples == 0)
return track_error(); // Cannot measure any error
const uint32_t num_tracks = args.num_tracks;
if (args.num_tracks == 0)
return track_error(); // Cannot measure any error
track_error result;
result.error = -1.0F; // Can never have a negative error, use -1 so the first sample is used
const float duration = args.duration;
const float sample_rate = args.sample_rate;
const track_type8 track_type = args.track_type;
// We use the nearest sample to accurately measure the loss that happened, if any
const sample_rounding_policy rounding_policy = sample_rounding_policy::nearest;
// One scratch writer per side of the comparison.
debug_track_writer tracks_writer0(allocator, track_type, num_tracks);
debug_track_writer tracks_writer1(allocator, track_type, num_tracks);
// Measure our error
for (uint32_t sample_index = 0; sample_index < num_samples; ++sample_index)
{
// Clamp against the duration in case the last key frame rounds past it.
const float sample_time = rtm::scalar_min(float(sample_index) / sample_rate, duration);
args.sample_tracks0(sample_time, rounding_policy, tracks_writer0);
args.sample_tracks1(sample_time, rounding_policy, tracks_writer1);
// Validate decompress_tracks
for (uint32_t track_index = 0; track_index < num_tracks; ++track_index)
{
// The lossy side is indexed by output index when a remapping is provided.
const uint32_t output_index = args.get_output_index ? args.get_output_index(track_index) : track_index;
if (output_index == k_invalid_track_index)
continue; // Track is being stripped, ignore it
const rtm::vector4f error = get_scalar_track_error(track_type, track_index, output_index, tracks_writer0, tracks_writer1);
const float max_error = rtm::vector_get_max_component(error);
if (max_error > result.error)
{
result.error = max_error;
result.index = track_index;
result.sample_time = sample_time;
}
}
}
return result;
}
// Measures the worst object-space error between two transform (qvvf) track
// sets using the supplied itransform_error_metric. Per key frame: sample both
// sides, optionally remap outputs, optionally convert to the metric's internal
// transform format, optionally apply an additive base clip, convert local to
// object space, then evaluate the metric per bone against its shell distance.
// Returns the worst error with the bone index and sample time where it occurred.
inline track_error calculate_transform_track_error(iallocator& allocator, const calculate_track_error_args& args)
{
ACL_ASSERT(args.error_metric != nullptr, "Must have an error metric");
ACL_ASSERT(args.get_parent_index, "Must be able to query the parent track index");
ACL_ASSERT(args.get_shell_distance, "Must be able to query the shell distance");
const uint32_t num_samples = args.num_samples;
if (num_samples == 0)
return track_error(); // Cannot measure any error
const uint32_t num_tracks = args.num_tracks;
if (num_tracks == 0)
return track_error(); // Cannot measure any error
const float clip_duration = args.duration;
const float sample_rate = args.sample_rate;
const itransform_error_metric& error_metric = *args.error_metric;
const uint32_t additive_num_samples = args.base_num_samples;
const float additive_duration = args.base_duration;
// Always calculate the error with scale, slower but we don't need to know if we have scale or not
const bool has_scale = true;
// We use the nearest sample to accurately measure the loss that happened, if any
const sample_rounding_policy rounding_policy = sample_rounding_policy::nearest;
// Scratch pose writers: raw, lossy, lossy-after-remap, and the additive base.
debug_track_writer tracks_writer0(allocator, track_type8::qvvf, num_tracks);
debug_track_writer tracks_writer1(allocator, track_type8::qvvf, num_tracks);
debug_track_writer tracks_writer1_remapped(allocator, track_type8::qvvf, num_tracks);
debug_track_writer tracks_writer_base(allocator, track_type8::qvvf, num_tracks);
const size_t transform_size = error_metric.get_transform_size(has_scale);
const bool needs_conversion = error_metric.needs_conversion(has_scale);
// Conversion buffers are only allocated when the metric uses its own
// transform representation; otherwise the qvvf writer buffers are used as-is.
uint8_t* raw_local_pose_converted = nullptr;
uint8_t* base_local_pose_converted = nullptr;
uint8_t* lossy_local_pose_converted = nullptr;
if (needs_conversion)
{
raw_local_pose_converted = allocate_type_array_aligned<uint8_t>(allocator, num_tracks * transform_size, 64);
base_local_pose_converted = allocate_type_array_aligned<uint8_t>(allocator, num_tracks * transform_size, 64);
lossy_local_pose_converted = allocate_type_array_aligned<uint8_t>(allocator, num_tracks * transform_size, 64);
}
uint8_t* raw_object_pose = allocate_type_array_aligned<uint8_t>(allocator, num_tracks * transform_size, 64);
uint8_t* lossy_object_pose = allocate_type_array_aligned<uint8_t>(allocator, num_tracks * transform_size, 64);
// Every transform is treated as dirty; parent indices drive local-to-object.
uint32_t* parent_transform_indices = allocate_type_array<uint32_t>(allocator, num_tracks);
uint32_t* self_transform_indices = allocate_type_array<uint32_t>(allocator, num_tracks);
for (uint32_t transform_index = 0; transform_index < num_tracks; ++transform_index)
{
const uint32_t parent_index = args.get_parent_index(transform_index);
parent_transform_indices[transform_index] = parent_index;
self_transform_indices[transform_index] = transform_index;
}
// Select between converted buffers and the raw qvvf writer buffers once.
void* raw_local_pose_ = needs_conversion ? (void*)raw_local_pose_converted : (void*)tracks_writer0.tracks_typed.qvvf;
const void* base_local_pose_ = needs_conversion ? (void*)base_local_pose_converted : (void*)tracks_writer_base.tracks_typed.qvvf;
void* lossy_local_pose_ = needs_conversion ? (void*)lossy_local_pose_converted : (void*)tracks_writer1_remapped.tracks_typed.qvvf;
// Pre-build the metric argument structs; only the buffers differ per variant.
itransform_error_metric::convert_transforms_args convert_transforms_args_raw;
convert_transforms_args_raw.dirty_transform_indices = self_transform_indices;
convert_transforms_args_raw.num_dirty_transforms = num_tracks;
convert_transforms_args_raw.transforms = tracks_writer0.tracks_typed.qvvf;
convert_transforms_args_raw.num_transforms = num_tracks;
itransform_error_metric::convert_transforms_args convert_transforms_args_base = convert_transforms_args_raw;
convert_transforms_args_base.transforms = tracks_writer_base.tracks_typed.qvvf;
itransform_error_metric::convert_transforms_args convert_transforms_args_lossy = convert_transforms_args_raw;
convert_transforms_args_lossy.transforms = tracks_writer1_remapped.tracks_typed.qvvf;
itransform_error_metric::apply_additive_to_base_args apply_additive_to_base_args_raw;
apply_additive_to_base_args_raw.dirty_transform_indices = self_transform_indices;
apply_additive_to_base_args_raw.num_dirty_transforms = num_tracks;
apply_additive_to_base_args_raw.local_transforms = raw_local_pose_;
apply_additive_to_base_args_raw.base_transforms = base_local_pose_;
apply_additive_to_base_args_raw.num_transforms = num_tracks;
itransform_error_metric::apply_additive_to_base_args apply_additive_to_base_args_lossy = apply_additive_to_base_args_raw;
apply_additive_to_base_args_lossy.local_transforms = lossy_local_pose_;
itransform_error_metric::local_to_object_space_args local_to_object_space_args_raw;
local_to_object_space_args_raw.dirty_transform_indices = self_transform_indices;
local_to_object_space_args_raw.num_dirty_transforms = num_tracks;
local_to_object_space_args_raw.parent_transform_indices = parent_transform_indices;
local_to_object_space_args_raw.local_transforms = raw_local_pose_;
local_to_object_space_args_raw.num_transforms = num_tracks;
itransform_error_metric::local_to_object_space_args local_to_object_space_args_lossy = local_to_object_space_args_raw;
local_to_object_space_args_lossy.local_transforms = lossy_local_pose_;
track_error result;
result.error = -1.0F; // Can never have a negative error, use -1 so the first sample is used
for (uint32_t sample_index = 0; sample_index < num_samples; ++sample_index)
{
// Clamp against the duration in case the last key frame rounds past it.
const float sample_time = rtm::scalar_min(float(sample_index) / sample_rate, clip_duration);
// Sample our tracks
args.sample_tracks0(sample_time, rounding_policy, tracks_writer0);
args.sample_tracks1(sample_time, rounding_policy, tracks_writer1);
// Maybe remap them
if (args.remap_output)
args.remap_output(tracks_writer0, tracks_writer1, tracks_writer1_remapped);
else
std::memcpy(tracks_writer1_remapped.tracks_typed.qvvf, tracks_writer1.tracks_typed.qvvf, sizeof(rtm::qvvf) * num_tracks);
if (needs_conversion)
{
error_metric.convert_transforms(convert_transforms_args_raw, raw_local_pose_converted);
error_metric.convert_transforms(convert_transforms_args_lossy, lossy_local_pose_converted);
}
if (args.sample_tracks_base)
{
// Map the clip time onto the base clip's timeline before sampling it.
const float normalized_sample_time = additive_num_samples > 1 ? (sample_time / clip_duration) : 0.0F;
const float additive_sample_time = additive_num_samples > 1 ? (normalized_sample_time * additive_duration) : 0.0F;
args.sample_tracks_base(additive_sample_time, rounding_policy, tracks_writer_base);
if (needs_conversion)
error_metric.convert_transforms(convert_transforms_args_base, base_local_pose_converted);
error_metric.apply_additive_to_base(apply_additive_to_base_args_raw, raw_local_pose_);
error_metric.apply_additive_to_base(apply_additive_to_base_args_lossy, lossy_local_pose_);
}
error_metric.local_to_object_space(local_to_object_space_args_raw, raw_object_pose);
error_metric.local_to_object_space(local_to_object_space_args_lossy, lossy_object_pose);
for (uint32_t bone_index = 0; bone_index < num_tracks; ++bone_index)
{
// The shell distance defines the virtual vertex sphere the metric measures on.
const float shell_distance = args.get_shell_distance(bone_index);
itransform_error_metric::calculate_error_args calculate_error_args;
calculate_error_args.transform0 = raw_object_pose + (bone_index * transform_size);
calculate_error_args.transform1 = lossy_object_pose + (bone_index * transform_size);
calculate_error_args.construct_sphere_shell(shell_distance);
const float error = rtm::scalar_cast(error_metric.calculate_error(calculate_error_args));
if (error > result.error)
{
result.error = error;
result.index = bone_index;
result.sample_time = sample_time;
}
}
}
// NOTE(review): the converted buffers are freed even when needs_conversion is
// false (they are nullptr then) — assumes deallocate_type_array tolerates
// nullptr; confirm against its implementation.
deallocate_type_array(allocator, raw_local_pose_converted, num_tracks * transform_size);
deallocate_type_array(allocator, base_local_pose_converted, num_tracks * transform_size);
deallocate_type_array(allocator, lossy_local_pose_converted, num_tracks * transform_size);
deallocate_type_array(allocator, raw_object_pose, num_tracks * transform_size);
deallocate_type_array(allocator, lossy_object_pose, num_tracks * transform_size);
deallocate_type_array(allocator, parent_transform_indices, num_tracks);
deallocate_type_array(allocator, self_transform_indices, num_tracks);
return result;
}
// Returns a sentinel track_error that denotes 'no result': an invalid track
// index paired with otherwise impossible negative error and sample time values.
inline track_error invalid_track_error()
{
	track_error sentinel;
	sentinel.error = -1.0F;
	sentinel.sample_time = -1.0F;
	sentinel.index = ~0U;
	return sentinel;
}
}
//////////////////////////////////////////////////////////////////////////
// Measures the error introduced by compression by comparing every sample of
// the raw scalar tracks against the decompressed output of 'context'.
// Transform (qvvf) track lists are not supported by this overload and yield
// an invalid track error.
template<class decompression_context_type, acl_impl::is_decompression_context<decompression_context_type>>
inline track_error calculate_compression_error(iallocator& allocator, const track_array& raw_tracks, decompression_context_type& context)
{
	using namespace acl_impl;

	ACL_ASSERT(raw_tracks.is_valid().empty(), "Raw tracks are invalid");
	ACL_ASSERT(context.is_initialized(), "Context isn't initialized");

	// Transform tracks require an error metric and are handled by other overloads
	if (raw_tracks.get_track_type() == track_type8::qvvf)
		return invalid_track_error();	// Only supports scalar tracks

	calculate_track_error_args args;
	args.track_type = raw_tracks.get_track_type();
	args.num_tracks = raw_tracks.get_num_tracks();
	args.num_samples = raw_tracks.get_num_samples_per_track();
	args.sample_rate = raw_tracks.get_sample_rate();
	args.duration = raw_tracks.get_duration();

	// Reference samples come straight from the raw track list
	args.sample_tracks0 = [&raw_tracks](float sample_time, sample_rounding_policy rounding_policy, debug_track_writer& writer)
	{
		raw_tracks.sample_tracks(sample_time, rounding_policy, writer);
	};

	// Lossy samples are decompressed through the provided context
	args.sample_tracks1 = [&context](float sample_time, sample_rounding_policy rounding_policy, debug_track_writer& writer)
	{
		context.seek(sample_time, rounding_policy);
		context.decompress_tracks(writer);
	};

	args.get_output_index = [&raw_tracks](uint32_t track_index)
	{
		return raw_tracks[track_index].get_output_index();
	};

	return calculate_scalar_track_error(allocator, args);
}
// Measures the compression error of 'context' against 'raw_tracks'.
// Non-transform track lists fall back to the plain scalar error path; transform
// (qvvf) lists are measured with the provided transform error metric.
template<class decompression_context_type, acl_impl::is_decompression_context<decompression_context_type>>
inline track_error calculate_compression_error(iallocator& allocator, const track_array& raw_tracks, decompression_context_type& context, const itransform_error_metric& error_metric)
{
using namespace acl_impl;
ACL_ASSERT(raw_tracks.is_valid().empty(), "Raw tracks are invalid");
ACL_ASSERT(context.is_initialized(), "Context isn't initialized");
// Describe both sample sources to the shared error calculation helpers
calculate_track_error_args args;
args.num_samples = raw_tracks.get_num_samples_per_track();
args.num_tracks = raw_tracks.get_num_tracks();
args.duration = raw_tracks.get_duration();
args.sample_rate = raw_tracks.get_sample_rate();
args.track_type = raw_tracks.get_track_type();
// Reference samples come from the raw track list
args.sample_tracks0 = [&raw_tracks](float sample_time, sample_rounding_policy rounding_policy, debug_track_writer& track_writer)
{
raw_tracks.sample_tracks(sample_time, rounding_policy, track_writer);
};
// Lossy samples are decompressed through the context
args.sample_tracks1 = [&context](float sample_time, sample_rounding_policy rounding_policy, debug_track_writer& track_writer)
{
context.seek(sample_time, rounding_policy);
context.decompress_tracks(track_writer);
};
args.get_output_index = [&raw_tracks](uint32_t track_index)
{
const track& track_ = raw_tracks[track_index];
return track_.get_output_index();
};
// Non-transform lists don't use the error metric
if (raw_tracks.get_track_type() != track_type8::qvvf)
return calculate_scalar_track_error(allocator, args);
// Transform path: the decompressed output can be reordered or stripped, so
// build the output index -> raw track index mapping used for pose remapping
uint32_t num_output_bones = 0;
uint32_t* output_bone_mapping = create_output_track_mapping(allocator, raw_tracks, num_output_bones);
args.error_metric = &error_metric;
args.get_parent_index = [&raw_tracks](uint32_t track_index)
{
const track_qvvf& track_ = track_cast<track_qvvf>(raw_tracks[track_index]);
return track_.get_description().parent_index;
};
args.get_shell_distance = [&raw_tracks](uint32_t track_index)
{
const track_qvvf& track_ = track_cast<track_qvvf>(raw_tracks[track_index]);
return track_.get_description().shell_distance;
};
args.remap_output = [output_bone_mapping, num_output_bones](debug_track_writer& track_writer0, debug_track_writer& track_writer1, debug_track_writer& track_writer_remapped)
{
// Perform remapping by copying the raw pose first and we overwrite with the decompressed pose if
// the data is available
std::memcpy(track_writer_remapped.tracks_typed.qvvf, track_writer0.tracks_typed.qvvf, sizeof(rtm::qvvf) * track_writer_remapped.num_tracks);
for (uint32_t output_index = 0; output_index < num_output_bones; ++output_index)
{
const uint32_t bone_index = output_bone_mapping[output_index];
track_writer_remapped.tracks_typed.qvvf[bone_index] = track_writer1.tracks_typed.qvvf[output_index];
}
};
const track_error result = calculate_transform_track_error(allocator, args);
// The mapping is only needed while measuring; release it before returning
deallocate_type_array(allocator, output_bone_mapping, num_output_bones);
return result;
}
// Measures the compression error of 'context' against the transform (qvvf)
// track list 'raw_tracks', using the provided transform error metric.
// When 'additive_base_tracks' is not empty, both raw and lossy poses are
// applied on top of the sampled base pose before the error is measured.
template<class decompression_context_type, acl_impl::is_decompression_context<decompression_context_type>>
inline track_error calculate_compression_error(iallocator& allocator, const track_array_qvvf& raw_tracks, decompression_context_type& context, const itransform_error_metric& error_metric, const track_array_qvvf& additive_base_tracks)
{
using namespace acl_impl;
ACL_ASSERT(raw_tracks.is_valid().empty(), "Raw tracks are invalid");
ACL_ASSERT(context.is_initialized(), "Context isn't initialized");
// Describe both sample sources to the shared error calculation helpers
calculate_track_error_args args;
args.num_samples = raw_tracks.get_num_samples_per_track();
args.num_tracks = raw_tracks.get_num_tracks();
args.duration = raw_tracks.get_duration();
args.sample_rate = raw_tracks.get_sample_rate();
args.track_type = raw_tracks.get_track_type();
// Reference samples come from the raw track list
args.sample_tracks0 = [&raw_tracks](float sample_time, sample_rounding_policy rounding_policy, debug_track_writer& track_writer)
{
raw_tracks.sample_tracks(sample_time, rounding_policy, track_writer);
};
// Lossy samples are decompressed through the context
args.sample_tracks1 = [&context](float sample_time, sample_rounding_policy rounding_policy, debug_track_writer& track_writer)
{
context.seek(sample_time, rounding_policy);
context.decompress_tracks(track_writer);
};
args.get_output_index = [&raw_tracks](uint32_t track_index)
{
const track& track_ = raw_tracks[track_index];
return track_.get_output_index();
};
// The decompressed output can be reordered or stripped, so build the
// output index -> raw track index mapping used for pose remapping
uint32_t num_output_bones = 0;
uint32_t* output_bone_mapping = create_output_track_mapping(allocator, raw_tracks, num_output_bones);
args.error_metric = &error_metric;
args.get_parent_index = [&raw_tracks](uint32_t track_index)
{
const track_qvvf& track_ = track_cast<track_qvvf>(raw_tracks[track_index]);
return track_.get_description().parent_index;
};
args.get_shell_distance = [&raw_tracks](uint32_t track_index)
{
const track_qvvf& track_ = track_cast<track_qvvf>(raw_tracks[track_index]);
return track_.get_description().shell_distance;
};
args.remap_output = [output_bone_mapping, num_output_bones](debug_track_writer& track_writer0, debug_track_writer& track_writer1, debug_track_writer& track_writer_remapped)
{
// Perform remapping by copying the raw pose first and we overwrite with the decompressed pose if
// the data is available
std::memcpy(track_writer_remapped.tracks_typed.qvvf, track_writer0.tracks_typed.qvvf, sizeof(rtm::qvvf) * track_writer_remapped.num_tracks);
for (uint32_t output_index = 0; output_index < num_output_bones; ++output_index)
{
const uint32_t bone_index = output_bone_mapping[output_index];
track_writer_remapped.tracks_typed.qvvf[bone_index] = track_writer1.tracks_typed.qvvf[output_index];
}
};
// Optional additive workflow: provide the base pose sampler so the error is
// measured on the composed pose rather than the additive delta alone
if (!additive_base_tracks.is_empty())
{
args.base_num_samples = additive_base_tracks.get_num_samples_per_track();
args.base_duration = additive_base_tracks.get_duration();
args.sample_tracks_base = [&additive_base_tracks](float sample_time, sample_rounding_policy rounding_policy, debug_track_writer& track_writer)
{
additive_base_tracks.sample_tracks(sample_time, rounding_policy, track_writer);
};
}
const track_error result = calculate_transform_track_error(allocator, args);
// The mapping is only needed while measuring; release it before returning
deallocate_type_array(allocator, output_bone_mapping, num_output_bones);
return result;
}
// Measures how much two compressed clips differ by decompressing both and
// comparing their samples. The list metadata (track count, sample count, rate)
// is read from context0's clip. Transform (qvvf) clips are not supported by
// this overload and yield an invalid track error.
template<class decompression_context_type0, class decompression_context_type1, acl_impl::is_decompression_context<decompression_context_type0>, acl_impl::is_decompression_context<decompression_context_type1>>
inline track_error calculate_compression_error(iallocator& allocator, decompression_context_type0& context0, decompression_context_type1& context1)
{
	using namespace acl_impl;

	ACL_ASSERT(context0.is_initialized(), "Context isn't initialized");
	ACL_ASSERT(context1.is_initialized(), "Context isn't initialized");

	const compressed_tracks* tracks0 = context0.get_compressed_tracks();
	if (tracks0->get_track_type() == track_type8::qvvf)
		return invalid_track_error();	// Only supports scalar tracks

	calculate_track_error_args args;
	args.track_type = tracks0->get_track_type();
	args.num_tracks = tracks0->get_num_tracks();
	args.num_samples = tracks0->get_num_samples_per_track();
	args.sample_rate = tracks0->get_sample_rate();
	args.duration = tracks0->get_duration();

	// Both sample sources decompress through their respective contexts
	args.sample_tracks0 = [&context0](float sample_time, sample_rounding_policy rounding_policy, debug_track_writer& writer)
	{
		context0.seek(sample_time, rounding_policy);
		context0.decompress_tracks(writer);
	};

	args.sample_tracks1 = [&context1](float sample_time, sample_rounding_policy rounding_policy, debug_track_writer& writer)
	{
		context1.seek(sample_time, rounding_policy);
		context1.decompress_tracks(writer);
	};

	return calculate_scalar_track_error(allocator, args);
}
// Measures the error between two raw scalar track lists by comparing their samples.
// All list metadata (track count, sample count, sample rate, duration) is taken
// from 'raw_tracks0' only; both lists must therefore describe the same set of
// tracks, which is now asserted. Transform (qvvf) track lists are not supported
// by this overload and yield an invalid track error.
inline track_error calculate_compression_error(iallocator& allocator, const track_array& raw_tracks0, const track_array& raw_tracks1)
{
	using namespace acl_impl;
	ACL_ASSERT(raw_tracks0.is_valid().empty(), "Raw tracks are invalid");
	ACL_ASSERT(raw_tracks1.is_valid().empty(), "Raw tracks are invalid");
	// Only raw_tracks0 drives the iteration below; incompatible lists would
	// otherwise be sampled past their bounds or compared pairwise incorrectly
	ACL_ASSERT(raw_tracks0.get_track_type() == raw_tracks1.get_track_type(), "Track lists must have the same track type");
	ACL_ASSERT(raw_tracks0.get_num_tracks() == raw_tracks1.get_num_tracks(), "Track lists must have the same number of tracks");
	ACL_ASSERT(raw_tracks0.get_num_samples_per_track() == raw_tracks1.get_num_samples_per_track(), "Track lists must have the same number of samples");
	if (raw_tracks0.get_track_type() == track_type8::qvvf)
		return invalid_track_error();	// Only supports scalar tracks
	calculate_track_error_args args;
	args.num_samples = raw_tracks0.get_num_samples_per_track();
	args.num_tracks = raw_tracks0.get_num_tracks();
	args.duration = raw_tracks0.get_duration();
	args.sample_rate = raw_tracks0.get_sample_rate();
	args.track_type = raw_tracks0.get_track_type();
	// Both sample sources are raw track lists
	args.sample_tracks0 = [&raw_tracks0](float sample_time, sample_rounding_policy rounding_policy, debug_track_writer& track_writer)
	{
		raw_tracks0.sample_tracks(sample_time, rounding_policy, track_writer);
	};
	args.sample_tracks1 = [&raw_tracks1](float sample_time, sample_rounding_policy rounding_policy, debug_track_writer& track_writer)
	{
		raw_tracks1.sample_tracks(sample_time, rounding_policy, track_writer);
	};
	return calculate_scalar_track_error(allocator, args);
}
// Measures the error between two raw track lists. Non-transform lists use the
// scalar error path; transform (qvvf) lists use the provided error metric.
// Metadata (track count, sample count, rate) is taken from 'raw_tracks0'.
inline track_error calculate_compression_error(iallocator& allocator, const track_array& raw_tracks0, const track_array& raw_tracks1, const itransform_error_metric& error_metric)
{
using namespace acl_impl;
ACL_ASSERT(raw_tracks0.is_valid().empty(), "Raw tracks are invalid");
ACL_ASSERT(raw_tracks1.is_valid().empty(), "Raw tracks are invalid");
calculate_track_error_args args;
args.num_samples = raw_tracks0.get_num_samples_per_track();
args.num_tracks = raw_tracks0.get_num_tracks();
args.duration = raw_tracks0.get_duration();
args.sample_rate = raw_tracks0.get_sample_rate();
args.track_type = raw_tracks0.get_track_type();
// Both sample sources are raw track lists
args.sample_tracks0 = [&raw_tracks0](float sample_time, sample_rounding_policy rounding_policy, debug_track_writer& track_writer)
{
raw_tracks0.sample_tracks(sample_time, rounding_policy, track_writer);
};
args.sample_tracks1 = [&raw_tracks1](float sample_time, sample_rounding_policy rounding_policy, debug_track_writer& track_writer)
{
raw_tracks1.sample_tracks(sample_time, rounding_policy, track_writer);
};
// Non-transform lists don't use the error metric
if (raw_tracks0.get_track_type() != track_type8::qvvf)
return calculate_scalar_track_error(allocator, args);
// Transform path; note that no output remapping is set up here since both
// inputs are raw track lists
args.error_metric = &error_metric;
args.get_parent_index = [&raw_tracks0](uint32_t track_index)
{
const track_qvvf& track_ = track_cast<track_qvvf>(raw_tracks0[track_index]);
return track_.get_description().parent_index;
};
args.get_shell_distance = [&raw_tracks0](uint32_t track_index)
{
const track_qvvf& track_ = track_cast<track_qvvf>(raw_tracks0[track_index]);
return track_.get_description().shell_distance;
};
return calculate_transform_track_error(allocator, args);
}
}

View File

@ -0,0 +1,266 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2019 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/iallocator.h"
#include "acl/core/bitset.h"
#include "acl/core/track_desc.h"
#include "acl/core/variable_bit_rates.h"
#include "acl/compression/track_array.h"
#include "acl/compression/impl/track_range.h"
#include <rtm/vector4f.h>
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
namespace acl_impl
{
// Bit rate selected for a scalar track.
struct scalar_bit_rate
{
uint8_t value;
};
// Bit rates selected for a transform (qvv) track, one per sub-track component.
struct qvv_bit_rate
{
uint8_t rotation;
uint8_t translation;
uint8_t scale;
};
// Bit rate of a single track, interpreted as scalar or qvv depending on the
// track's type.
union track_bit_rate
{
scalar_bit_rate scalar;
qvv_bit_rate qvv;
// Default to invalid bit rates until they are selected
track_bit_rate() : qvv{k_invalid_bit_rate, k_invalid_bit_rate, k_invalid_bit_rate} {}
};
// Working state shared by the scalar track compression passes.
// Owns a promoted copy of the input track list plus the per-track metadata
// arrays, all allocated from the bound allocator and freed by the destructor.
struct track_list_context
{
iallocator* allocator;	// Owns every array below; nullptr until initialize_context binds it
const track_array* reference_list;	// Original input list (not owned)
track_array track_list;	// Promoted copy of the input tracks
track_range* range_list;	// Per-track value ranges (num_tracks entries)
uint32_t* constant_tracks_bitset;	// One bit per track, set when the track is constant
track_bit_rate* bit_rate_list;	// Per-track bit rates (num_tracks entries)
uint32_t* track_output_indices;	// Maps output index -> input track index (num_output_tracks entries)
uint32_t num_tracks;
uint32_t num_output_tracks;
uint32_t num_samples;
float sample_rate;
float duration;
// Default constructed contexts are empty and invalid until initialized
track_list_context()
: allocator(nullptr)
, reference_list(nullptr)
, track_list()
, range_list(nullptr)
, constant_tracks_bitset(nullptr)
, bit_rate_list(nullptr)
, track_output_indices(nullptr)
, num_tracks(0)
, num_output_tracks(0)
, num_samples(0)
, sample_rate(0.0F)
, duration(0.0F)
{}
~track_list_context()
{
// A bound allocator means the context was initialized and the arrays may be live
if (allocator != nullptr)
{
deallocate_type_array(*allocator, range_list, num_tracks);
const bitset_description bitset_desc = bitset_description::make_from_num_bits(num_tracks);
deallocate_type_array(*allocator, constant_tracks_bitset, bitset_desc.get_size());
deallocate_type_array(*allocator, bit_rate_list, num_tracks);
deallocate_type_array(*allocator, track_output_indices, num_output_tracks);
}
}
// A context becomes valid once an allocator has been bound
bool is_valid() const { return allocator != nullptr; }
// Returns whether the given track was flagged constant in the bitset
bool is_constant(uint32_t track_index) const { return bitset_test(constant_tracks_bitset, bitset_description::make_from_num_bits(num_tracks), track_index); }
// Non-copyable and non-movable: the destructor owns raw arrays
track_list_context(const track_list_context&) = delete;
track_list_context(track_list_context&&) = delete;
track_list_context& operator=(const track_list_context&) = delete;
track_list_context& operator=(track_list_context&&) = delete;
};
// Promote scalar tracks to vector tracks for SIMD alignment and padding
// Returns a copy of 'ref_track_list' where every floatNf track has been widened
// into a track_vector4f. While copying, each sample's used components are
// checked for finite values and the combined result is written to
// 'out_are_samples_valid' (false if any sample was not finite).
inline track_array copy_and_promote_track_list(iallocator& allocator, const track_array& ref_track_list, bool& out_are_samples_valid)
{
using namespace rtm;
const uint32_t num_tracks = ref_track_list.get_num_tracks();
const uint32_t num_samples = ref_track_list.get_num_samples_per_track();
const float sample_rate = ref_track_list.get_sample_rate();
bool are_samples_valid = true;
track_array out_track_list(allocator, num_tracks);
for (uint32_t track_index = 0; track_index < num_tracks; ++track_index)
{
const track& ref_track = ref_track_list[track_index];
track& out_track = out_track_list[track_index];
switch (ref_track.get_type())
{
case track_type8::float1f:
{
// Widen each float1f sample via vector_load1, validating the single used lane
const track_float1f& typed_ref_track = track_cast<const track_float1f>(ref_track);
track_vector4f track = track_vector4f::make_reserve(ref_track.get_description<track_desc_scalarf>(), allocator, num_samples, sample_rate);
for (uint32_t sample_index = 0; sample_index < num_samples; ++sample_index)
{
const vector4f sample = vector_load1(&typed_ref_track[sample_index]);
are_samples_valid &= scalar_is_finite(scalarf(vector_get_x(sample)));
track[sample_index] = sample;
}
out_track = std::move(track);
break;
}
case track_type8::float2f:
{
// Widen each float2f sample via vector_load2, validating the two used lanes
const track_float2f& typed_ref_track = track_cast<const track_float2f>(ref_track);
track_vector4f track = track_vector4f::make_reserve(ref_track.get_description<track_desc_scalarf>(), allocator, num_samples, sample_rate);
for (uint32_t sample_index = 0; sample_index < num_samples; ++sample_index)
{
const vector4f sample = vector_load2(&typed_ref_track[sample_index]);
are_samples_valid &= vector_is_finite2(sample);
track[sample_index] = sample;
}
out_track = std::move(track);
break;
}
case track_type8::float3f:
{
// Widen each float3f sample via vector_load3, validating the three used lanes
const track_float3f& typed_ref_track = track_cast<const track_float3f>(ref_track);
track_vector4f track = track_vector4f::make_reserve(ref_track.get_description<track_desc_scalarf>(), allocator, num_samples, sample_rate);
for (uint32_t sample_index = 0; sample_index < num_samples; ++sample_index)
{
const vector4f sample = vector_load3(&typed_ref_track[sample_index]);
are_samples_valid &= vector_is_finite3(sample);
track[sample_index] = sample;
}
out_track = std::move(track);
break;
}
case track_type8::float4f:
{
// float4f samples load directly into a full vector4f
const track_float4f& typed_ref_track = track_cast<const track_float4f>(ref_track);
track_vector4f track = track_vector4f::make_reserve(ref_track.get_description<track_desc_scalarf>(), allocator, num_samples, sample_rate);
for (uint32_t sample_index = 0; sample_index < num_samples; ++sample_index)
{
const vector4f sample = vector_load(&typed_ref_track[sample_index]);
are_samples_valid &= vector_is_finite(sample);
track[sample_index] = sample;
}
out_track = std::move(track);
break;
}
case track_type8::vector4f:
{
// Already the right width, copy the samples while validating them
const track_vector4f& typed_ref_track = track_cast<const track_vector4f>(ref_track);
track_vector4f track = track_vector4f::make_reserve(ref_track.get_description<track_desc_scalarf>(), allocator, num_samples, sample_rate);
for (uint32_t sample_index = 0; sample_index < num_samples; ++sample_index)
{
const vector4f sample = typed_ref_track[sample_index];
are_samples_valid &= vector_is_finite(sample);
track[sample_index] = sample;
}
out_track = std::move(track);
break;
}
default:
// Unknown types cannot be promoted; flag the samples as invalid
ACL_ASSERT(false, "Unexpected track type");
are_samples_valid = false;
break;
}
}
out_are_samples_valid = are_samples_valid;
return out_track_list;
}
// Builds the output index -> input track index mapping for 'track_list'.
// Tracks whose output index is k_invalid_track_index are stripped and get no
// entry. The caller owns the returned array (num_output_tracks entries) and
// must free it with the same allocator.
inline uint32_t* create_output_track_mapping(iallocator& allocator, const track_array& track_list, uint32_t& out_num_output_tracks)
{
	const uint32_t total_num_tracks = track_list.get_num_tracks();

	// First pass: count how many tracks are stripped from the output
	uint32_t num_stripped_tracks = 0;
	for (uint32_t track_index = 0; track_index < total_num_tracks; ++track_index)
	{
		if (track_list[track_index].get_output_index() == k_invalid_track_index)
			num_stripped_tracks++;
	}

	const uint32_t num_output_tracks = total_num_tracks - num_stripped_tracks;
	uint32_t* output_to_input = allocate_type_array<uint32_t>(allocator, num_output_tracks);

	// Second pass: record which input track feeds each retained output slot
	for (uint32_t track_index = 0; track_index < total_num_tracks; ++track_index)
	{
		const uint32_t output_index = track_list[track_index].get_output_index();
		if (output_index != k_invalid_track_index)
			output_to_input[output_index] = track_index;
	}

	out_num_output_tracks = num_output_tracks;
	return output_to_input;
}
// Initializes 'context' from 'track_list': binds the allocator, makes a
// promoted copy of the tracks, captures the list metadata, and builds the
// output index mapping. Returns true when every input sample was finite.
inline bool initialize_context(iallocator& allocator, const track_array& track_list, track_list_context& context)
{
ACL_ASSERT(track_list.is_valid().empty(), "Invalid track list");
ACL_ASSERT(!context.is_valid(), "Context already initialized");
bool are_samples_valid = true;
// Binding the allocator marks the context as valid
context.allocator = &allocator;
context.reference_list = &track_list;
// Copy the input, promoting scalar tracks to vector4f tracks and
// validating every sample along the way
context.track_list = copy_and_promote_track_list(allocator, track_list, are_samples_valid);
// These arrays are filled in by later compression passes
context.range_list = nullptr;
context.constant_tracks_bitset = nullptr;
context.track_output_indices = nullptr;
context.num_tracks = track_list.get_num_tracks();
context.num_output_tracks = 0;
context.num_samples = track_list.get_num_samples_per_track();
context.sample_rate = track_list.get_sample_rate();
context.duration = track_list.get_duration();
// Builds the mapping and sets num_output_tracks as a side effect
context.track_output_indices = create_output_track_mapping(allocator, track_list, context.num_output_tracks);
return are_samples_valid;
}
}
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,98 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2019 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/iallocator.h"
#include "acl/core/track_types.h"
#include <rtm/vector4f.h>
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
namespace acl_impl
{
// Per-component value range of a scalarf track, stored as min and extent
// so that max = min + extent.
class scalarf_range
{
public:
static constexpr track_category8 type = track_category8::scalarf;
scalarf_range() : m_min(), m_extent() {}
scalarf_range(rtm::vector4f_arg0 min, rtm::vector4f_arg1 extent) : m_min(min), m_extent(extent) {}
// Builds a range from its min/max bounds
static scalarf_range RTM_SIMD_CALL from_min_max(rtm::vector4f_arg0 min, rtm::vector4f_arg1 max) { return scalarf_range(min, rtm::vector_sub(max, min)); }
// Builds a range directly from its min/extent representation
static scalarf_range RTM_SIMD_CALL from_min_extent(rtm::vector4f_arg0 min, rtm::vector4f_arg1 extent) { return scalarf_range(min, extent); }
rtm::vector4f RTM_SIMD_CALL get_min() const { return m_min; }
rtm::vector4f RTM_SIMD_CALL get_max() const { return rtm::vector_add(m_min, m_extent); }
rtm::vector4f RTM_SIMD_CALL get_center() const { return rtm::vector_add(m_min, rtm::vector_mul(m_extent, 0.5F)); }
rtm::vector4f RTM_SIMD_CALL get_extent() const { return m_extent; }
// The range is constant when every component's extent magnitude is below the threshold
bool is_constant(float threshold) const { return rtm::vector_all_less_than(rtm::vector_abs(m_extent), rtm::vector_set(threshold)); }
private:
rtm::vector4f m_min;
rtm::vector4f m_extent;
};
// Tagged union holding the value range of a single track.
// Only the scalarf category is currently implemented.
struct track_range
{
track_range() : range(), category(track_category8::scalarf) {}
explicit track_range(const scalarf_range& range_) : range(range_), category(track_category8::scalarf) {}
// Dispatches to the active range member based on the category tag
bool is_constant(float threshold) const
{
switch (category)
{
case track_category8::scalarf: return range.scalarf.is_constant(threshold);
default:
ACL_ASSERT(false, "Invalid track category");
return false;
}
}
union range_union
{
scalarf_range scalarf;
// TODO: Add qvv range and scalard/i/q ranges
range_union() : scalarf(scalarf_range::from_min_extent(rtm::vector_zero(), rtm::vector_zero())) {}
explicit range_union(const scalarf_range& range) : scalarf(range) {}
};
range_union range;
track_category8 category;	// Tag selecting the active union member
uint8_t padding[15];	// NOTE(review): presumably pads the struct size — confirm intended layout
};
}
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,86 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2019 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/iallocator.h"
#include "acl/compression/impl/track_list_context.h"
#include <rtm/vector4f.h>
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
namespace acl_impl
{
// Computes the per-component [min, max] value range over every sample of a
// vector4f track and returns it as a scalarf_range.
inline scalarf_range extract_scalarf_range(const track& track)
{
	using namespace rtm;

	const track_vector4f& typed_track = track_cast<const track_vector4f>(track);
	const uint32_t num_samples = track.get_num_samples();

	// Seed with large sentinels so the first sample always tightens both bounds
	vector4f range_min = rtm::vector_set(1e10F);
	vector4f range_max = rtm::vector_set(-1e10F);

	for (uint32_t sample_index = 0; sample_index < num_samples; ++sample_index)
	{
		const vector4f sample = typed_track[sample_index];
		range_min = vector_min(range_min, sample);
		range_max = vector_max(range_max, sample);
	}

	return scalarf_range::from_min_max(range_min, range_max);
}
// Allocates context.range_list and fills it with the value range of every
// track in the context. Only scalarf tracks are supported at this time.
inline void extract_track_ranges(track_list_context& context)
{
	ACL_ASSERT(context.is_valid(), "Invalid context");

	const uint32_t num_tracks = context.num_tracks;
	context.range_list = allocate_type_array<track_range>(*context.allocator, num_tracks);

	for (uint32_t track_index = 0; track_index < num_tracks; ++track_index)
	{
		const track& current_track = context.track_list[track_index];

		if (current_track.get_category() == track_category8::scalarf)
			context.range_list[track_index] = track_range(extract_scalarf_range(current_track));
		else
			ACL_ASSERT(false, "Invalid track category");
	}
}
}
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,360 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2017 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/iallocator.h"
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/error.h"
#include "acl/core/track_formats.h"
#include "acl/core/track_types.h"
#include "acl/core/utils.h"
#include "acl/core/variable_bit_rates.h"
#include "acl/math/quat_packing.h"
#include "acl/math/vector4_packing.h"
#include <rtm/quatf.h>
#include <rtm/vector4f.h>
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
namespace acl_impl
{
// Owns a contiguous, allocator-backed buffer of equally sized animation samples
// for a single track, along with the track's metadata: sample rate, track type,
// storage format, and an optional variable bit rate.
// Move-only; deep copies are made explicitly through duplicate().
class TrackStream
{
public:
// Returns a mutable pointer to the raw bytes of the requested sample.
uint8_t* get_raw_sample_ptr(uint32_t sample_index)
{
ACL_ASSERT(sample_index < m_num_samples, "Invalid sample index. %u >= %u", sample_index, m_num_samples);
uint32_t offset = sample_index * m_sample_size;
return m_samples + offset;
}
// Returns a read-only pointer to the raw bytes of the requested sample.
const uint8_t* get_raw_sample_ptr(uint32_t sample_index) const
{
ACL_ASSERT(sample_index < m_num_samples, "Invalid sample index. %u >= %u", sample_index, m_num_samples);
uint32_t offset = sample_index * m_sample_size;
return m_samples + offset;
}
// Reads the sample at 'sample_index', reinterpreting its bytes as a SampleType.
template<typename SampleType>
SampleType RTM_SIMD_CALL get_raw_sample(uint32_t sample_index) const
{
const uint8_t* ptr = get_raw_sample_ptr(sample_index);
return *safe_ptr_cast<const SampleType>(ptr);
}
// Writes 'sample' into the slot at 'sample_index'.
// Without SIMD intrinsics the value is taken by const reference instead of by value.
#if defined(RTM_NO_INTRINSICS)
template<typename SampleType>
void RTM_SIMD_CALL set_raw_sample(uint32_t sample_index, const SampleType& sample)
#else
template<typename SampleType>
void RTM_SIMD_CALL set_raw_sample(uint32_t sample_index, SampleType sample)
#endif
{
ACL_ASSERT(m_sample_size == sizeof(SampleType), "Unexpected sample size. %u != %zu", m_sample_size, sizeof(SampleType));
uint8_t* ptr = get_raw_sample_ptr(sample_index);
*safe_ptr_cast<SampleType>(ptr) = sample;
}
uint32_t get_num_samples() const { return m_num_samples; }
uint32_t get_sample_size() const { return m_sample_size; }
float get_sample_rate() const { return m_sample_rate; }
animation_track_type8 get_track_type() const { return m_type; }
uint8_t get_bit_rate() const { return m_bit_rate; }
// Any bit rate other than k_invalid_bit_rate means the track uses a variable bit rate.
bool is_bit_rate_variable() const { return m_bit_rate != k_invalid_bit_rate; }
float get_duration() const { return calculate_duration(m_num_samples, m_sample_rate); }
// Size in bytes of one sample once packed with this track's storage format.
uint32_t get_packed_sample_size() const
{
if (m_type == animation_track_type8::rotation)
return get_packed_rotation_size(m_format.rotation);
else
return get_packed_vector_size(m_format.vector);
}
protected:
// Empty stream that owns no allocation; this is also the state a moved-from
// stream is reset to.
TrackStream(animation_track_type8 type, track_format8 format) noexcept : m_allocator(nullptr), m_samples(nullptr), m_num_samples(0), m_sample_size(0), m_sample_rate(0.0F), m_type(type), m_format(format), m_bit_rate(0) {}
// Allocates an uninitialized, 16-byte aligned buffer of
// sample_size * num_samples bytes plus k_padding (see note on k_padding below).
TrackStream(iallocator& allocator, uint32_t num_samples, uint32_t sample_size, float sample_rate, animation_track_type8 type, track_format8 format, uint8_t bit_rate)
: m_allocator(&allocator)
, m_samples(reinterpret_cast<uint8_t*>(allocator.allocate(sample_size * num_samples + k_padding, 16)))
, m_num_samples(num_samples)
, m_sample_size(sample_size)
, m_sample_rate(sample_rate)
, m_type(type)
, m_format(format)
, m_bit_rate(bit_rate)
{}
TrackStream(const TrackStream&) = delete;
// Steals the other stream's buffer and metadata.
TrackStream(TrackStream&& other) noexcept
: m_allocator(other.m_allocator)
, m_samples(other.m_samples)
, m_num_samples(other.m_num_samples)
, m_sample_size(other.m_sample_size)
, m_sample_rate(other.m_sample_rate)
, m_type(other.m_type)
, m_format(other.m_format)
, m_bit_rate(other.m_bit_rate)
{
// Reset the moved-from stream to an empty state via placement new so its
// destructor will not free the buffer we just took ownership of.
new(&other) TrackStream(other.m_type, other.m_format);
}
~TrackStream()
{
// Only streams that allocated (non-null allocator, non-zero samples) own memory.
if (m_allocator != nullptr && m_num_samples != 0)
m_allocator->deallocate(m_samples, m_sample_size * m_num_samples + k_padding);
}
TrackStream& operator=(const TrackStream&) = delete;
// Swap-based move assignment: 'rhs' ends up owning our previous buffer and
// releases it when it is destroyed.
TrackStream& operator=(TrackStream&& rhs) noexcept
{
std::swap(m_allocator, rhs.m_allocator);
std::swap(m_samples, rhs.m_samples);
std::swap(m_num_samples, rhs.m_num_samples);
std::swap(m_sample_size, rhs.m_sample_size);
std::swap(m_sample_rate, rhs.m_sample_rate);
std::swap(m_type, rhs.m_type);
std::swap(m_format, rhs.m_format);
std::swap(m_bit_rate, rhs.m_bit_rate);
return *this;
}
// Deep copies this stream into 'copy': fresh allocation, samples memcpy'd over.
// The track type is not copied; both streams must already agree on it.
// NOTE(review): assumes 'copy' holds no allocation of its own (callers pass a
// freshly default-constructed stream); otherwise its previous buffer would leak.
void duplicate(TrackStream& copy) const
{
ACL_ASSERT(copy.m_type == m_type, "Attempting to duplicate streams with incompatible types!");
if (m_allocator != nullptr)
{
copy.m_allocator = m_allocator;
copy.m_samples = reinterpret_cast<uint8_t*>(m_allocator->allocate(m_sample_size * m_num_samples + k_padding, 16));
copy.m_num_samples = m_num_samples;
copy.m_sample_size = m_sample_size;
copy.m_sample_rate = m_sample_rate;
copy.m_format = m_format;
copy.m_bit_rate = m_bit_rate;
std::memcpy(copy.m_samples, m_samples, (size_t)m_sample_size * m_num_samples);
}
}
// In order to guarantee the safety of unaligned SIMD loads of every byte, we add some padding
static constexpr uint32_t k_padding = 15;
iallocator* m_allocator;	// null for empty/moved-from streams
uint8_t* m_samples;	// raw sample buffer, m_num_samples entries of m_sample_size bytes
uint32_t m_num_samples;
uint32_t m_sample_size;	// size in bytes of a single sample
float m_sample_rate;	// samples per second
animation_track_type8 m_type;
track_format8 m_format;
uint8_t m_bit_rate;	// k_invalid_bit_rate when not variable
};
// Sample stream holding the rotation keys of a single transform track.
// Defaults to the full precision quaternion format.
class RotationTrackStream final : public TrackStream
{
public:
	RotationTrackStream() noexcept
		: TrackStream(animation_track_type8::rotation, track_format8(rotation_format8::quatf_full))
	{}

	// Allocates storage for 'num_samples' samples of 'sample_size' bytes each.
	RotationTrackStream(iallocator& allocator, uint32_t num_samples, uint32_t sample_size, float sample_rate, rotation_format8 format, uint8_t bit_rate = k_invalid_bit_rate)
		: TrackStream(allocator, num_samples, sample_size, sample_rate, animation_track_type8::rotation, track_format8(format), bit_rate)
	{}

	// Move-only; deep copies go through duplicate().
	RotationTrackStream(const RotationTrackStream&) = delete;
	RotationTrackStream& operator=(const RotationTrackStream&) = delete;

	RotationTrackStream(RotationTrackStream&& src) noexcept
		: TrackStream(static_cast<TrackStream&&>(src))
	{}

	RotationTrackStream& operator=(RotationTrackStream&& src) noexcept
	{
		TrackStream::operator=(static_cast<TrackStream&&>(src));
		return *this;
	}

	~RotationTrackStream() = default;

	// Returns a deep copy of this stream: new allocation, identical samples.
	RotationTrackStream duplicate() const
	{
		RotationTrackStream result;
		TrackStream::duplicate(result);
		return result;
	}

	// The packing format used by the rotation samples.
	rotation_format8 get_rotation_format() const { return m_format.rotation; }
};
// Sample stream holding the translation keys of a single transform track.
// Defaults to the full precision vector3 format.
class TranslationTrackStream final : public TrackStream
{
public:
	TranslationTrackStream() noexcept
		: TrackStream(animation_track_type8::translation, track_format8(vector_format8::vector3f_full))
	{}

	// Allocates storage for 'num_samples' samples of 'sample_size' bytes each.
	TranslationTrackStream(iallocator& allocator, uint32_t num_samples, uint32_t sample_size, float sample_rate, vector_format8 format, uint8_t bit_rate = k_invalid_bit_rate)
		: TrackStream(allocator, num_samples, sample_size, sample_rate, animation_track_type8::translation, track_format8(format), bit_rate)
	{}

	// Move-only; deep copies go through duplicate().
	TranslationTrackStream(const TranslationTrackStream&) = delete;
	TranslationTrackStream& operator=(const TranslationTrackStream&) = delete;

	TranslationTrackStream(TranslationTrackStream&& src) noexcept
		: TrackStream(static_cast<TrackStream&&>(src))
	{}

	TranslationTrackStream& operator=(TranslationTrackStream&& src) noexcept
	{
		TrackStream::operator=(static_cast<TrackStream&&>(src));
		return *this;
	}

	~TranslationTrackStream() = default;

	// Returns a deep copy of this stream: new allocation, identical samples.
	TranslationTrackStream duplicate() const
	{
		TranslationTrackStream result;
		TrackStream::duplicate(result);
		return result;
	}

	// The packing format used by the translation samples.
	vector_format8 get_vector_format() const { return m_format.vector; }
};
// Sample stream holding the scale keys of a single transform track.
// Defaults to the full precision vector3 format.
class ScaleTrackStream final : public TrackStream
{
public:
	ScaleTrackStream() noexcept
		: TrackStream(animation_track_type8::scale, track_format8(vector_format8::vector3f_full))
	{}

	// Allocates storage for 'num_samples' samples of 'sample_size' bytes each.
	ScaleTrackStream(iallocator& allocator, uint32_t num_samples, uint32_t sample_size, float sample_rate, vector_format8 format, uint8_t bit_rate = k_invalid_bit_rate)
		: TrackStream(allocator, num_samples, sample_size, sample_rate, animation_track_type8::scale, track_format8(format), bit_rate)
	{}

	// Move-only; deep copies go through duplicate().
	ScaleTrackStream(const ScaleTrackStream&) = delete;
	ScaleTrackStream& operator=(const ScaleTrackStream&) = delete;

	ScaleTrackStream(ScaleTrackStream&& src) noexcept
		: TrackStream(static_cast<TrackStream&&>(src))
	{}

	ScaleTrackStream& operator=(ScaleTrackStream&& src) noexcept
	{
		TrackStream::operator=(static_cast<TrackStream&&>(src));
		return *this;
	}

	~ScaleTrackStream() = default;

	// Returns a deep copy of this stream: new allocation, identical samples.
	ScaleTrackStream duplicate() const
	{
		ScaleTrackStream result;
		TrackStream::duplicate(result);
		return result;
	}

	// The packing format used by the scale samples.
	vector_format8 get_vector_format() const { return m_format.vector; }
};
// Component-wise [min, min + extent] bounds over a track's samples.
// Rotation track caveat: the bounds are computed per component and are not
// valid rotations themselves, so for rotations the range is only meaningful
// to detect constant tracks; get_center() carries no meaning there.
class TrackStreamRange
{
public:
	// Builds a range from its lower and upper bounds.
	static TrackStreamRange RTM_SIMD_CALL from_min_max(rtm::vector4f_arg0 min, rtm::vector4f_arg1 max)
	{
		const rtm::vector4f extent = rtm::vector_sub(max, min);
		return TrackStreamRange(min, extent);
	}

	// Builds a range from its lower bound and size.
	static TrackStreamRange RTM_SIMD_CALL from_min_extent(rtm::vector4f_arg0 min, rtm::vector4f_arg1 extent)
	{
		return TrackStreamRange(min, extent);
	}

	// An empty range anchored at the origin.
	TrackStreamRange()
		: m_min(rtm::vector_zero())
		, m_extent(rtm::vector_zero())
	{}

	rtm::vector4f RTM_SIMD_CALL get_min() const { return m_min; }
	rtm::vector4f RTM_SIMD_CALL get_extent() const { return m_extent; }
	rtm::vector4f RTM_SIMD_CALL get_max() const { return rtm::vector_add(m_min, m_extent); }

	// Midpoint of the range: min + extent * 0.5.
	rtm::vector4f RTM_SIMD_CALL get_center() const
	{
		const rtm::vector4f half_extent = rtm::vector_mul(m_extent, 0.5F);
		return rtm::vector_add(m_min, half_extent);
	}

	// True when every component of the extent is smaller than 'threshold' in magnitude.
	bool is_constant(float threshold) const
	{
		return rtm::vector_all_less_than(rtm::vector_abs(m_extent), rtm::vector_set(threshold));
	}

private:
	TrackStreamRange(rtm::vector4f_arg0 min, rtm::vector4f_arg1 extent)
		: m_min(min)
		, m_extent(extent)
	{}

	rtm::vector4f m_min;		// lower bound
	rtm::vector4f m_extent;		// size; upper bound = m_min + m_extent
};
// Per-bone range reduction bounds: one range per transform component.
struct BoneRanges
{
TrackStreamRange rotation;
TrackStreamRange translation;
TrackStreamRange scale;
};
struct SegmentContext;

// Working set for a single transform track (bone) during compression:
// the three component sample streams plus per-component constant/default flags.
struct BoneStreams
{
	SegmentContext* segment;			// segment this bone belongs to
	uint32_t bone_index;
	uint32_t parent_bone_index;
	uint32_t output_index;				// k_invalid_track_index when stripped from the output
	RotationTrackStream rotations;
	TranslationTrackStream translations;
	ScaleTrackStream scales;
	bool is_rotation_constant;
	bool is_rotation_default;
	bool is_translation_constant;
	bool is_translation_default;
	bool is_scale_constant;
	bool is_scale_default;

	// Whether this bone is excluded from the compressed output.
	bool is_stripped_from_output() const { return output_index == k_invalid_track_index; }

	// Deep copy: the three sample streams are duplicated (fresh allocations),
	// everything else is copied by value.
	BoneStreams duplicate() const
	{
		BoneStreams result;

		// Plain members
		result.segment = segment;
		result.bone_index = bone_index;
		result.parent_bone_index = parent_bone_index;
		result.output_index = output_index;

		// Flags
		result.is_rotation_constant = is_rotation_constant;
		result.is_rotation_default = is_rotation_default;
		result.is_translation_constant = is_translation_constant;
		result.is_translation_default = is_translation_default;
		result.is_scale_constant = is_scale_constant;
		result.is_scale_default = is_scale_default;

		// Sample streams require deep copies
		result.rotations = rotations.duplicate();
		result.translations = translations.duplicate();
		result.scales = scales.duplicate();

		return result;
	}
};
}
}
ACL_IMPL_FILE_PRAGMA_POP

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,70 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2019 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#if defined(SJSON_CPP_WRITER)
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/error.h"
#include "acl/core/scope_profiler.h"
#include "acl/compression/impl/track_list_context.h"
#include "acl/compression/output_stats.h"
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
namespace acl_impl
{
inline void write_compression_stats(const track_list_context& context, const compressed_tracks& tracks, const scope_profiler& compression_time, output_stats& stats)
{
ACL_ASSERT(stats.writer != nullptr, "Attempted to log stats without a writer");
if (stats.writer == nullptr)
return;
const uint32_t raw_size = context.reference_list->get_raw_size();
const uint32_t compressed_size = tracks.get_size();
const double compression_ratio = double(raw_size) / double(compressed_size);
sjson::ObjectWriter& writer = *stats.writer;
writer["algorithm_name"] = get_algorithm_name(algorithm_type8::uniformly_sampled);
//writer["algorithm_uid"] = settings.get_hash();
//writer["clip_name"] = clip.get_name().c_str();
writer["raw_size"] = raw_size;
writer["compressed_size"] = compressed_size;
writer["compression_ratio"] = compression_ratio;
writer["compression_time"] = compression_time.get_elapsed_seconds();
writer["duration"] = context.duration;
writer["num_samples"] = context.num_samples;
writer["num_tracks"] = context.num_tracks;
}
}
}
ACL_IMPL_FILE_PRAGMA_POP
#endif // #if defined(SJSON_CPP_WRITER)

View File

@ -0,0 +1,352 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2019 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#if defined(SJSON_CPP_WRITER)
#include "acl/core/compressed_tracks_version.h"
#include "acl/core/scope_profiler.h"
#include "acl/core/track_formats.h"
#include "acl/core/utils.h"
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/impl/memory_cache.h"
#include "acl/compression/output_stats.h"
#include "acl/decompression/decompress.h"
#include <rtm/scalard.h>
#include <rtm/scalarf.h>
#include <algorithm>
#include <thread>
#include <chrono>
#include <cstring>
#include <random>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
namespace acl_impl
{
// Number of time samples probed across the clip per benchmark run.
constexpr uint32_t k_num_decompression_samples = 100;
// Number of identical clip/context copies decompressed per time sample;
// the elapsed time is divided by this count.
constexpr uint32_t k_num_decompression_evaluations = 100;
// Order in which the benchmark visits the sample times.
enum class PlaybackDirection
{
Forward,
Backward,
Random,
};
// Which decompression entry point the benchmark exercises.
enum class DecompressionFunction
{
DecompressPose,
DecompressBone,
};
// Benchmarks decompression over k_num_decompression_samples time samples spread
// uniformly across the clip's duration, visited in the requested playback order,
// and writes min/max/avg/median timings (per sample, in ms) under 'action_type'.
//
// 'compressed_clips'/'contexts' hold k_num_decompression_evaluations identical
// copies: with a cache_flusher (cold-cache profiling) each evaluation uses its
// own copy so no cache lines are shared; otherwise only the first copy is used
// after being primed. Per-sample timings are also emitted under "data" when
// exhaustive logging is enabled.
template<class DecompressionContextType>
inline void write_decompression_performance_stats(
stat_logging logging, sjson::ObjectWriter& writer, const char* action_type,
PlaybackDirection playback_direction, DecompressionFunction decompression_function,
compressed_tracks* compressed_clips[k_num_decompression_evaluations],
DecompressionContextType* contexts[k_num_decompression_evaluations],
CPUCacheFlusher* cache_flusher, debug_track_writer& pose_writer)
{
const uint32_t num_tracks = compressed_clips[0]->get_num_tracks();
const float duration = compressed_clips[0]->get_duration();
const bool is_cold_cache_profiling = cache_flusher != nullptr;
// Build the list of sample times: uniform over [0, duration], then reordered
// to match the requested playback direction.
float sample_times[k_num_decompression_samples];
for (uint32_t sample_index = 0; sample_index < k_num_decompression_samples; ++sample_index)
{
const float normalized_sample_time = float(sample_index) / float(k_num_decompression_samples - 1);
sample_times[sample_index] = rtm::scalar_clamp(normalized_sample_time, 0.0F, 1.0F) * duration;
}
switch (playback_direction)
{
case PlaybackDirection::Forward:
default:
break;
case PlaybackDirection::Backward:
std::reverse(&sample_times[0], &sample_times[k_num_decompression_samples]);
break;
case PlaybackDirection::Random:
// Fixed seed keeps the shuffle deterministic across runs
std::shuffle(&sample_times[0], &sample_times[k_num_decompression_samples], std::default_random_engine(0));
break;
}
// Initialize and clear our contexts
bool init_success = true;
for (uint32_t clip_index = 0; clip_index < k_num_decompression_evaluations; ++clip_index)
init_success &= contexts[clip_index]->initialize(*compressed_clips[clip_index]);	// &=: a single failure must stick ('|=' starting from true could never become false)
ACL_ASSERT(init_success, "Failed to initialize decompression context");
if (!init_success)
return;
writer[action_type] = [&](sjson::ObjectWriter& action_writer)
{
double clip_max_ms = 0.0;
double clip_min_ms = 1000000.0;
double clip_total_ms = 0.0;
double clip_time_ms[k_num_decompression_samples];
action_writer["data"] = [&](sjson::ArrayWriter& data_writer)
{
for (uint32_t sample_index = 0; sample_index < k_num_decompression_samples; ++sample_index)
{
const float sample_time = sample_times[sample_index];
// Clearing the context ensures the decoder cannot reuse any state cached from the last sample.
if (playback_direction == PlaybackDirection::Random)
{
for (uint32_t clip_index = 0; clip_index < k_num_decompression_evaluations; ++clip_index)
contexts[clip_index]->initialize(*compressed_clips[clip_index]);
}
// Clear the CPU cache if necessary
if (is_cold_cache_profiling)
{
cache_flusher->begin_flushing();
for (uint32_t clip_index = 0; clip_index < k_num_decompression_evaluations; ++clip_index)
{
cache_flusher->flush_buffer(contexts[clip_index], sizeof(DecompressionContextType));
cache_flusher->flush_buffer(compressed_clips[clip_index], compressed_clips[clip_index]->get_size());
}
cache_flusher->end_flushing();
}
else
{
// If we want the cache warm, decompress everything once to prime it
DecompressionContextType* context = contexts[0];
context->seek(sample_time, sample_rounding_policy::none);
context->decompress_tracks(pose_writer);
}
// We yield our time slice and wait for a new one before priming the cache
// to help keep it warm and minimize the risk that we'll be interrupted during decompression
std::this_thread::sleep_for(std::chrono::nanoseconds(1));
scope_profiler timer;
for (uint32_t clip_index = 0; clip_index < k_num_decompression_evaluations; ++clip_index)
{
// If we measure with a cold CPU cache, we use a different context every time otherwise we use the first one
DecompressionContextType* context = is_cold_cache_profiling ? contexts[clip_index] : contexts[0];
context->seek(sample_time, sample_rounding_policy::none);
switch (decompression_function)
{
case DecompressionFunction::DecompressPose:
context->decompress_tracks(pose_writer);
break;
case DecompressionFunction::DecompressBone:
for (uint32_t bone_index = 0; bone_index < num_tracks; ++bone_index)
context->decompress_track(bone_index, pose_writer);
break;
}
}
timer.stop();
// Average over all evaluations to get the per-sample cost
const double elapsed_ms = timer.get_elapsed_milliseconds() / k_num_decompression_evaluations;
if (are_any_enum_flags_set(logging, stat_logging::exhaustive_decompression))
data_writer.push(elapsed_ms);
clip_min_ms = rtm::scalar_min(clip_min_ms, elapsed_ms);
clip_max_ms = rtm::scalar_max(clip_max_ms, elapsed_ms);
clip_total_ms += elapsed_ms;
clip_time_ms[sample_index] = elapsed_ms;
}
};
// Sorting lets us report the median timing
std::sort(&clip_time_ms[0], &clip_time_ms[k_num_decompression_samples]);
action_writer["min_time_ms"] = clip_min_ms;
action_writer["max_time_ms"] = clip_max_ms;
action_writer["avg_time_ms"] = clip_total_ms / double(k_num_decompression_samples);
action_writer["med_time_ms"] = clip_time_ms[k_num_decompression_samples / 2];
};
}
// Benchmarks a raw memcpy of a full pose (num_bones qvvf transforms) to serve
// as a baseline against the decompression timings. With a cache_flusher the
// cold-cache cost of a single copy is measured; without one, the warm-cache
// cost is measured over 10 back-to-back copies and divided by 10. The best of
// 3 passes is reported as min/max/avg (all equal) under "memcpy_cold" or
// "memcpy_warm".
inline void write_memcpy_performance_stats(iallocator& allocator, sjson::ObjectWriter& writer, CPUCacheFlusher* cache_flusher, rtm::qvvf* lossy_pose_transforms, uint32_t num_bones)
{
// Scratch source buffer; contents are irrelevant, only the copy cost matters.
rtm::qvvf* memcpy_src_transforms = allocate_type_array<rtm::qvvf>(allocator, num_bones);
// Best (minimum) time seen across the passes below.
double decompression_time_ms = 1000000.0;
for (uint32_t pass_index = 0; pass_index < 3; ++pass_index)
{
if (cache_flusher != nullptr)
{
cache_flusher->begin_flushing();
cache_flusher->flush_buffer(memcpy_src_transforms, sizeof(rtm::qvvf) * num_bones);
cache_flusher->end_flushing();
// Now that the cache is cold, yield our time slice and wait for a new one
// This helps minimize the risk that we'll be interrupted during decompression
std::this_thread::sleep_for(std::chrono::nanoseconds(1));
}
else
{
// We yield our time slice and wait for a new one before priming the cache
// to help keep it warm and minimize the risk that we'll be interrupted during decompression
std::this_thread::sleep_for(std::chrono::nanoseconds(1));
std::memcpy(lossy_pose_transforms, memcpy_src_transforms, sizeof(rtm::qvvf) * num_bones);
}
double execution_count;
scope_profiler timer;
if (cache_flusher != nullptr)
{
std::memcpy(lossy_pose_transforms, memcpy_src_transforms, sizeof(rtm::qvvf) * num_bones);
execution_count = 1.0;
}
else
{
// Warm cache is too fast, execute multiple times and divide by the count
// NOTE(review): the 10 calls are deliberately unrolled rather than looped,
// presumably to keep loop overhead out of the timing — keep as-is.
std::memcpy(lossy_pose_transforms, memcpy_src_transforms, sizeof(rtm::qvvf) * num_bones);
std::memcpy(lossy_pose_transforms, memcpy_src_transforms, sizeof(rtm::qvvf) * num_bones);
std::memcpy(lossy_pose_transforms, memcpy_src_transforms, sizeof(rtm::qvvf) * num_bones);
std::memcpy(lossy_pose_transforms, memcpy_src_transforms, sizeof(rtm::qvvf) * num_bones);
std::memcpy(lossy_pose_transforms, memcpy_src_transforms, sizeof(rtm::qvvf) * num_bones);
std::memcpy(lossy_pose_transforms, memcpy_src_transforms, sizeof(rtm::qvvf) * num_bones);
std::memcpy(lossy_pose_transforms, memcpy_src_transforms, sizeof(rtm::qvvf) * num_bones);
std::memcpy(lossy_pose_transforms, memcpy_src_transforms, sizeof(rtm::qvvf) * num_bones);
std::memcpy(lossy_pose_transforms, memcpy_src_transforms, sizeof(rtm::qvvf) * num_bones);
std::memcpy(lossy_pose_transforms, memcpy_src_transforms, sizeof(rtm::qvvf) * num_bones);
execution_count = 10.0;
}
timer.stop();
const double elapsed_ms = timer.get_elapsed_milliseconds() / execution_count;
decompression_time_ms = rtm::scalar_min(decompression_time_ms, elapsed_ms);
}
writer[cache_flusher != nullptr ? "memcpy_cold" : "memcpy_warm"] = [&](sjson::ObjectWriter& memcpy_writer)
{
// "data" is emitted empty to keep the schema consistent with the decompression entries
memcpy_writer["data"] = [&](sjson::ArrayWriter&) {};
memcpy_writer["min_time_ms"] = decompression_time_ms;
memcpy_writer["max_time_ms"] = decompression_time_ms;
memcpy_writer["avg_time_ms"] = decompression_time_ms;
};
deallocate_type_array(allocator, memcpy_src_transforms, num_bones);
}
// Runs the full decompression benchmark suite and writes it under
// "decompression_time_per_sample": a memcpy baseline (cold and warm cache)
// followed by every combination of playback direction (forward/backward/random),
// entry point (decompress_tracks vs per-track decompress_track) and cache state
// (cold vs warm). The call order of the entries below is the measurement
// protocol; do not reorder casually.
template<class DecompressionContextType>
inline void write_decompression_performance_stats(iallocator& allocator, compressed_tracks* compressed_clips[k_num_decompression_evaluations], DecompressionContextType* contexts[k_num_decompression_evaluations], stat_logging logging, sjson::ObjectWriter& writer)
{
CPUCacheFlusher* cache_flusher = allocate_type<CPUCacheFlusher>(allocator);
const uint32_t num_tracks = compressed_clips[0]->get_num_tracks();
// Shared output buffer all decompression calls write into.
debug_track_writer pose_writer(allocator, track_type8::qvvf, num_tracks);
const uint32_t num_bytes_per_bone = (4 + 3 + 3) * sizeof(float); // Rotation, Translation, Scale
writer["pose_size"] = num_tracks * num_bytes_per_bone;
writer["decompression_time_per_sample"] = [&](sjson::ObjectWriter& per_sample_writer)
{
// Cold/Warm CPU cache, memcpy
write_memcpy_performance_stats(allocator, per_sample_writer, cache_flusher, pose_writer.tracks_typed.qvvf, num_tracks);
write_memcpy_performance_stats(allocator, per_sample_writer, nullptr, pose_writer.tracks_typed.qvvf, num_tracks);
// Cold CPU cache, decompress_pose
write_decompression_performance_stats(logging, per_sample_writer, "forward_pose_cold", PlaybackDirection::Forward, DecompressionFunction::DecompressPose, compressed_clips, contexts, cache_flusher, pose_writer);
write_decompression_performance_stats(logging, per_sample_writer, "backward_pose_cold", PlaybackDirection::Backward, DecompressionFunction::DecompressPose, compressed_clips, contexts, cache_flusher, pose_writer);
write_decompression_performance_stats(logging, per_sample_writer, "random_pose_cold", PlaybackDirection::Random, DecompressionFunction::DecompressPose, compressed_clips, contexts, cache_flusher, pose_writer);
// Warm CPU cache, decompress_pose
write_decompression_performance_stats(logging, per_sample_writer, "forward_pose_warm", PlaybackDirection::Forward, DecompressionFunction::DecompressPose, compressed_clips, contexts, nullptr, pose_writer);
write_decompression_performance_stats(logging, per_sample_writer, "backward_pose_warm", PlaybackDirection::Backward, DecompressionFunction::DecompressPose, compressed_clips, contexts, nullptr, pose_writer);
write_decompression_performance_stats(logging, per_sample_writer, "random_pose_warm", PlaybackDirection::Random, DecompressionFunction::DecompressPose, compressed_clips, contexts, nullptr, pose_writer);
// Cold CPU cache, decompress_bone
write_decompression_performance_stats(logging, per_sample_writer, "forward_bone_cold", PlaybackDirection::Forward, DecompressionFunction::DecompressBone, compressed_clips, contexts, cache_flusher, pose_writer);
write_decompression_performance_stats(logging, per_sample_writer, "backward_bone_cold", PlaybackDirection::Backward, DecompressionFunction::DecompressBone, compressed_clips, contexts, cache_flusher, pose_writer);
write_decompression_performance_stats(logging, per_sample_writer, "random_bone_cold", PlaybackDirection::Random, DecompressionFunction::DecompressBone, compressed_clips, contexts, cache_flusher, pose_writer);
// Warm CPU cache, decompress_bone
write_decompression_performance_stats(logging, per_sample_writer, "forward_bone_warm", PlaybackDirection::Forward, DecompressionFunction::DecompressBone, compressed_clips, contexts, nullptr, pose_writer);
write_decompression_performance_stats(logging, per_sample_writer, "backward_bone_warm", PlaybackDirection::Backward, DecompressionFunction::DecompressBone, compressed_clips, contexts, nullptr, pose_writer);
write_decompression_performance_stats(logging, per_sample_writer, "random_bone_warm", PlaybackDirection::Random, DecompressionFunction::DecompressBone, compressed_clips, contexts, nullptr, pose_writer);
};
deallocate_type(allocator, cache_flusher);
}
// Default decompression settings restricted to the latest compressed tracks
// version only, used by the benchmark fast-path below so older-version support
// is compiled out.
struct default_transform_decompression_settings_latest final : public default_transform_decompression_settings
{
static constexpr compressed_tracks_version16 version_supported() { return compressed_tracks_version16::latest; }
};
// Entry point for decompression profiling of a single compressed clip.
// Duplicates the clip k_num_decompression_evaluations times so cold-cache
// evaluations each touch distinct memory, creates one decompression context per
// copy, delegates to the benchmark suite above, then frees everything.
// Only uniformly sampled clips are profiled; others return immediately.
inline void write_decompression_performance_stats(iallocator& allocator, const compression_settings& settings, const compressed_tracks& compressed_clip, stat_logging logging, sjson::ObjectWriter& writer)
{
(void)settings;
if (compressed_clip.get_algorithm_type() != algorithm_type8::uniformly_sampled)
return;
#if defined(ACL_HAS_ASSERT_CHECKS)
// If we can, we use a fast-path that simulates what a real game engine would use
// by disabling the things they normally wouldn't care about like deprecated formats
// and debugging features
const bool use_uniform_fast_path = settings.rotation_format == rotation_format8::quatf_drop_w_variable
&& settings.translation_format == vector_format8::vector3f_variable
&& settings.scale_format == vector_format8::vector3f_variable;
ACL_ASSERT(use_uniform_fast_path, "We do not support profiling the debug code path");
#endif
// Byte-for-byte copies of the input clip, one per evaluation.
compressed_tracks* compressed_clips[k_num_decompression_evaluations];
for (uint32_t clip_index = 0; clip_index < k_num_decompression_evaluations; ++clip_index)
{
void* clip = allocator.allocate(compressed_clip.get_size(), alignof(compressed_tracks));
std::memcpy(clip, &compressed_clip, compressed_clip.get_size());
compressed_clips[clip_index] = reinterpret_cast<compressed_tracks*>(clip);
}
// One context per clip copy; initialized later by the benchmark itself.
decompression_context<default_transform_decompression_settings_latest>* contexts[k_num_decompression_evaluations];
for (uint32_t clip_index = 0; clip_index < k_num_decompression_evaluations; ++clip_index)
contexts[clip_index] = make_decompression_context<default_transform_decompression_settings_latest>(allocator);
write_decompression_performance_stats(allocator, compressed_clips, contexts, logging, writer);
for (uint32_t pass_index = 0; pass_index < k_num_decompression_evaluations; ++pass_index)
deallocate_type(allocator, contexts[pass_index]);
for (uint32_t clip_index = 0; clip_index < k_num_decompression_evaluations; ++clip_index)
allocator.deallocate(compressed_clips[clip_index], compressed_clip.get_size());
}
}
}
ACL_IMPL_FILE_PRAGMA_POP
#endif // #if defined(SJSON_CPP_WRITER)

View File

@ -0,0 +1,466 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2017 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/iallocator.h"
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/error.h"
#include "acl/core/enum_utils.h"
#include "acl/core/track_formats.h"
#include "acl/core/track_types.h"
#include "acl/core/range_reduction_types.h"
#include "acl/core/variable_bit_rates.h"
#include "acl/math/quat_packing.h"
#include "acl/math/vector4_packing.h"
#include "acl/compression/impl/animated_track_utils.h"
#include "acl/compression/impl/clip_context.h"
#include "acl/core/impl/compressed_headers.h"
#include <rtm/vector4f.h>
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
namespace acl_impl
{
// Computes the total size in bytes of the clip-level range data: one range
// entry per animated (non-constant) component, for the components that have
// range reduction enabled.
inline uint32_t get_stream_range_data_size(const clip_context& clip, range_reduction_flags8 range_reduction, rotation_format8 rotation_format)
{
	// Per-entry sizes; zero when that component doesn't use range reduction.
	const uint32_t rotation_size = are_any_enum_flags_set(range_reduction, range_reduction_flags8::rotations) ? get_range_reduction_rotation_size(rotation_format) : 0;
	const uint32_t translation_size = are_any_enum_flags_set(range_reduction, range_reduction_flags8::translations) ? k_clip_range_reduction_vector3_range_size : 0;
	const uint32_t scale_size = are_any_enum_flags_set(range_reduction, range_reduction_flags8::scales) ? k_clip_range_reduction_vector3_range_size : 0;

	uint32_t num_animated_rotations = 0;
	uint32_t num_animated_translations = 0;
	uint32_t num_animated_scales = 0;

	// Only use the first segment, it contains the necessary information
	const SegmentContext& segment = clip.segments[0];
	for (const BoneStreams& bone_stream : segment.const_bone_iterator())
	{
		if (!bone_stream.is_rotation_constant)
			num_animated_rotations++;

		if (!bone_stream.is_translation_constant)
			num_animated_translations++;

		if (!bone_stream.is_scale_constant)
			num_animated_scales++;
	}

	return (num_animated_rotations * rotation_size)
		+ (num_animated_translations * translation_size)
		+ (num_animated_scales * scale_size);
}
// Writes the range data (min + extent) of a single vector track to
// 'out_range_data' and advances the pointer past what was written.
// Clip-level ranges are stored as two full-precision float3 values; segment
// level ranges are quantized, except for constant bit rate tracks whose first
// raw sample is copied verbatim instead.
inline void write_range_track_data_impl(const TrackStream& track, const TrackStreamRange& range, bool is_clip_range_data, uint8_t*& out_range_data)
{
	const rtm::vector4f range_min = range.get_min();
	const rtm::vector4f range_extent = range.get_extent();

	if (is_clip_range_data)
	{
		// Full float precision, three components per value.
		const uint32_t range_member_size = sizeof(float) * 3;

		std::memcpy(out_range_data, &range_min, range_member_size);
		out_range_data += range_member_size;
		std::memcpy(out_range_data, &range_extent, range_member_size);
		out_range_data += range_member_size;
	}
	else if (is_constant_bit_rate(track.get_bit_rate()))
	{
		// Constant bit rate tracks store their first raw sample (3x uint16) as range data.
		const uint8_t* sample_ptr = track.get_raw_sample_ptr(0);
		std::memcpy(out_range_data, sample_ptr, sizeof(uint16_t) * 3);
		out_range_data += sizeof(uint16_t) * 3;
	}
	else
	{
		// Segment ranges are quantized to 3x uint8 for min and extent.
		pack_vector3_u24_unsafe(range_min, out_range_data);
		out_range_data += sizeof(uint8_t) * 3;
		pack_vector3_u24_unsafe(range_extent, out_range_data);
		out_range_data += sizeof(uint8_t) * 3;
	}
}
// Writes the range data for every animated sub-track of every output bone, in
// AOS bone order. Rotations are written inline here (their layout depends on the
// rotation format); translations and scales defer to write_range_track_data_impl.
// Returns the number of bytes written, which must equal 'range_data_size'.
inline uint32_t write_range_track_data(const BoneStreams* bone_streams, const BoneRanges* bone_ranges,
range_reduction_flags8 range_reduction, bool is_clip_range_data,
uint8_t* range_data, uint32_t range_data_size,
const uint32_t* output_bone_mapping, uint32_t num_output_bones)
{
ACL_ASSERT(range_data != nullptr, "'range_data' cannot be null!");
(void)range_data_size;
#if defined(ACL_HAS_ASSERT_CHECKS)
// Only used to validate that we write exactly 'range_data_size' bytes
const uint8_t* range_data_end = add_offset_to_ptr<uint8_t>(range_data, range_data_size);
#endif
const uint8_t* range_data_start = range_data;
for (uint32_t output_index = 0; output_index < num_output_bones; ++output_index)
{
const uint32_t bone_index = output_bone_mapping[output_index];
const BoneStreams& bone_stream = bone_streams[bone_index];
const BoneRanges& bone_range = bone_ranges[bone_index];
// normalized value is between [0.0 .. 1.0]
// value = (normalized value * range extent) + range min
// normalized value = (value - range min) / range extent
if (are_any_enum_flags_set(range_reduction, range_reduction_flags8::rotations) && !bone_stream.is_rotation_constant)
{
const rtm::vector4f range_min = bone_range.rotation.get_min();
const rtm::vector4f range_extent = bone_range.rotation.get_extent();
if (is_clip_range_data)
{
// Clip level: raw floats; full quaternions need 4 components, dropped-component formats need 3
const uint32_t range_member_size = bone_stream.rotations.get_rotation_format() == rotation_format8::quatf_full ? (sizeof(float) * 4) : (sizeof(float) * 3);
std::memcpy(range_data, &range_min, range_member_size);
range_data += range_member_size;
std::memcpy(range_data, &range_extent, range_member_size);
range_data += range_member_size;
}
else
{
if (bone_stream.rotations.get_rotation_format() == rotation_format8::quatf_full)
{
// Segment level, full quaternion: quantize each of the 4 components to 8 bits
pack_vector4_32(range_min, true, range_data);
range_data += sizeof(uint8_t) * 4;
pack_vector4_32(range_extent, true, range_data);
range_data += sizeof(uint8_t) * 4;
}
else
{
if (is_constant_bit_rate(bone_stream.rotations.get_bit_rate()))
{
// Constant bit rate: no range, store the first raw sample (3x uint16) instead
const uint8_t* rotation = bone_stream.rotations.get_raw_sample_ptr(0);
std::memcpy(range_data, rotation, sizeof(uint16_t) * 3);
range_data += sizeof(uint16_t) * 3;
}
else
{
// Segment level: quantize min/extent to 24 bits (3 bytes) each
pack_vector3_u24_unsafe(range_min, range_data);
range_data += sizeof(uint8_t) * 3;
pack_vector3_u24_unsafe(range_extent, range_data);
range_data += sizeof(uint8_t) * 3;
}
}
}
}
if (are_any_enum_flags_set(range_reduction, range_reduction_flags8::translations) && !bone_stream.is_translation_constant)
write_range_track_data_impl(bone_stream.translations, bone_range.translation, is_clip_range_data, range_data);
if (are_any_enum_flags_set(range_reduction, range_reduction_flags8::scales) && !bone_stream.is_scale_constant)
write_range_track_data_impl(bone_stream.scales, bone_range.scale, is_clip_range_data, range_data);
ACL_ASSERT(range_data <= range_data_end, "Invalid range data offset. Wrote too much data.");
}
ACL_ASSERT(range_data == range_data_end, "Invalid range data offset. Wrote too little data.");
return safe_static_cast<uint32_t>(range_data - range_data_start);
}
// Writes the clip level range data in SOA groups of up to 4 animated sub-tracks.
// Rotations are swizzled into SOA form (min.xxxx, min.yyyy, ...); translations
// and scales are written AOS as min.xyz/extent.xyz pairs.
// Returns the number of bytes written, which must equal 'range_data_size'.
// Fix: the per-group assert checked for writing too much data but its message
// claimed too little (now consistent with write_range_track_data); also removed
// the stale (void)range_reduction cast since the parameter is used below.
inline uint32_t write_clip_range_data(const clip_context& clip, range_reduction_flags8 range_reduction, uint8_t* range_data, uint32_t range_data_size, const uint32_t* output_bone_mapping, uint32_t num_output_bones)
{
// Only use the first segment, it contains the necessary information
const SegmentContext& segment = clip.segments[0];
(void)range_data_size;
// Data is ordered in groups of 4 animated sub-tracks (e.g rot0, rot1, rot2, rot3)
// Order depends on animated track order. If we have 6 animated rotation tracks before the first animated
// translation track, we'll have 8 animated rotation sub-tracks followed by 4 animated translation sub-tracks.
// Once we reach the end, there is no extra padding. The last group might be less than 4 sub-tracks.
// This is because we always process 4 animated sub-tracks at a time and cache the results.
// Groups are written in the order of first use and as such are sorted by their lowest sub-track index.
#if defined(ACL_HAS_ASSERT_CHECKS)
// Only used to validate that we write exactly 'range_data_size' bytes
const uint8_t* range_data_end = add_offset_to_ptr<uint8_t>(range_data, range_data_size);
#endif
const uint8_t* range_data_start = range_data;
const rotation_format8 rotation_format = segment.bone_streams[0].rotations.get_rotation_format(); // The same for every track
// Each range entry is a min/extent at most sizeof(float4f) each, 32 bytes total max per sub-track, 4 sub-tracks per group
rtm::vector4f range_group_min[4];
rtm::vector4f range_group_extent[4];
// Returns whether a sub-track of the given type participates in range reduction
auto group_filter_action = [&](animation_track_type8 group_type, uint32_t bone_index)
{
(void)bone_index;
if (group_type == animation_track_type8::rotation)
return are_any_enum_flags_set(range_reduction, range_reduction_flags8::rotations);
else if (group_type == animation_track_type8::translation)
return are_any_enum_flags_set(range_reduction, range_reduction_flags8::translations);
else
return are_any_enum_flags_set(range_reduction, range_reduction_flags8::scales);
};
// Caches the min/extent of the sub-track at slot 'group_size' within the current group
auto group_entry_action = [&](animation_track_type8 group_type, uint32_t group_size, uint32_t bone_index)
{
if (group_type == animation_track_type8::rotation)
{
const BoneRanges& bone_range = clip.ranges[bone_index];
const rtm::vector4f range_min = bone_range.rotation.get_min();
const rtm::vector4f range_extent = bone_range.rotation.get_extent();
range_group_min[group_size] = range_min;
range_group_extent[group_size] = range_extent;
}
else if (group_type == animation_track_type8::translation)
{
const BoneRanges& bone_range = clip.ranges[bone_index];
const rtm::vector4f range_min = bone_range.translation.get_min();
const rtm::vector4f range_extent = bone_range.translation.get_extent();
range_group_min[group_size] = range_min;
range_group_extent[group_size] = range_extent;
}
else
{
const BoneRanges& bone_range = clip.ranges[bone_index];
const rtm::vector4f range_min = bone_range.scale.get_min();
const rtm::vector4f range_extent = bone_range.scale.get_extent();
range_group_min[group_size] = range_min;
range_group_extent[group_size] = range_extent;
}
};
// Writes a completed group (up to 4 sub-tracks) to the output buffer
auto group_flush_action = [&](animation_track_type8 group_type, uint32_t group_size)
{
if (group_type == animation_track_type8::rotation)
{
// 2x float3f or float4f, we swizzle into SOA form
RTM_MATRIXF_TRANSPOSE_4X4(range_group_min[0], range_group_min[1], range_group_min[2], range_group_min[3],
range_group_min[0], range_group_min[1], range_group_min[2], range_group_min[3]);
RTM_MATRIXF_TRANSPOSE_4X4(range_group_extent[0], range_group_extent[1], range_group_extent[2], range_group_extent[3],
range_group_extent[0], range_group_extent[1], range_group_extent[2], range_group_extent[3]);
if (rotation_format == rotation_format8::quatf_full)
{
// min
std::memcpy(range_data + group_size * sizeof(float) * 0, &range_group_min[0], group_size * sizeof(float)); // xxxx
std::memcpy(range_data + group_size * sizeof(float) * 1, &range_group_min[1], group_size * sizeof(float)); // yyyy
std::memcpy(range_data + group_size * sizeof(float) * 2, &range_group_min[2], group_size * sizeof(float)); // zzzz
std::memcpy(range_data + group_size * sizeof(float) * 3, &range_group_min[3], group_size * sizeof(float)); // wwww
// extent
std::memcpy(range_data + group_size * sizeof(float) * 4, &range_group_extent[0], group_size * sizeof(float)); // xxxx
std::memcpy(range_data + group_size * sizeof(float) * 5, &range_group_extent[1], group_size * sizeof(float)); // yyyy
std::memcpy(range_data + group_size * sizeof(float) * 6, &range_group_extent[2], group_size * sizeof(float)); // zzzz
std::memcpy(range_data + group_size * sizeof(float) * 7, &range_group_extent[3], group_size * sizeof(float)); // wwww
range_data += group_size * sizeof(float) * 8;
}
else
{
// min
std::memcpy(range_data + group_size * sizeof(float) * 0, &range_group_min[0], group_size * sizeof(float)); // xxxx
std::memcpy(range_data + group_size * sizeof(float) * 1, &range_group_min[1], group_size * sizeof(float)); // yyyy
std::memcpy(range_data + group_size * sizeof(float) * 2, &range_group_min[2], group_size * sizeof(float)); // zzzz
// extent
std::memcpy(range_data + group_size * sizeof(float) * 3, &range_group_extent[0], group_size * sizeof(float)); // xxxx
std::memcpy(range_data + group_size * sizeof(float) * 4, &range_group_extent[1], group_size * sizeof(float)); // yyyy
std::memcpy(range_data + group_size * sizeof(float) * 5, &range_group_extent[2], group_size * sizeof(float)); // zzzz
range_data += group_size * sizeof(float) * 6;
}
}
else
{
// 2x float3f
for (uint32_t group_index = 0; group_index < group_size; ++group_index)
{
std::memcpy(range_data, &range_group_min[group_index], sizeof(rtm::float3f));
std::memcpy(range_data + sizeof(rtm::float3f), &range_group_extent[group_index], sizeof(rtm::float3f));
range_data += sizeof(rtm::float3f) * 2;
}
}
ACL_ASSERT(range_data <= range_data_end, "Invalid range data offset. Wrote too much data.");
};
animated_group_writer(segment, output_bone_mapping, num_output_bones, group_filter_action, group_entry_action, group_flush_action);
ACL_ASSERT(range_data == range_data_end, "Invalid range data offset. Wrote too little data.");
return safe_static_cast<uint32_t>(range_data - range_data_start);
}
// Writes the segment level range data in SOA groups of up to 4 animated sub-tracks.
// Rotation groups are always padded to 4 entries to keep decompression simple;
// translation/scale groups are written AOS, only as many entries as the group holds.
// Returns the number of bytes written, which must equal 'range_data_size'.
// Fix: the per-group assert checked for writing too much data but its message
// claimed too little (now consistent with write_range_track_data); also removed
// the stale (void)range_reduction cast since the parameter is used below.
inline uint32_t write_segment_range_data(const SegmentContext& segment, range_reduction_flags8 range_reduction, uint8_t* range_data, uint32_t range_data_size, const uint32_t* output_bone_mapping, uint32_t num_output_bones)
{
ACL_ASSERT(range_data != nullptr, "'range_data' cannot be null!");
(void)range_data_size;
// Data is ordered in groups of 4 animated sub-tracks (e.g rot0, rot1, rot2, rot3)
// Order depends on animated track order. If we have 6 animated rotation tracks before the first animated
// translation track, we'll have 8 animated rotation sub-tracks followed by 4 animated translation sub-tracks.
// Once we reach the end, there is no extra padding. The last group might be less than 4 sub-tracks.
// This is because we always process 4 animated sub-tracks at a time and cache the results.
// Groups are written in the order of first use and as such are sorted by their lowest sub-track index.
// normalized value is between [0.0 .. 1.0]
// value = (normalized value * range extent) + range min
// normalized value = (value - range min) / range extent
#if defined(ACL_HAS_ASSERT_CHECKS)
// Only used to validate that we write exactly 'range_data_size' bytes
const uint8_t* range_data_end = add_offset_to_ptr<uint8_t>(range_data, range_data_size);
#endif
const uint8_t* range_data_start = range_data;
// For rotations contains: min.xxxx, min.yyyy, min.zzzz, extent.xxxx, extent.yyyy, extent.zzzz
// For trans/scale: min0.xyz, extent0.xyz, min1.xyz, extent1.xyz, min2.xyz, extent2.xyz, min3.xyz, extent3.xyz
// To keep decompression simpler, rotations are padded to 4 elements even if the last group is partial
alignas(16) uint8_t range_data_group[6 * 4] = { 0 };
// Returns whether a sub-track of the given type participates in range reduction
auto group_filter_action = [&](animation_track_type8 group_type, uint32_t bone_index)
{
(void)bone_index;
if (group_type == animation_track_type8::rotation)
return are_any_enum_flags_set(range_reduction, range_reduction_flags8::rotations);
else if (group_type == animation_track_type8::translation)
return are_any_enum_flags_set(range_reduction, range_reduction_flags8::translations);
else
return are_any_enum_flags_set(range_reduction, range_reduction_flags8::scales);
};
// Packs the sub-track at slot 'group_size' into the temporary group buffer
auto group_entry_action = [&](animation_track_type8 group_type, uint32_t group_size, uint32_t bone_index)
{
const BoneStreams& bone_stream = segment.bone_streams[bone_index];
if (group_type == animation_track_type8::rotation)
{
if (is_constant_bit_rate(bone_stream.rotations.get_bit_rate()))
{
// Constant bit rate: no range, store the first raw sample instead
const uint8_t* sample = bone_stream.rotations.get_raw_sample_ptr(0);
// Swizzle into SOA form
range_data_group[group_size + 0] = sample[1]; // sample.x top 8 bits
range_data_group[group_size + 4] = sample[0]; // sample.x bottom 8 bits
range_data_group[group_size + 8] = sample[3]; // sample.y top 8 bits
range_data_group[group_size + 12] = sample[2]; // sample.y bottom 8 bits
range_data_group[group_size + 16] = sample[5]; // sample.z top 8 bits
range_data_group[group_size + 20] = sample[4]; // sample.z bottom 8 bits
}
else
{
const BoneRanges& bone_range = segment.ranges[bone_index];
const rtm::vector4f range_min = bone_range.rotation.get_min();
const rtm::vector4f range_extent = bone_range.rotation.get_extent();
// Swizzle into SOA form
alignas(16) uint8_t range_min_buffer[16];
alignas(16) uint8_t range_extent_buffer[16];
pack_vector3_u24_unsafe(range_min, range_min_buffer);
pack_vector3_u24_unsafe(range_extent, range_extent_buffer);
range_data_group[group_size + 0] = range_min_buffer[0];
range_data_group[group_size + 4] = range_min_buffer[1];
range_data_group[group_size + 8] = range_min_buffer[2];
range_data_group[group_size + 12] = range_extent_buffer[0];
range_data_group[group_size + 16] = range_extent_buffer[1];
range_data_group[group_size + 20] = range_extent_buffer[2];
}
}
else if (group_type == animation_track_type8::translation)
{
if (is_constant_bit_rate(bone_stream.translations.get_bit_rate()))
{
// Constant bit rate: no range, store the first raw sample instead
const uint8_t* sample = bone_stream.translations.get_raw_sample_ptr(0);
uint8_t* sub_track_range_data = &range_data_group[group_size * 6];
std::memcpy(sub_track_range_data, sample, 6);
}
else
{
const BoneRanges& bone_range = segment.ranges[bone_index];
const rtm::vector4f range_min = bone_range.translation.get_min();
const rtm::vector4f range_extent = bone_range.translation.get_extent();
uint8_t* sub_track_range_data = &range_data_group[group_size * 6];
pack_vector3_u24_unsafe(range_min, sub_track_range_data);
pack_vector3_u24_unsafe(range_extent, sub_track_range_data + 3);
}
}
else
{
if (is_constant_bit_rate(bone_stream.scales.get_bit_rate()))
{
// Constant bit rate: no range, store the first raw sample instead
const uint8_t* sample = bone_stream.scales.get_raw_sample_ptr(0);
uint8_t* sub_track_range_data = &range_data_group[group_size * 6];
std::memcpy(sub_track_range_data, sample, 6);
}
else
{
const BoneRanges& bone_range = segment.ranges[bone_index];
const rtm::vector4f range_min = bone_range.scale.get_min();
const rtm::vector4f range_extent = bone_range.scale.get_extent();
uint8_t* sub_track_range_data = &range_data_group[group_size * 6];
pack_vector3_u24_unsafe(range_min, sub_track_range_data);
pack_vector3_u24_unsafe(range_extent, sub_track_range_data + 3);
}
}
};
// Copies a completed group to the output buffer and resets the scratch buffer
auto group_flush_action = [&](animation_track_type8 group_type, uint32_t group_size)
{
// Rotations are always padded out to 4 entries, other types write only what they hold
const uint32_t copy_size = group_type == animation_track_type8::rotation ? 4 : group_size;
std::memcpy(range_data, &range_data_group[0], copy_size * 6);
range_data += copy_size * 6;
// Zero out the temporary buffer for the final group to not contain partial garbage
std::memset(&range_data_group[0], 0, sizeof(range_data_group));
ACL_ASSERT(range_data <= range_data_end, "Invalid range data offset. Wrote too much data.");
};
animated_group_writer(segment, output_bone_mapping, num_output_bones, group_filter_action, group_entry_action, group_flush_action);
ACL_ASSERT(range_data == range_data_end, "Invalid range data offset. Wrote too little data.");
return safe_static_cast<uint32_t>(range_data - range_data_start);
}
}
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,142 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2017 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/iallocator.h"
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/impl/compressed_headers.h"
#include "acl/compression/compression_settings.h"
#include "acl/compression/impl/clip_context.h"
#include "acl/compression/impl/segment_context.h"
#include "acl/compression/impl/write_range_data.h"
#include "acl/compression/impl/write_stream_data.h"
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
namespace acl_impl
{
inline uint32_t write_segment_start_indices(const clip_context& clip, uint32_t* segment_start_indices)
{
uint32_t size_written = 0;
const uint32_t num_segments = clip.num_segments;
for (uint32_t segment_index = 0; segment_index < num_segments; ++segment_index)
{
const SegmentContext& segment = clip.segments[segment_index];
segment_start_indices[segment_index] = segment.clip_sample_offset;
size_written += sizeof(uint32_t);
}
// Write our sentinel value
segment_start_indices[clip.num_segments] = 0xFFFFFFFFU;
size_written += sizeof(uint32_t);
return size_written;
}
// Fills in one segment_header per segment and computes each segment's data
// offset. 'segment_data_start_offset' is where the first segment's data lives;
// each segment's data is laid out as: format-per-track data (2 byte aligned
// range data follows), range data (4 byte aligned animated data follows),
// animated data. Returns the number of header bytes written.
inline uint32_t write_segment_headers(const clip_context& clip, const compression_settings& settings, segment_header* segment_headers, uint32_t segment_data_start_offset)
{
uint32_t size_written = 0;
// Same size for every segment
const uint32_t format_per_track_data_size = get_format_per_track_data_size(clip, settings.rotation_format, settings.translation_format, settings.scale_format);
uint32_t segment_data_offset = segment_data_start_offset;
for (uint32_t segment_index = 0; segment_index < clip.num_segments; ++segment_index)
{
const SegmentContext& segment = clip.segments[segment_index];
segment_header& header = segment_headers[segment_index];
// The buffer is expected to be zero-initialized; a non-zero value means something already wrote here
ACL_ASSERT(header.animated_pose_bit_size == 0, "Buffer overrun detected");
header.animated_pose_bit_size = segment.animated_pose_bit_size;
header.segment_data = segment_data_offset;
segment_data_offset = align_to(segment_data_offset + format_per_track_data_size, 2); // Aligned to 2 bytes
segment_data_offset = align_to(segment_data_offset + segment.range_data_size, 4); // Aligned to 4 bytes
segment_data_offset = segment_data_offset + segment.animated_data_size;
size_written += sizeof(segment_header);
// Sanity check: the computed layout must match the pre-calculated segment size
ACL_ASSERT((segment_data_offset - (uint32_t)header.segment_data) == segment.segment_data_size, "Unexpected segment size");
}
return size_written;
}
// Writes the data payload of every segment (format-per-track metadata, range
// data, animated track data) into the buffers described by the previously
// written segment headers. Returns the total number of bytes written.
inline uint32_t write_segment_data(const clip_context& clip, const compression_settings& settings, range_reduction_flags8 range_reduction, transform_tracks_header& header, const uint32_t* output_bone_mapping, uint32_t num_output_bones)
{
segment_header* segment_headers = header.get_segment_headers();
// Same size for every segment
const uint32_t format_per_track_data_size = get_format_per_track_data_size(clip, settings.rotation_format, settings.translation_format, settings.scale_format);
uint32_t size_written = 0;
const uint32_t num_segments = clip.num_segments;
for (uint32_t segment_index = 0; segment_index < num_segments; ++segment_index)
{
const SegmentContext& segment = clip.segments[segment_index];
segment_header& segment_header_ = segment_headers[segment_index];
// Resolve the three sub-buffers for this segment from its header offsets
uint8_t* format_per_track_data = nullptr;
uint8_t* range_data = nullptr;
uint8_t* animated_data = nullptr;
header.get_segment_data(segment_header_, format_per_track_data, range_data, animated_data);
// The buffers are expected to be zero-initialized; a non-zero first byte means something already wrote here
ACL_ASSERT(format_per_track_data[0] == 0, "Buffer overrun detected");
ACL_ASSERT(range_data[0] == 0, "Buffer overrun detected");
ACL_ASSERT(animated_data[0] == 0, "Buffer overrun detected");
if (format_per_track_data_size != 0)
{
const uint32_t size = write_format_per_track_data(segment, format_per_track_data, format_per_track_data_size, output_bone_mapping, num_output_bones);
(void)size;
ACL_ASSERT(size == format_per_track_data_size, "Unexpected format per track data size");
}
if (segment.range_data_size != 0)
{
const uint32_t size = write_segment_range_data(segment, range_reduction, range_data, segment.range_data_size, output_bone_mapping, num_output_bones);
(void)size;
ACL_ASSERT(size == segment.range_data_size, "Unexpected range data size");
}
if (segment.animated_data_size != 0)
{
const uint32_t size = write_animated_track_data(segment, animated_data, segment.animated_data_size, output_bone_mapping, num_output_bones);
(void)size;
ACL_ASSERT(size == segment.animated_data_size, "Unexpected animated data size");
}
size_written += segment.segment_data_size;
}
return size_written;
}
}
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,555 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2017 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#if defined(SJSON_CPP_WRITER)
#include "acl/core/track_formats.h"
#include "acl/core/utils.h"
#include "acl/core/variable_bit_rates.h"
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/impl/memory_cache.h"
#include "acl/compression/transform_error_metrics.h"
#include "acl/compression/track_error.h"
#include "acl/compression/impl/clip_context.h"
#include <chrono>
#include <cstdint>
#include <functional>
#include <thread>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
namespace acl_impl
{
inline void write_summary_segment_stats(const SegmentContext& segment, rotation_format8 rotation_format, vector_format8 translation_format, vector_format8 scale_format, sjson::ObjectWriter& writer)
{
writer["segment_index"] = segment.segment_index;
writer["num_samples"] = segment.num_samples;
const uint32_t format_per_track_data_size = get_format_per_track_data_size(*segment.clip, rotation_format, translation_format, scale_format);
uint32_t segment_size = 0;
segment_size += format_per_track_data_size; // Format per track data
segment_size = align_to(segment_size, 2); // Align range data
segment_size += segment.range_data_size; // Range data
segment_size = align_to(segment_size, 4); // Align animated data
segment_size += segment.animated_data_size; // Animated track data
writer["segment_size"] = segment_size;
writer["animated_frame_size"] = double(segment.animated_data_size) / double(segment.num_samples);
}
inline void write_detailed_segment_stats(const SegmentContext& segment, sjson::ObjectWriter& writer)
{
uint32_t bit_rate_counts[k_num_bit_rates] = { 0 };
for (const BoneStreams& bone_stream : segment.const_bone_iterator())
{
const uint8_t rotation_bit_rate = bone_stream.rotations.get_bit_rate();
if (rotation_bit_rate != k_invalid_bit_rate)
bit_rate_counts[rotation_bit_rate]++;
const uint8_t translation_bit_rate = bone_stream.translations.get_bit_rate();
if (translation_bit_rate != k_invalid_bit_rate)
bit_rate_counts[translation_bit_rate]++;
const uint8_t scale_bit_rate = bone_stream.scales.get_bit_rate();
if (scale_bit_rate != k_invalid_bit_rate)
bit_rate_counts[scale_bit_rate]++;
}
writer["bit_rate_counts"] = [&](sjson::ArrayWriter& bitrate_writer)
{
for (uint32_t bit_rate = 0; bit_rate < k_num_bit_rates; ++bit_rate)
bitrate_writer.push(bit_rate_counts[bit_rate]);
};
// We assume that we always interpolate between 2 poses
const uint32_t animated_pose_byte_size = align_to(segment.animated_pose_bit_size * 2, 8) / 8;
constexpr uint32_t k_cache_line_byte_size = 64;
const uint32_t num_segment_header_cache_lines = align_to(segment.total_header_size, k_cache_line_byte_size) / k_cache_line_byte_size;
const uint32_t num_animated_pose_cache_lines = align_to(animated_pose_byte_size, k_cache_line_byte_size) / k_cache_line_byte_size;
writer["decomp_touched_bytes"] = segment.clip->decomp_touched_bytes + segment.total_header_size + animated_pose_byte_size;
writer["decomp_touched_cache_lines"] = segment.clip->decomp_touched_cache_lines + num_segment_header_cache_lines + num_animated_pose_cache_lines;
}
// Emits exhaustive SJSON error stats for one segment: the per-frame, per-bone
// object-space error between the raw and lossy streams, plus the worst error
// found and where it occurred. Allocates temporary pose buffers from 'allocator'
// and frees them before returning.
inline void write_exhaustive_segment_stats(iallocator& allocator, const SegmentContext& segment, const clip_context& raw_clip_context, const clip_context& additive_base_clip_context, const compression_settings& settings, const track_array_qvvf& track_list, sjson::ObjectWriter& writer)
{
const uint32_t num_bones = raw_clip_context.num_bones;
const bool has_scale = segment_context_has_scale(segment);
ACL_ASSERT(!settings.error_metric->needs_conversion(has_scale), "Error metric conversion not supported");
// Bind the scale/no-scale variant of each error metric entry point up front
const auto local_to_object_space_impl = std::mem_fn(has_scale ? &itransform_error_metric::local_to_object_space : &itransform_error_metric::local_to_object_space_no_scale);
const auto calculate_error_impl = std::mem_fn(has_scale ? &itransform_error_metric::calculate_error : &itransform_error_metric::calculate_error_no_scale);
const auto apply_additive_to_base_impl = std::mem_fn(has_scale ? &itransform_error_metric::apply_additive_to_base : &itransform_error_metric::apply_additive_to_base_no_scale);
// Scratch pose buffers, freed at the end of this function
rtm::qvvf* raw_local_pose = allocate_type_array<rtm::qvvf>(allocator, num_bones);
rtm::qvvf* base_local_pose = allocate_type_array<rtm::qvvf>(allocator, num_bones);
rtm::qvvf* lossy_local_pose = allocate_type_array<rtm::qvvf>(allocator, num_bones);
rtm::qvvf* raw_object_pose = allocate_type_array<rtm::qvvf>(allocator, num_bones);
rtm::qvvf* lossy_object_pose = allocate_type_array<rtm::qvvf>(allocator, num_bones);
uint32_t* parent_transform_indices = allocate_type_array<uint32_t>(allocator, num_bones);
uint32_t* self_transform_indices = allocate_type_array<uint32_t>(allocator, num_bones);
// Build the hierarchy description the error metric needs
for (uint32_t transform_index = 0; transform_index < num_bones; ++transform_index)
{
const track_qvvf& track = track_list[transform_index];
const track_desc_transformf& desc = track.get_description();
parent_transform_indices[transform_index] = desc.parent_index;
self_transform_indices[transform_index] = transform_index;
}
const float sample_rate = raw_clip_context.sample_rate;
const float ref_duration = calculate_duration(raw_clip_context.num_samples, sample_rate);
// Argument blocks for the error metric; raw and lossy variants share everything
// but the local transform buffer
itransform_error_metric::apply_additive_to_base_args apply_additive_to_base_args_raw;
apply_additive_to_base_args_raw.dirty_transform_indices = self_transform_indices;
apply_additive_to_base_args_raw.num_dirty_transforms = num_bones;
apply_additive_to_base_args_raw.local_transforms = raw_local_pose;
apply_additive_to_base_args_raw.base_transforms = base_local_pose;
apply_additive_to_base_args_raw.num_transforms = num_bones;
itransform_error_metric::apply_additive_to_base_args apply_additive_to_base_args_lossy = apply_additive_to_base_args_raw;
apply_additive_to_base_args_lossy.local_transforms = lossy_local_pose;
itransform_error_metric::local_to_object_space_args local_to_object_space_args_raw;
local_to_object_space_args_raw.dirty_transform_indices = self_transform_indices;
local_to_object_space_args_raw.num_dirty_transforms = num_bones;
local_to_object_space_args_raw.parent_transform_indices = parent_transform_indices;
local_to_object_space_args_raw.local_transforms = raw_local_pose;
local_to_object_space_args_raw.num_transforms = num_bones;
itransform_error_metric::local_to_object_space_args local_to_object_space_args_lossy = local_to_object_space_args_raw;
local_to_object_space_args_lossy.local_transforms = lossy_local_pose;
// Tracks the single worst error seen while writing the per-frame array below
track_error worst_bone_error;
writer["error_per_frame_and_bone"] = [&](sjson::ArrayWriter& frames_writer)
{
for (uint32_t sample_index = 0; sample_index < segment.num_samples; ++sample_index)
{
// Clamp to the clip duration to avoid sampling past the last frame
const float sample_time = rtm::scalar_min(float(segment.clip_sample_offset + sample_index) / sample_rate, ref_duration);
sample_streams(raw_clip_context.segments[0].bone_streams, num_bones, sample_time, raw_local_pose);
sample_streams(segment.bone_streams, num_bones, sample_time, lossy_local_pose);
if (raw_clip_context.has_additive_base)
{
// Map our sample time onto the base clip's timeline before applying the additive
const float normalized_sample_time = additive_base_clip_context.num_samples > 1 ? (sample_time / ref_duration) : 0.0F;
const float additive_sample_time = additive_base_clip_context.num_samples > 1 ? (normalized_sample_time * additive_base_clip_context.duration) : 0.0F;
sample_streams(additive_base_clip_context.segments[0].bone_streams, num_bones, additive_sample_time, base_local_pose);
apply_additive_to_base_impl(settings.error_metric, apply_additive_to_base_args_raw, raw_local_pose);
apply_additive_to_base_impl(settings.error_metric, apply_additive_to_base_args_lossy, lossy_local_pose);
}
local_to_object_space_impl(settings.error_metric, local_to_object_space_args_raw, raw_object_pose);
local_to_object_space_impl(settings.error_metric, local_to_object_space_args_lossy, lossy_object_pose);
frames_writer.push_newline();
frames_writer.push([&](sjson::ArrayWriter& frame_writer)
{
for (uint32_t bone_index = 0; bone_index < num_bones; ++bone_index)
{
const track_qvvf& track = track_list[bone_index];
const track_desc_transformf& desc = track.get_description();
itransform_error_metric::calculate_error_args calculate_error_args;
calculate_error_args.transform0 = raw_object_pose + bone_index;
calculate_error_args.transform1 = lossy_object_pose + bone_index;
calculate_error_args.construct_sphere_shell(desc.shell_distance);
const float error = rtm::scalar_cast(calculate_error_impl(settings.error_metric, calculate_error_args));
frame_writer.push(error);
if (error > worst_bone_error.error)
{
worst_bone_error.error = error;
worst_bone_error.index = bone_index;
worst_bone_error.sample_time = sample_time;
}
}
});
}
};
writer["max_error"] = worst_bone_error.error;
writer["worst_bone"] = worst_bone_error.index;
writer["worst_time"] = worst_bone_error.sample_time;
// Free the scratch buffers allocated above
deallocate_type_array(allocator, raw_local_pose, num_bones);
deallocate_type_array(allocator, base_local_pose, num_bones);
deallocate_type_array(allocator, lossy_local_pose, num_bones);
deallocate_type_array(allocator, raw_object_pose, num_bones);
deallocate_type_array(allocator, lossy_object_pose, num_bones);
deallocate_type_array(allocator, parent_transform_indices, num_bones);
deallocate_type_array(allocator, self_transform_indices, num_bones);
}
// Returns the size in bytes of the clip level metadata common to all clips:
// segment bookkeeping plus the default/constant sub-track bit sets.
inline uint32_t calculate_clip_metadata_common_size(const clip_context& clip, const compressed_tracks& compressed_clip)
{
uint32_t result = 0;
// Segment start indices (with sentinel) are only present with multiple segments
if (clip.num_segments > 1)
result += sizeof(uint32_t) * (clip.num_segments + 1);
result += sizeof(segment_header) * clip.num_segments;
// Two bit sets of equal size: default sub-tracks and constant sub-tracks
const bitset_description bitset_desc = bitset_description::make_from_num_bits(compressed_clip.get_num_tracks());
result += bitset_desc.get_num_bytes() * 2;
return result;
}
// Returns the total size in bytes of the 'format per track' metadata across all
// segments: one byte per variable-rate animated sub-track, per segment.
inline uint32_t calculate_segment_metadata_common_size(const clip_context& clip, const compression_settings& settings)
{
const bool is_rotation_variable = is_rotation_format_variable(settings.rotation_format);
const bool is_translation_variable = is_vector_format_variable(settings.translation_format);
const bool is_scale_variable = is_vector_format_variable(settings.scale_format);
// Only use the first segment, it contains the necessary information
const SegmentContext& segment = clip.segments[0];
uint32_t num_entries = 0;
for (const BoneStreams& bone_stream : segment.const_bone_iterator())
{
if (bone_stream.is_stripped_from_output())
continue;
// Count one metadata byte per animated variable-rate sub-track
num_entries += (!bone_stream.is_rotation_constant && is_rotation_variable) ? 1 : 0;
num_entries += (!bone_stream.is_translation_constant && is_translation_variable) ? 1 : 0;
num_entries += (!bone_stream.is_scale_constant && is_scale_variable) ? 1 : 0;
}
return num_entries * clip.num_segments;
}
inline uint32_t calculate_segment_metadata_rotation_size(const clip_context& clip, range_reduction_flags8 range_reduction)
{
if (clip.num_segments == 1)
return 0;
// Only use the first segment, it contains the necessary information
const SegmentContext& segment = clip.segments[0];
uint32_t result = 0;
for (const BoneStreams& bone_stream : segment.const_bone_iterator())
{
if (bone_stream.is_stripped_from_output())
continue;
// Range data
if (are_any_enum_flags_set(range_reduction, range_reduction_flags8::rotations) && !bone_stream.is_rotation_constant)
{
const uint32_t num_components = bone_stream.rotations.get_rotation_format() == rotation_format8::quatf_full ? 8 : 6;
result += num_components * k_segment_range_reduction_num_bytes_per_component;
}
}
return result * clip.num_segments;
}
inline uint32_t calculate_segment_metadata_translation_size(const clip_context& clip, range_reduction_flags8 range_reduction)
{
if (clip.num_segments == 1)
return 0;
// Only use the first segment, it contains the necessary information
const SegmentContext& segment = clip.segments[0];
uint32_t result = 0;
for (const BoneStreams& bone_stream : segment.const_bone_iterator())
{
if (bone_stream.is_stripped_from_output())
continue;
// Range data
if (are_any_enum_flags_set(range_reduction, range_reduction_flags8::translations) && !bone_stream.is_translation_constant)
result += k_segment_range_reduction_num_bytes_per_component * 6;
}
return result * clip.num_segments;
}
inline uint32_t calculate_segment_metadata_scale_size(const clip_context& clip, range_reduction_flags8 range_reduction)
{
if (clip.num_segments == 1)
return 0;
// Only use the first segment, it contains the necessary information
const SegmentContext& segment = clip.segments[0];
uint32_t result = 0;
for (const BoneStreams& bone_stream : segment.const_bone_iterator())
{
if (bone_stream.is_stripped_from_output())
continue;
// Range data
if (are_any_enum_flags_set(range_reduction, range_reduction_flags8::scales) && !bone_stream.is_scale_constant)
result += k_segment_range_reduction_num_bytes_per_component * 6;
}
return result * clip.num_segments;
}
// Total animated rotation data size in bytes across all segments,
// rounding each segment's bit count up to whole bytes.
inline uint32_t calculate_segment_animated_rotation_size(const clip_context& clip)
{
    uint32_t total_size = 0;
    for (const SegmentContext& segment : clip.segment_iterator())
        total_size += align_to(segment.animated_pose_rotation_bit_size, 8) / 8;

    return total_size;
}
// Total animated translation data size in bytes across all segments,
// rounding each segment's bit count up to whole bytes.
inline uint32_t calculate_segment_animated_translation_size(const clip_context& clip)
{
    uint32_t total_size = 0;
    for (const SegmentContext& segment : clip.segment_iterator())
        total_size += align_to(segment.animated_pose_translation_bit_size, 8) / 8;

    return total_size;
}
// Total animated scale data size in bytes across all segments,
// rounding each segment's bit count up to whole bytes.
inline uint32_t calculate_segment_animated_scale_size(const clip_context& clip)
{
    uint32_t total_size = 0;
    for (const SegmentContext& segment : clip.segment_iterator())
        total_size += align_to(segment.animated_pose_scale_bit_size, 8) / 8;

    return total_size;
}
// Sums the precomputed animated data size (bytes) of every segment.
inline uint32_t calculate_segment_animated_data_size(const clip_context& clip)
{
    uint32_t total_size = 0;
    for (const SegmentContext& segment : clip.segment_iterator())
        total_size += segment.animated_data_size;

    return total_size;
}
// Logs compression statistics for a finished clip to the SJSON writer held in
// 'stats'. Summary fields are always written; when 'stats.logging' includes the
// detailed or exhaustive flags, the compressed buffer is also broken down into
// its sections and the remainder reported as unknown overhead.
// NOTE(review): 'allocator', 'raw_clip', and 'additive_base_clip_context' are
// only consumed by write_exhaustive_segment_stats under exhaustive logging.
inline void write_stats(iallocator& allocator, const track_array_qvvf& track_list, const clip_context& clip,
const compressed_tracks& compressed_clip, const compression_settings& settings, range_reduction_flags8 range_reduction, const clip_context& raw_clip,
const clip_context& additive_base_clip_context, const scope_profiler& compression_time,
output_stats& stats)
{
ACL_ASSERT(stats.writer != nullptr, "Attempted to log stats without a writer");
if (stats.writer == nullptr)
return;
// Top level summary: sizes, ratio, timing, and the formats used
const uint32_t raw_size = track_list.get_raw_size();
const uint32_t compressed_size = compressed_clip.get_size();
const double compression_ratio = double(raw_size) / double(compressed_size);
sjson::ObjectWriter& writer = *stats.writer;
writer["algorithm_name"] = get_algorithm_name(algorithm_type8::uniformly_sampled);
writer["algorithm_uid"] = settings.get_hash();
writer["clip_name"] = track_list.get_name().c_str();
writer["raw_size"] = raw_size;
writer["compressed_size"] = compressed_size;
writer["compression_ratio"] = compression_ratio;
writer["compression_time"] = compression_time.get_elapsed_seconds();
writer["duration"] = track_list.get_duration();
writer["num_samples"] = track_list.get_num_samples_per_track();
writer["num_bones"] = compressed_clip.get_num_tracks();
writer["rotation_format"] = get_rotation_format_name(settings.rotation_format);
writer["translation_format"] = get_vector_format_name(settings.translation_format);
writer["scale_format"] = get_vector_format_name(settings.scale_format);
writer["has_scale"] = clip.has_scale;
writer["error_metric"] = settings.error_metric->get_name();
if (are_all_enum_flags_set(stats.logging, stat_logging::detailed) || are_all_enum_flags_set(stats.logging, stat_logging::exhaustive))
{
// Classify every output sub-track of each type as default, constant, or
// animated. Only the first segment is inspected; the classification flags
// live on its bone streams.
uint32_t num_default_rotation_tracks = 0;
uint32_t num_default_translation_tracks = 0;
uint32_t num_default_scale_tracks = 0;
uint32_t num_constant_rotation_tracks = 0;
uint32_t num_constant_translation_tracks = 0;
uint32_t num_constant_scale_tracks = 0;
uint32_t num_animated_rotation_tracks = 0;
uint32_t num_animated_translation_tracks = 0;
uint32_t num_animated_scale_tracks = 0;
for (const BoneStreams& bone_stream : clip.segments[0].bone_iterator())
{
if (bone_stream.is_stripped_from_output())
continue;
if (bone_stream.is_rotation_default)
num_default_rotation_tracks++;
else if (bone_stream.is_rotation_constant)
num_constant_rotation_tracks++;
else
num_animated_rotation_tracks++;
if (bone_stream.is_translation_default)
num_default_translation_tracks++;
else if (bone_stream.is_translation_constant)
num_constant_translation_tracks++;
else
num_animated_translation_tracks++;
if (bone_stream.is_scale_default)
num_default_scale_tracks++;
else if (bone_stream.is_scale_constant)
num_constant_scale_tracks++;
else
num_animated_scale_tracks++;
}
const uint32_t num_default_tracks = num_default_rotation_tracks + num_default_translation_tracks + num_default_scale_tracks;
const uint32_t num_constant_tracks = num_constant_rotation_tracks + num_constant_translation_tracks + num_constant_scale_tracks;
const uint32_t num_animated_tracks = num_animated_rotation_tracks + num_animated_translation_tracks + num_animated_scale_tracks;
writer["num_default_rotation_tracks"] = num_default_rotation_tracks;
writer["num_default_translation_tracks"] = num_default_translation_tracks;
writer["num_default_scale_tracks"] = num_default_scale_tracks;
writer["num_constant_rotation_tracks"] = num_constant_rotation_tracks;
writer["num_constant_translation_tracks"] = num_constant_translation_tracks;
writer["num_constant_scale_tracks"] = num_constant_scale_tracks;
writer["num_animated_rotation_tracks"] = num_animated_rotation_tracks;
writer["num_animated_translation_tracks"] = num_animated_translation_tracks;
writer["num_animated_scale_tracks"] = num_animated_scale_tracks;
writer["num_default_tracks"] = num_default_tracks;
writer["num_constant_tracks"] = num_constant_tracks;
writer["num_animated_tracks"] = num_animated_tracks;
// Fixed headers plus clip level metadata sizes
const uint32_t clip_header_size = sizeof(raw_buffer_header) + sizeof(tracks_header) + sizeof(transform_tracks_header);
const uint32_t clip_metadata_common_size = calculate_clip_metadata_common_size(clip, compressed_clip);
const uint32_t clip_metadata_rotation_constant_size = get_packed_rotation_size(get_highest_variant_precision(get_rotation_variant(settings.rotation_format))) * num_constant_rotation_tracks;
const uint32_t clip_metadata_translation_constant_size = get_packed_vector_size(vector_format8::vector3f_full) * num_constant_translation_tracks;
const uint32_t clip_metadata_scale_constant_size = get_packed_vector_size(vector_format8::vector3f_full) * num_constant_scale_tracks;
writer["clip_header_size"] = clip_header_size;
writer["clip_metadata_common_size"] = clip_metadata_common_size;
writer["clip_metadata_rotation_constant_size"] = clip_metadata_rotation_constant_size;
writer["clip_metadata_translation_constant_size"] = clip_metadata_translation_constant_size;
writer["clip_metadata_scale_constant_size"] = clip_metadata_scale_constant_size;
// Clip level range reduction data: per-track range size times animated track count
const uint32_t range_rotation_size = are_any_enum_flags_set(range_reduction, range_reduction_flags8::rotations) ? get_range_reduction_rotation_size(settings.rotation_format) : 0;
const uint32_t range_translation_size = are_any_enum_flags_set(range_reduction, range_reduction_flags8::translations) ? k_clip_range_reduction_vector3_range_size : 0;
const uint32_t range_scale_size = are_any_enum_flags_set(range_reduction, range_reduction_flags8::scales) ? k_clip_range_reduction_vector3_range_size : 0;
const uint32_t clip_metadata_rotation_animated_size = range_rotation_size * num_animated_rotation_tracks;
const uint32_t clip_metadata_translation_animated_size = range_translation_size * num_animated_translation_tracks;
const uint32_t clip_metadata_scale_animated_size = range_scale_size * num_animated_scale_tracks;
writer["clip_metadata_rotation_animated_size"] = clip_metadata_rotation_animated_size;
writer["clip_metadata_translation_animated_size"] = clip_metadata_translation_animated_size;
writer["clip_metadata_scale_animated_size"] = clip_metadata_scale_animated_size;
// Per segment metadata and animated pose data sizes
const uint32_t segment_metadata_common_size = calculate_segment_metadata_common_size(clip, settings);
const uint32_t segment_metadata_rotation_size = calculate_segment_metadata_rotation_size(clip, range_reduction);
const uint32_t segment_metadata_translation_size = calculate_segment_metadata_translation_size(clip, range_reduction);
const uint32_t segment_metadata_scale_size = calculate_segment_metadata_scale_size(clip, range_reduction);
const uint32_t segment_animated_rotation_size = calculate_segment_animated_rotation_size(clip);
const uint32_t segment_animated_translation_size = calculate_segment_animated_translation_size(clip);
const uint32_t segment_animated_scale_size = calculate_segment_animated_scale_size(clip);
writer["segment_metadata_common_size"] = segment_metadata_common_size;
writer["segment_metadata_rotation_size"] = segment_metadata_rotation_size;
writer["segment_metadata_translation_size"] = segment_metadata_translation_size;
writer["segment_metadata_scale_size"] = segment_metadata_scale_size;
writer["segment_animated_rotation_size"] = segment_animated_rotation_size;
writer["segment_animated_translation_size"] = segment_animated_translation_size;
writer["segment_animated_scale_size"] = segment_animated_scale_size;
// Whatever is left after summing all accounted sections is overhead
// (padding/alignment, etc.) and must not be negative
uint32_t known_data_size = 0;
known_data_size += clip_header_size;
known_data_size += clip_metadata_common_size;
known_data_size += clip_metadata_rotation_constant_size;
known_data_size += clip_metadata_translation_constant_size;
known_data_size += clip_metadata_scale_constant_size;
known_data_size += clip_metadata_rotation_animated_size;
known_data_size += clip_metadata_translation_animated_size;
known_data_size += clip_metadata_scale_animated_size;
known_data_size += segment_metadata_common_size;
known_data_size += segment_metadata_rotation_size;
known_data_size += segment_metadata_translation_size;
known_data_size += segment_metadata_scale_size;
const uint32_t segment_animated_data_size = calculate_segment_animated_data_size(clip);
known_data_size += segment_animated_data_size;
const int32_t unknown_overhead_size = compressed_size - known_data_size;
ACL_ASSERT(unknown_overhead_size >= 0, "Overhead size should be positive");
writer["unknown_overhead_size"] = unknown_overhead_size;
}
// Segmenting settings used for this clip
writer["segmenting"] = [&](sjson::ObjectWriter& segmenting_writer)
{
segmenting_writer["num_segments"] = clip.num_segments;
segmenting_writer["ideal_num_samples"] = settings.segmenting.ideal_num_samples;
segmenting_writer["max_num_samples"] = settings.segmenting.max_num_samples;
};
// One entry per segment; depth of detail depends on the logging flags
writer["segments"] = [&](sjson::ArrayWriter& segments_writer)
{
for (const SegmentContext& segment : clip.segment_iterator())
{
segments_writer.push([&](sjson::ObjectWriter& segment_writer)
{
write_summary_segment_stats(segment, settings.rotation_format, settings.translation_format, settings.scale_format, segment_writer);
if (are_all_enum_flags_set(stats.logging, stat_logging::detailed))
write_detailed_segment_stats(segment, segment_writer);
if (are_all_enum_flags_set(stats.logging, stat_logging::exhaustive))
write_exhaustive_segment_stats(allocator, segment, raw_clip, additive_base_clip_context, settings, track_list, segment_writer);
});
}
};
}
}
}
ACL_IMPL_FILE_PRAGMA_POP
#endif // #if defined(SJSON_CPP_WRITER)

View File

@ -0,0 +1,102 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2017 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/error.h"
#include "acl/core/bitset.h"
#include "acl/core/impl/compiler_utils.h"
#include "acl/compression/impl/clip_context.h"
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
namespace acl_impl
{
// Writes the default-track bitset: for every output bone, one bit per
// rotation/translation sub-track plus one for scale when the clip has scale.
// Returns the number of bytes written (whole 32 bit words).
inline uint32_t write_default_track_bitset(const clip_context& clip, uint32_t* default_tracks_bitset, bitset_description bitset_desc, const uint32_t* output_bone_mapping, uint32_t num_output_bones)
{
    ACL_ASSERT(default_tracks_bitset != nullptr, "'default_tracks_bitset' cannot be null!");

    // Only the first segment is needed, it carries the clip wide flags
    const SegmentContext& segment = clip.segments[0];

    bitset_reset(default_tracks_bitset, bitset_desc, false);

    uint32_t bit_offset = 0;
    for (uint32_t output_index = 0; output_index < num_output_bones; ++output_index)
    {
        const BoneStreams& bone_stream = segment.bone_streams[output_bone_mapping[output_index]];

        bitset_set(default_tracks_bitset, bitset_desc, bit_offset++, bone_stream.is_rotation_default);
        bitset_set(default_tracks_bitset, bitset_desc, bit_offset++, bone_stream.is_translation_default);
        if (clip.has_scale)
            bitset_set(default_tracks_bitset, bitset_desc, bit_offset++, bone_stream.is_scale_default);
    }

    ACL_ASSERT(bit_offset <= bitset_desc.get_num_bits(), "Too many tracks found for bitset");

    // Round the bit count up to whole 32 bit words and convert to bytes
    const uint32_t num_bits_written = (clip.has_scale ? 3 : 2) * num_output_bones;
    return ((num_bits_written + 31) / 32) * sizeof(uint32_t);
}
// Writes the constant-track bitset: for every output bone, one bit per
// rotation/translation sub-track plus one for scale when the clip has scale.
// Returns the number of bytes written (whole 32 bit words).
inline uint32_t write_constant_track_bitset(const clip_context& clip, uint32_t* constant_tracks_bitset, bitset_description bitset_desc, const uint32_t* output_bone_mapping, uint32_t num_output_bones)
{
    ACL_ASSERT(constant_tracks_bitset != nullptr, "'constant_tracks_bitset' cannot be null!");

    // Only the first segment is needed, it carries the clip wide flags
    const SegmentContext& segment = clip.segments[0];

    bitset_reset(constant_tracks_bitset, bitset_desc, false);

    uint32_t bit_offset = 0;
    for (uint32_t output_index = 0; output_index < num_output_bones; ++output_index)
    {
        const BoneStreams& bone_stream = segment.bone_streams[output_bone_mapping[output_index]];

        bitset_set(constant_tracks_bitset, bitset_desc, bit_offset++, bone_stream.is_rotation_constant);
        bitset_set(constant_tracks_bitset, bitset_desc, bit_offset++, bone_stream.is_translation_constant);
        if (clip.has_scale)
            bitset_set(constant_tracks_bitset, bitset_desc, bit_offset++, bone_stream.is_scale_constant);
    }

    ACL_ASSERT(bit_offset <= bitset_desc.get_num_bits(), "Too many tracks found for bitset");

    // Round the bit count up to whole 32 bit words and convert to bytes
    const uint32_t num_bits_written = (clip.has_scale ? 3 : 2) * num_output_bones;
    return ((num_bits_written + 31) / 32) * sizeof(uint32_t);
}
}
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,636 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2017 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/iallocator.h"
#include "acl/core/error.h"
#include "acl/core/track_formats.h"
#include "acl/core/variable_bit_rates.h"
#include "acl/compression/impl/animated_track_utils.h"
#include "acl/compression/impl/clip_context.h"
#include "acl/core/impl/compressed_headers.h"
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
namespace acl_impl
{
// Size in bytes of the constant sample section: one packed sample per
// constant (but non-default) sub-track of every non-stripped bone.
inline uint32_t get_constant_data_size(const clip_context& clip)
{
    // Only the first segment is needed, it carries the clip wide flags
    const SegmentContext& segment = clip.segments[0];

    uint32_t total_size = 0;
    for (uint32_t bone_index = 0; bone_index < clip.num_bones; ++bone_index)
    {
        const BoneStreams& bone_stream = segment.bone_streams[bone_index];

        // Skip bones stripped from the output
        if (bone_stream.output_index == k_invalid_track_index)
            continue;

        if (bone_stream.is_rotation_constant && !bone_stream.is_rotation_default)
            total_size += bone_stream.rotations.get_packed_sample_size();

        if (bone_stream.is_translation_constant && !bone_stream.is_translation_default)
            total_size += bone_stream.translations.get_packed_sample_size();

        if (clip.has_scale && bone_stream.is_scale_constant && !bone_stream.is_scale_default)
            total_size += bone_stream.scales.get_packed_sample_size();
    }

    return total_size;
}
// Counts the constant (but non-default) rotation/translation/scale samples
// that the constant data section will hold, per sub-track type.
inline void get_num_constant_samples(const clip_context& clip, uint32_t& out_num_constant_rotation_samples, uint32_t& out_num_constant_translation_samples, uint32_t& out_num_constant_scale_samples)
{
    // Only the first segment is needed, it carries the clip wide flags
    const SegmentContext& segment = clip.segments[0];

    uint32_t num_rotations = 0;
    uint32_t num_translations = 0;
    uint32_t num_scales = 0;
    for (uint32_t bone_index = 0; bone_index < clip.num_bones; ++bone_index)
    {
        const BoneStreams& bone_stream = segment.bone_streams[bone_index];

        // Skip bones stripped from the output
        if (bone_stream.output_index == k_invalid_track_index)
            continue;

        if (bone_stream.is_rotation_constant && !bone_stream.is_rotation_default)
            num_rotations++;

        if (bone_stream.is_translation_constant && !bone_stream.is_translation_default)
            num_translations++;

        if (clip.has_scale && bone_stream.is_scale_constant && !bone_stream.is_scale_default)
            num_scales++;
    }

    out_num_constant_rotation_samples = num_rotations;
    out_num_constant_translation_samples = num_translations;
    out_num_constant_scale_samples = num_scales;
}
inline void get_animated_variable_bit_rate_data_size(const TrackStream& track_stream, uint32_t num_samples, uint32_t& out_num_animated_data_bits, uint32_t& out_num_animated_pose_bits)
{
const uint8_t bit_rate = track_stream.get_bit_rate();
const uint32_t num_bits_at_bit_rate = get_num_bits_at_bit_rate(bit_rate) * 3; // 3 components
out_num_animated_data_bits += num_bits_at_bit_rate * num_samples;
out_num_animated_pose_bits += num_bits_at_bit_rate;
}
// Accumulates the total and per-pose bit counts of an animated track,
// handling both variable and fixed bit rate tracks.
inline void calculate_animated_data_size(const TrackStream& track_stream, uint32_t& num_animated_data_bits, uint32_t& num_animated_pose_bits)
{
    const uint32_t num_samples = track_stream.get_num_samples();

    if (track_stream.is_bit_rate_variable())
    {
        get_animated_variable_bit_rate_data_size(track_stream, num_samples, num_animated_data_bits, num_animated_pose_bits);
        return;
    }

    // Fixed bit rate: every sample occupies its full packed size
    const uint32_t sample_size_bits = track_stream.get_packed_sample_size() * 8;
    num_animated_data_bits += sample_size_bits * num_samples;
    num_animated_pose_bits += sample_size_bits;
}
// Computes and stores every segment's animated data sizes: per-type pose bit
// sizes, the byte-aligned total data size, and the per-pose bit size.
inline void calculate_animated_data_size(clip_context& clip, const uint32_t* output_bone_mapping, uint32_t num_output_bones)
{
    for (SegmentContext& segment : clip.segment_iterator())
    {
        uint32_t rotation_data_bits = 0;
        uint32_t translation_data_bits = 0;
        uint32_t scale_data_bits = 0;
        uint32_t pose_bits = 0;

        for (uint32_t output_index = 0; output_index < num_output_bones; ++output_index)
        {
            const BoneStreams& bone_stream = segment.bone_streams[output_bone_mapping[output_index]];

            // Constant sub-tracks carry no animated data
            if (!bone_stream.is_rotation_constant)
                calculate_animated_data_size(bone_stream.rotations, rotation_data_bits, pose_bits);

            if (!bone_stream.is_translation_constant)
                calculate_animated_data_size(bone_stream.translations, translation_data_bits, pose_bits);

            if (!bone_stream.is_scale_constant)
                calculate_animated_data_size(bone_stream.scales, scale_data_bits, pose_bits);
        }

        segment.animated_pose_rotation_bit_size = rotation_data_bits;
        segment.animated_pose_translation_bit_size = translation_data_bits;
        segment.animated_pose_scale_bit_size = scale_data_bits;
        // Total size is rounded up to whole bytes
        segment.animated_data_size = align_to(rotation_data_bits + translation_data_bits + scale_data_bits, 8) / 8;
        segment.animated_pose_bit_size = pose_bits;
    }
}
// Size in bytes of the per-track format section: one byte per animated
// sub-track with a variable format, with the rotation count padded to a
// multiple of 4 for alignment. Optionally outputs the padded sub-track count.
inline uint32_t get_format_per_track_data_size(const clip_context& clip, rotation_format8 rotation_format, vector_format8 translation_format, vector_format8 scale_format, uint32_t* out_num_animated_variable_sub_tracks_padded = nullptr)
{
    const bool is_rotation_variable = is_rotation_format_variable(rotation_format);
    const bool is_translation_variable = is_vector_format_variable(translation_format);
    const bool is_scale_variable = is_vector_format_variable(scale_format);

    // Only the first segment is inspected, every segment shares this layout
    const SegmentContext& segment = clip.segments[0];

    uint32_t num_variable_sub_tracks = 0;
    uint32_t num_variable_rotations = 0;
    for (const BoneStreams& bone_stream : segment.const_bone_iterator())
    {
        if (bone_stream.is_stripped_from_output())
            continue;

        if (is_rotation_variable && !bone_stream.is_rotation_constant)
        {
            num_variable_sub_tracks++;
            num_variable_rotations++;
        }

        if (is_translation_variable && !bone_stream.is_translation_constant)
            num_variable_sub_tracks++;

        if (is_scale_variable && !bone_stream.is_scale_constant)
            num_variable_sub_tracks++;
    }

    // Pad the rotation entries up to a multiple of 4 for alignment
    const uint32_t rotation_remainder = num_variable_rotations % 4;
    if (rotation_remainder != 0)
        num_variable_sub_tracks += 4 - rotation_remainder;

    if (out_num_animated_variable_sub_tracks_padded != nullptr)
        *out_num_animated_variable_sub_tracks_padded = num_variable_sub_tracks;    // 1 byte per sub-track

    return num_variable_sub_tracks;
}
// Writes the raw constant samples into 'constant_data' and returns the number
// of bytes written. The output must exactly fill 'constant_data_size' (asserted
// at the end when assert checks are enabled).
// Two layouts exist, selected at compile time:
// - ACL_IMPL_USE_CONSTANT_GROUPS: samples are emitted in groups of up to 4
//   sub-tracks via constant_group_writer.
// - Otherwise: all constant rotations first, then translations, then scales.
// In both layouts, when the rotation format drops the W component the rotation
// samples are swizzled into XXXX, YYYY, ZZZZ blocks of up to 4 values.
inline uint32_t write_constant_track_data(const clip_context& clip, rotation_format8 rotation_format, uint8_t* constant_data, uint32_t constant_data_size, const uint32_t* output_bone_mapping, uint32_t num_output_bones)
{
ACL_ASSERT(constant_data != nullptr, "'constant_data' cannot be null!");
(void)constant_data_size;
// Only use the first segment, it contains the necessary information
const SegmentContext& segment = clip.segments[0];
// 'constant_data_end' only exists for the bounds asserts below
#if defined(ACL_HAS_ASSERT_CHECKS)
const uint8_t* constant_data_end = add_offset_to_ptr<uint8_t>(constant_data, constant_data_size);
#endif
const uint8_t* constant_data_start = constant_data;
#if defined(ACL_IMPL_USE_CONSTANT_GROUPS)
// Data is ordered in groups of 4 constant sub-tracks (e.g rot0, rot1, rot2, rot3)
// Order depends on animated track order. If we have 6 constant rotation tracks before the first constant
// translation track, we'll have 8 constant rotation sub-tracks followed by 4 constant translation sub-tracks.
// Once we reach the end, there is no extra padding. The last group might be less than 4 sub-tracks.
// This is because we always process 4 constant sub-tracks at a time and cache the results.
// Groups are written in the order of first use and as such are sorted by their lowest sub-track index.
// If our rotation format drops the W component, we swizzle the data to store XXXX, YYYY, ZZZZ
const bool swizzle_rotations = get_rotation_variant(rotation_format) == rotation_variant8::quat_drop_w;
// Staging buffers for the current group of up to 4 sub-tracks
float xxxx_group[4];
float yyyy_group[4];
float zzzz_group[4];
rtm::vector4f constant_group4[4];
rtm::float3f constant_group3[4];
// Called once per sub-track: stage the bone's first sample into the group buffers
auto group_entry_action = [&](animation_track_type8 group_type, uint32_t group_size, uint32_t bone_index)
{
const BoneStreams& bone_stream = segment.bone_streams[bone_index];
if (group_type == animation_track_type8::rotation)
{
if (swizzle_rotations)
{
const rtm::vector4f sample = bone_stream.rotations.get_raw_sample<rtm::vector4f>(0);
xxxx_group[group_size] = rtm::vector_get_x(sample);
yyyy_group[group_size] = rtm::vector_get_y(sample);
zzzz_group[group_size] = rtm::vector_get_z(sample);
}
else
{
const rtm::vector4f sample = bone_stream.rotations.get_raw_sample<rtm::vector4f>(0);
constant_group4[group_size] = sample;
}
}
else if (group_type == animation_track_type8::translation)
{
const rtm::vector4f sample = bone_stream.translations.get_raw_sample<rtm::vector4f>(0);
rtm::vector_store3(sample, &constant_group3[group_size]);
}
else
{
const rtm::vector4f sample = bone_stream.scales.get_raw_sample<rtm::vector4f>(0);
rtm::vector_store3(sample, &constant_group3[group_size]);
}
};
// Called when a group is full (or at the end): copy the staged samples out
// and advance the write cursor
auto group_flush_action = [&](animation_track_type8 group_type, uint32_t group_size)
{
if (group_type == animation_track_type8::rotation)
{
if (swizzle_rotations)
{
std::memcpy(constant_data, &xxxx_group[0], group_size * sizeof(float));
constant_data += group_size * sizeof(float);
std::memcpy(constant_data, &yyyy_group[0], group_size * sizeof(float));
constant_data += group_size * sizeof(float);
std::memcpy(constant_data, &zzzz_group[0], group_size * sizeof(float));
constant_data += group_size * sizeof(float);
}
else
{
// If we don't swizzle, we have a full quaternion
std::memcpy(constant_data, &constant_group4[0], group_size * sizeof(rtm::vector4f));
constant_data += group_size * sizeof(rtm::vector4f);
}
}
else
{
std::memcpy(constant_data, &constant_group3[0], group_size * sizeof(rtm::float3f));
constant_data += group_size * sizeof(rtm::float3f);
}
ACL_ASSERT(constant_data <= constant_data_end, "Invalid constant data offset. Wrote too much data.");
};
constant_group_writer(segment, output_bone_mapping, num_output_bones, group_entry_action, group_flush_action);
#else
// If our rotation format drops the W component, we swizzle the data to store XXXX, YYYY, ZZZZ
const bool swizzle_rotations = get_rotation_variant(rotation_format) == rotation_variant8::quat_drop_w;
// Staging buffers for up to 4 swizzled rotations at a time
float xxxx[4];
float yyyy[4];
float zzzz[4];
uint32_t num_swizzle_written = 0;
// Write rotations first
for (uint32_t output_index = 0; output_index < num_output_bones; ++output_index)
{
const uint32_t bone_index = output_bone_mapping[output_index];
const BoneStreams& bone_stream = segment.bone_streams[bone_index];
if (!bone_stream.is_rotation_default && bone_stream.is_rotation_constant)
{
if (swizzle_rotations)
{
// Stage components; flush a full block of 4 as XXXX, YYYY, ZZZZ
const rtm::vector4f rotation = bone_stream.rotations.get_raw_sample<rtm::vector4f>(0);
xxxx[num_swizzle_written] = rtm::vector_get_x(rotation);
yyyy[num_swizzle_written] = rtm::vector_get_y(rotation);
zzzz[num_swizzle_written] = rtm::vector_get_z(rotation);
num_swizzle_written++;
if (num_swizzle_written >= 4)
{
std::memcpy(constant_data, &xxxx[0], sizeof(xxxx));
constant_data += sizeof(xxxx);
std::memcpy(constant_data, &yyyy[0], sizeof(yyyy));
constant_data += sizeof(yyyy);
std::memcpy(constant_data, &zzzz[0], sizeof(zzzz));
constant_data += sizeof(zzzz);
num_swizzle_written = 0;
}
}
else
{
// No swizzling: copy the raw sample bytes directly
const uint8_t* rotation_ptr = bone_stream.rotations.get_raw_sample_ptr(0);
uint32_t sample_size = bone_stream.rotations.get_sample_size();
std::memcpy(constant_data, rotation_ptr, sample_size);
constant_data += sample_size;
}
ACL_ASSERT(constant_data <= constant_data_end, "Invalid constant data offset. Wrote too much data.");
}
}
// Flush the final, possibly partial, swizzle block
if (swizzle_rotations && num_swizzle_written != 0)
{
std::memcpy(constant_data, &xxxx[0], num_swizzle_written * sizeof(float));
constant_data += num_swizzle_written * sizeof(float);
std::memcpy(constant_data, &yyyy[0], num_swizzle_written * sizeof(float));
constant_data += num_swizzle_written * sizeof(float);
std::memcpy(constant_data, &zzzz[0], num_swizzle_written * sizeof(float));
constant_data += num_swizzle_written * sizeof(float);
ACL_ASSERT(constant_data <= constant_data_end, "Invalid constant data offset. Wrote too much data.");
}
// Next, write translations
for (uint32_t output_index = 0; output_index < num_output_bones; ++output_index)
{
const uint32_t bone_index = output_bone_mapping[output_index];
const BoneStreams& bone_stream = segment.bone_streams[bone_index];
if (!bone_stream.is_translation_default && bone_stream.is_translation_constant)
{
const uint8_t* translation_ptr = bone_stream.translations.get_raw_sample_ptr(0);
uint32_t sample_size = bone_stream.translations.get_sample_size();
std::memcpy(constant_data, translation_ptr, sample_size);
constant_data += sample_size;
ACL_ASSERT(constant_data <= constant_data_end, "Invalid constant data offset. Wrote too much data.");
}
}
// Finally, write scales
if (clip.has_scale)
{
for (uint32_t output_index = 0; output_index < num_output_bones; ++output_index)
{
const uint32_t bone_index = output_bone_mapping[output_index];
const BoneStreams& bone_stream = segment.bone_streams[bone_index];
if (!bone_stream.is_scale_default && bone_stream.is_scale_constant)
{
const uint8_t* scale_ptr = bone_stream.scales.get_raw_sample_ptr(0);
uint32_t sample_size = bone_stream.scales.get_sample_size();
std::memcpy(constant_data, scale_ptr, sample_size);
constant_data += sample_size;
ACL_ASSERT(constant_data <= constant_data_end, "Invalid constant data offset. Wrote too much data.");
}
}
}
#endif
ACL_ASSERT(constant_data == constant_data_end, "Invalid constant data offset. Wrote too little data.");
return safe_static_cast<uint32_t>(constant_data - constant_data_start);
}
// Serializes one sample of an animated track stream into the packed animated bit stream.
// 'animated_track_data_begin' is the start of the destination buffer, 'out_bit_offset' is the
// running bit position within it (advanced by the number of bits written), and
// 'out_animated_track_data' is updated to point at the byte that contains the new bit offset.
// Components are byte swapped before packing so the bit stream is endian independent.
inline void write_animated_track_data(const TrackStream& track_stream, uint32_t sample_index, uint8_t* animated_track_data_begin, uint8_t*& out_animated_track_data, uint64_t& out_bit_offset)
{
const uint8_t* raw_sample_ptr = track_stream.get_raw_sample_ptr(sample_index);
if (track_stream.is_bit_rate_variable())
{
const uint8_t bit_rate = track_stream.get_bit_rate();
const uint64_t num_bits_at_bit_rate = get_num_bits_at_bit_rate(bit_rate) * 3; // 3 components
// Track is constant, our constant sample is stored in the range information
ACL_ASSERT(!is_constant_bit_rate(bit_rate), "Cannot write constant variable track data");
if (is_raw_bit_rate(bit_rate))
{
// Raw bit rate: three full precision 32 bit components written back to back.
const uint32_t* raw_sample_u32 = safe_ptr_cast<const uint32_t>(raw_sample_ptr);
const uint32_t x = byte_swap(raw_sample_u32[0]);
memcpy_bits(animated_track_data_begin, out_bit_offset + 0, &x, 0, 32);
const uint32_t y = byte_swap(raw_sample_u32[1]);
memcpy_bits(animated_track_data_begin, out_bit_offset + 32, &y, 0, 32);
const uint32_t z = byte_swap(raw_sample_u32[2]);
memcpy_bits(animated_track_data_begin, out_bit_offset + 64, &z, 0, 32);
}
else
{
// Quantized sample: all three components are already packed into a single 64 bit value.
const uint64_t raw_sample_u64 = *safe_ptr_cast<const uint64_t>(raw_sample_ptr);
memcpy_bits(animated_track_data_begin, out_bit_offset, &raw_sample_u64, 0, num_bits_at_bit_rate);
}
out_bit_offset += num_bits_at_bit_rate;
out_animated_track_data = animated_track_data_begin + (out_bit_offset / 8);
}
else
{
// Fixed bit rate: full precision components, 3 or 4 of them depending on the packed sample size.
const uint32_t* raw_sample_u32 = safe_ptr_cast<const uint32_t>(raw_sample_ptr);
const uint32_t x = byte_swap(raw_sample_u32[0]);
memcpy_bits(animated_track_data_begin, out_bit_offset + 0, &x, 0, 32);
const uint32_t y = byte_swap(raw_sample_u32[1]);
memcpy_bits(animated_track_data_begin, out_bit_offset + 32, &y, 0, 32);
const uint32_t z = byte_swap(raw_sample_u32[2]);
memcpy_bits(animated_track_data_begin, out_bit_offset + 64, &z, 0, 32);
const uint32_t sample_size = track_stream.get_packed_sample_size();
const bool has_w_component = sample_size == (sizeof(float) * 4);
if (has_w_component)
{
const uint32_t w = byte_swap(raw_sample_u32[3]);
memcpy_bits(animated_track_data_begin, out_bit_offset + 96, &w, 0, 32);
}
out_bit_offset += has_w_component ? 128 : 96;
out_animated_track_data = animated_track_data_begin + (out_bit_offset / 8);
}
}
// Writes every animated sample of a segment into 'animated_track_data'.
// Returns the number of bytes written; 'animated_data_size' is only used to validate with asserts.
// Samples are staged per group into a small aligned scratch buffer and then flushed into the
// destination bit stream so that groups stay contiguous.
inline uint32_t write_animated_track_data(const SegmentContext& segment, uint8_t* animated_track_data, uint32_t animated_data_size, const uint32_t* output_bone_mapping, uint32_t num_output_bones)
{
ACL_ASSERT(animated_track_data != nullptr, "'animated_track_data' cannot be null!");
(void)animated_data_size;
uint8_t* animated_track_data_begin = animated_track_data;
#if defined(ACL_HAS_ASSERT_CHECKS)
const uint8_t* animated_track_data_end = add_offset_to_ptr<uint8_t>(animated_track_data, animated_data_size);
#endif
const uint8_t* animated_track_data_start = animated_track_data;
uint64_t bit_offset = 0;
// Data is sorted first by time, second by bone.
// This ensures that all bones are contiguous in memory when we sample a particular time.
// Data is ordered in groups of 4 animated sub-tracks (e.g rot0, rot1, rot2, rot3)
// Order depends on animated track order. If we have 6 animated rotation tracks before the first animated
// translation track, we'll have 8 animated rotation sub-tracks followed by 4 animated translation sub-tracks.
// Once we reach the end, there is no extra padding. The last group might be less than 4 sub-tracks.
// This is because we always process 4 animated sub-tracks at a time and cache the results.
// Groups are written in the order of first use and as such are sorted by their lowest sub-track index.
// For animated samples, when we have a constant bit rate (bit rate 0), we do not store samples
// and as such the group that contains that sub-track won't contain 4 samples.
// The largest sample is a full precision vector4f, we can contain at most 4 samples
alignas(16) uint8_t group_animated_track_data[sizeof(rtm::vector4f) * 4];
uint64_t group_bit_offset = 0;
uint32_t num_group_samples = 0;
// Sink pointer for the per-sample writer; we only care about the running bit offset here.
uint8_t* dummy_animated_track_data_ptr = nullptr;
auto group_filter_action = [&](animation_track_type8 group_type, uint32_t bone_index)
{
(void)group_type;
(void)bone_index;
// We want a group of every animated track
// If a track is variable with a constant bit rate (bit rate 0), the group will have fewer entries
return true;
};
// Flushes the staged group scratch buffer into the destination bit stream and resets the staging state.
auto group_flush_action = [&](animation_track_type8 group_type, uint32_t group_size)
{
(void)group_type;
if (group_size == 0)
return; // Empty group, skip
memcpy_bits(animated_track_data_begin, bit_offset, &group_animated_track_data[0], 0, group_bit_offset);
bit_offset += group_bit_offset;
group_bit_offset = 0;
num_group_samples = 0;
animated_track_data = animated_track_data_begin + (bit_offset / 8);
ACL_ASSERT(animated_track_data <= animated_track_data_end, "Invalid animated track data offset. Wrote too much data.");
};
// TODO: Use a group writer context object to avoid alloc/free/work in loop for every sample when it doesn't change
for (uint32_t sample_index = 0; sample_index < segment.num_samples; ++sample_index)
{
// Stages the current sample of one sub-track into the group scratch buffer.
// Constant bit rate sub-tracks store their sample in the range information, nothing to write here.
auto group_entry_action = [&](animation_track_type8 group_type, uint32_t group_size, uint32_t bone_index)
{
(void)group_size;
const BoneStreams& bone_stream = segment.bone_streams[bone_index];
if (group_type == animation_track_type8::rotation)
{
if (!is_constant_bit_rate(bone_stream.rotations.get_bit_rate()))
{
write_animated_track_data(bone_stream.rotations, sample_index, group_animated_track_data, dummy_animated_track_data_ptr, group_bit_offset);
num_group_samples++;
}
}
else if (group_type == animation_track_type8::translation)
{
if (!is_constant_bit_rate(bone_stream.translations.get_bit_rate()))
{
write_animated_track_data(bone_stream.translations, sample_index, group_animated_track_data, dummy_animated_track_data_ptr, group_bit_offset);
num_group_samples++;
}
}
else
{
if (!is_constant_bit_rate(bone_stream.scales.get_bit_rate()))
{
write_animated_track_data(bone_stream.scales, sample_index, group_animated_track_data, dummy_animated_track_data_ptr, group_bit_offset);
num_group_samples++;
}
}
};
animated_group_writer(segment, output_bone_mapping, num_output_bones, group_filter_action, group_entry_action, group_flush_action);
}
// Round the final bit offset up to a whole byte so the returned size covers every bit written.
if (bit_offset != 0)
animated_track_data = animated_track_data_begin + (align_to(bit_offset, 8) / 8);
ACL_ASSERT((bit_offset / segment.num_samples) == segment.animated_pose_bit_size, "Unexpected number of bits written");
ACL_ASSERT(animated_track_data == animated_track_data_end, "Invalid animated track data offset. Wrote too little data.");
return safe_static_cast<uint32_t>(animated_track_data - animated_track_data_start);
}
// Writes the per-track format metadata (one byte per variable sub-track: the number of bits
// used per component at its bit rate). Returns the number of bytes written;
// 'format_per_track_data_size' is only used to validate with asserts.
inline uint32_t write_format_per_track_data(const SegmentContext& segment, uint8_t* format_per_track_data, uint32_t format_per_track_data_size, const uint32_t* output_bone_mapping, uint32_t num_output_bones)
{
ACL_ASSERT(format_per_track_data != nullptr, "'format_per_track_data' cannot be null!");
(void)format_per_track_data_size;
#if defined(ACL_HAS_ASSERT_CHECKS)
const uint8_t* format_per_track_data_end = add_offset_to_ptr<uint8_t>(format_per_track_data, format_per_track_data_size);
#endif
const uint8_t* format_per_track_data_start = format_per_track_data;
// Data is ordered in groups of 4 animated sub-tracks (e.g rot0, rot1, rot2, rot3)
// Order depends on animated track order. If we have 6 animated rotation tracks before the first animated
// translation track, we'll have 8 animated rotation sub-tracks followed by 4 animated translation sub-tracks.
// Once we reach the end, there is no extra padding. The last group might be less than 4 sub-tracks.
// This is because we always process 4 animated sub-tracks at a time and cache the results.
// Groups are written in the order of first use and as such are sorted by their lowest sub-track index.
// To keep decompression simpler, rotations are padded to 4 elements even if the last group is partial
uint8_t format_per_track_group[4];
// Only variable bit rate sub-tracks carry format metadata.
auto group_filter_action = [&](animation_track_type8 group_type, uint32_t bone_index)
{
const BoneStreams& bone_stream = segment.bone_streams[bone_index];
if (group_type == animation_track_type8::rotation)
return bone_stream.rotations.is_bit_rate_variable();
else if (group_type == animation_track_type8::translation)
return bone_stream.translations.is_bit_rate_variable();
else
return bone_stream.scales.is_bit_rate_variable();
};
// Stages the bit count of one sub-track into the current group; 'group_size' is the slot index.
auto group_entry_action = [&](animation_track_type8 group_type, uint32_t group_size, uint32_t bone_index)
{
const BoneStreams& bone_stream = segment.bone_streams[bone_index];
if (group_type == animation_track_type8::rotation)
format_per_track_group[group_size] = (uint8_t)get_num_bits_at_bit_rate(bone_stream.rotations.get_bit_rate());
else if (group_type == animation_track_type8::translation)
format_per_track_group[group_size] = (uint8_t)get_num_bits_at_bit_rate(bone_stream.translations.get_bit_rate());
else
format_per_track_group[group_size] = (uint8_t)get_num_bits_at_bit_rate(bone_stream.scales.get_bit_rate());
};
// Copies the staged group into the output; rotations always emit 4 bytes (partial groups are zero padded).
auto group_flush_action = [&](animation_track_type8 group_type, uint32_t group_size)
{
const uint32_t copy_size = group_type == animation_track_type8::rotation ? 4 : group_size;
std::memcpy(format_per_track_data, &format_per_track_group[0], copy_size);
format_per_track_data += copy_size;
// Zero out the temporary buffer for the final group to not contain partial garbage
std::memset(&format_per_track_group[0], 0, sizeof(format_per_track_group));
ACL_ASSERT(format_per_track_data <= format_per_track_data_end, "Invalid format per track data offset. Wrote too much data.");
};
animated_group_writer(segment, output_bone_mapping, num_output_bones, group_filter_action, group_entry_action, group_flush_action);
ACL_ASSERT(format_per_track_data == format_per_track_data_end, "Invalid format per track data offset. Wrote too little data.");
return safe_static_cast<uint32_t>(format_per_track_data - format_per_track_data_start);
}
// Writes the list of animated sub-track group types followed by a 0xFF sentinel that marks
// the end of the list. Returns the number of bytes written;
// 'animated_sub_track_groups_data_size' is only used to validate with asserts.
inline uint32_t write_animated_group_types(const animation_track_type8* animated_sub_track_groups, uint32_t num_animated_groups, animation_track_type8* animated_sub_track_groups_data, uint32_t animated_sub_track_groups_data_size)
{
	(void)animated_sub_track_groups_data_size;

	// Copy every group type as-is.
	std::memcpy(animated_sub_track_groups_data, animated_sub_track_groups, sizeof(animation_track_type8) * num_animated_groups);

	// Append the terminator right after the last group.
	animated_sub_track_groups_data[num_animated_groups] = static_cast<animation_track_type8>(0xFF);

	const uint32_t num_bytes_written = num_animated_groups + 1;
	ACL_ASSERT(num_bytes_written == animated_sub_track_groups_data_size, "Too little or too much data written");
	return num_bytes_written;
}
}
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,189 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2019 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/variable_bit_rates.h"
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/impl/compressed_headers.h"
#include "acl/compression/impl/track_list_context.h"
#include <rtm/vector4f.h>
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
namespace acl_impl
{
// Writes one track_metadata entry per output track (currently just the scalar bit rate;
// constant tracks store 0). Passing nullptr performs a size-measuring pass only.
// Returns the number of bytes written (or required).
inline uint32_t write_track_metadata(const track_list_context& context, track_metadata* per_track_metadata)
{
	ACL_ASSERT(context.is_valid(), "Invalid context");

	if (per_track_metadata != nullptr)
	{
		for (uint32_t output_index = 0; output_index < context.num_output_tracks; ++output_index)
		{
			const uint32_t track_index = context.track_output_indices[output_index];
			const bool track_is_constant = context.is_constant(track_index);

			// Constant tracks have no bit rate, write 0 for them.
			per_track_metadata[output_index].bit_rate = track_is_constant ? 0 : context.bit_rate_list[track_index].scalar.value;
		}
	}

	// Fixed size entry per output track.
	return safe_static_cast<uint32_t>(context.num_output_tracks * sizeof(track_metadata));
}
// Writes the constant sample of every constant output track, tightly packed.
// Passing nullptr performs a size-measuring pass only.
// Returns the number of bytes written (or required).
inline uint32_t write_track_constant_values(const track_list_context& context, float* constant_values)
{
	ACL_ASSERT(context.is_valid(), "Invalid context");

	uint8_t* out_ptr = reinterpret_cast<uint8_t*>(constant_values);
	uint32_t num_bytes_written = 0;

	for (uint32_t output_index = 0; output_index < context.num_output_tracks; ++output_index)
	{
		const uint32_t track_index = context.track_output_indices[output_index];

		// Animated tracks have no constant value to store.
		if (!context.is_constant(track_index))
			continue;

		const track& ref_track = (*context.reference_list)[track_index];
		const uint32_t element_size = ref_track.get_sample_size();

		// The first sample is representative of the whole constant track.
		if (constant_values != nullptr)
			std::memcpy(out_ptr + num_bytes_written, ref_track[0], element_size);

		num_bytes_written += element_size;
	}

	return num_bytes_written;
}
// Writes the range information (min followed by extent) of every animated, non-raw output track.
// Passing nullptr performs a size-measuring pass only.
// Returns the number of bytes written (or required).
inline uint32_t write_track_range_values(const track_list_context& context, float* range_values)
{
ACL_ASSERT(context.is_valid(), "Invalid context");
uint8_t* output_buffer = reinterpret_cast<uint8_t*>(range_values);
const uint8_t* output_buffer_start = output_buffer;
for (uint32_t output_index = 0; output_index < context.num_output_tracks; ++output_index)
{
const uint32_t track_index = context.track_output_indices[output_index];
// Constant tracks have no range information.
if (context.is_constant(track_index))
continue;
const uint8_t bit_rate = context.bit_rate_list[track_index].scalar.value;
// Raw tracks are stored at full precision and need no range to reconstruct.
if (is_raw_bit_rate(bit_rate))
continue;
const track& ref_track = (*context.reference_list)[track_index];
const track_range& range = context.range_list[track_index];
const uint32_t element_size = ref_track.get_sample_size();
if (range_values != nullptr)
{
// Only support scalarf for now
ACL_ASSERT(range.category == track_category8::scalarf, "Unsupported category");
const rtm::vector4f range_min = range.range.scalarf.get_min();
const rtm::vector4f range_extent = range.range.scalarf.get_extent();
// Only the leading 'element_size' bytes of each vector4f are copied; tracks with
// fewer than 4 components store just the components they have.
std::memcpy(output_buffer, &range_min, element_size);
std::memcpy(output_buffer + element_size, &range_extent, element_size);
}
output_buffer += element_size; // Min
output_buffer += element_size; // Extent
}
return safe_static_cast<uint32_t>(output_buffer - output_buffer_start);
}
// Packs the animated (non-constant) samples of every output track into a bit stream, sorted by
// time first and track second. Passing nullptr performs a size-measuring pass only.
// Returns the number of BITS written (or required), unlike the byte counts of the other writers.
inline uint32_t write_track_animated_values(const track_list_context& context, uint8_t* animated_values)
{
ACL_ASSERT(context.is_valid(), "Invalid context");
uint8_t* output_buffer = animated_values;
uint64_t output_bit_offset = 0;
const uint32_t num_components = get_track_num_sample_elements(context.reference_list->get_track_type());
ACL_ASSERT(num_components <= 4, "Unexpected number of elements");
for (uint32_t sample_index = 0; sample_index < context.num_samples; ++sample_index)
{
for (uint32_t output_index = 0; output_index < context.num_output_tracks; ++output_index)
{
const uint32_t track_index = context.track_output_indices[output_index];
// Constant tracks are stored separately, skip them here.
if (context.is_constant(track_index))
continue;
const track& ref_track = (*context.reference_list)[track_index];
const track& mut_track = context.track_list[track_index];
// Only support scalarf for now
ACL_ASSERT(ref_track.get_category() == track_category8::scalarf, "Unsupported category");
const scalar_bit_rate bit_rate = context.bit_rate_list[track_index].scalar;
const uint64_t num_bits_per_component = get_num_bits_at_bit_rate(bit_rate.value);
// Raw tracks read from the untouched reference data, quantized tracks from the mutated copy.
const track& src_track = is_raw_bit_rate(bit_rate.value) ? ref_track : mut_track;
const uint32_t* sample_u32 = safe_ptr_cast<const uint32_t>(src_track[sample_index]);
const float* sample_f32 = safe_ptr_cast<const float>(src_track[sample_index]);
for (uint32_t component_index = 0; component_index < num_components; ++component_index)
{
if (animated_values != nullptr)
{
uint32_t value;
if (is_raw_bit_rate(bit_rate.value))
value = byte_swap(sample_u32[component_index]);
else
{
// TODO: Hacked, our values are still as floats, cast to int, shift, and byte swap
// Ideally should be done in the cache/mutable track with SIMD
// NOTE(review): this assumes the mutated samples hold quantized integer values stored
// as floats in [0, 2^num_bits) — TODO confirm against the quantization pass
value = safe_static_cast<uint32_t>(sample_f32[component_index]);
value = value << (32 - num_bits_per_component);
value = byte_swap(value);
}
memcpy_bits(output_buffer, output_bit_offset, &value, 0, num_bits_per_component);
}
output_bit_offset += num_bits_per_component;
}
}
}
return safe_static_cast<uint32_t>(output_bit_offset);
}
}
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,192 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2020 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/memory_utils.h"
#include "acl/core/track_desc.h"
#include "acl/core/impl/compiler_utils.h"
#include "acl/compression/track_array.h"
#include <cstdint>
#include <memory>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
namespace acl_impl
{
inline uint32_t write_track_list_name(const track_array& tracks, char* out_track_list_name)
{
ACL_ASSERT(out_track_list_name == nullptr || out_track_list_name[0] == 0, "Buffer overrun detected");
uint8_t* output_buffer = reinterpret_cast<uint8_t*>(out_track_list_name);
const uint8_t* output_buffer_start = output_buffer;
const string& name = tracks.get_name();
const uint32_t name_size = uint32_t(name.size() + 1); // Include null terminator too
if (out_track_list_name != nullptr)
std::memcpy(out_track_list_name, name.c_str(), name_size);
output_buffer += name_size;
return safe_static_cast<uint32_t>(output_buffer - output_buffer_start);
}
// Writes the track names as a table of byte offsets (one uint32_t per output track, relative to
// the start of this blob) followed by the null terminated name strings themselves.
// Passing nullptr performs a size-measuring pass only.
// Returns the number of bytes written (or required).
inline uint32_t write_track_names(const track_array& tracks, const uint32_t* track_output_indices, uint32_t num_output_tracks, uint32_t* out_track_names)
{
ACL_ASSERT(out_track_names == nullptr || out_track_names[0] == 0, "Buffer overrun detected");
uint8_t* output_buffer = reinterpret_cast<uint8_t*>(out_track_names);
const uint8_t* output_buffer_start = output_buffer;
// Write offsets first
uint32_t* track_name_offsets = out_track_names;
uint32_t offset = sizeof(uint32_t) * num_output_tracks;
for (uint32_t output_index = 0; output_index < num_output_tracks; ++output_index)
{
const uint32_t track_index = track_output_indices[output_index];
const string& name = tracks[track_index].get_name();
const uint32_t name_size = uint32_t(name.size() + 1); // Include null terminator too
if (out_track_names != nullptr)
*track_name_offsets = offset;
track_name_offsets++;
output_buffer += sizeof(uint32_t);
offset += name_size;
}
// Next write our track names
// NOTE(review): when 'out_track_names' is nullptr, 'output_buffer' arithmetic is performed on a
// null pointer during the measuring pass (stores are guarded but the increments are not) — the
// size is still counted correctly, but this is technically UB; worth confirming intent
char* track_names = safe_ptr_cast<char>(output_buffer);
for (uint32_t output_index = 0; output_index < num_output_tracks; ++output_index)
{
const uint32_t track_index = track_output_indices[output_index];
const string& name = tracks[track_index].get_name();
const uint32_t name_size = uint32_t(name.size() + 1); // Include null terminator too
if (out_track_names != nullptr)
std::memcpy(track_names, name.c_str(), name_size);
track_names += name_size;
output_buffer += name_size;
}
return safe_static_cast<uint32_t>(output_buffer - output_buffer_start);
}
// Writes the parent track index of every output track, remapped into output index space
// (tracks may have been re-ordered or stripped). Stripped or root parents are written as
// k_invalid_track_index. Passing nullptr performs a size-measuring pass only.
// Returns the number of bytes written (or required).
inline uint32_t write_parent_track_indices(const track_array_qvvf& tracks, const uint32_t* track_output_indices, uint32_t num_output_tracks, uint32_t* out_parent_track_indices)
{
	ACL_ASSERT(out_parent_track_indices == nullptr || out_parent_track_indices[0] == 0, "Buffer overrun detected");

	// Maps a raw track index to the output index it landed at, or k_invalid_track_index
	// if the track is a root or was stripped from the output.
	auto to_output_index = [track_output_indices, num_output_tracks](uint32_t track_index) -> uint32_t
	{
		if (track_index != k_invalid_track_index)
		{
			for (uint32_t output_index = 0; output_index < num_output_tracks; ++output_index)
			{
				if (track_output_indices[output_index] == track_index)
					return output_index;
			}
		}

		return k_invalid_track_index;
	};

	if (out_parent_track_indices != nullptr)
	{
		for (uint32_t output_index = 0; output_index < num_output_tracks; ++output_index)
		{
			const uint32_t track_index = track_output_indices[output_index];
			const track_qvvf& track = tracks[track_index];
			const track_desc_transformf& desc = track.get_description();
			out_parent_track_indices[output_index] = to_output_index(desc.parent_index);
		}
	}

	// One uint32_t per output track.
	return safe_static_cast<uint32_t>(num_output_tracks * sizeof(uint32_t));
}
// Writes the per-track description payloads: 1 float (precision) for scalar tracks or
// 5 floats (precision, shell distance, and the three constant detection thresholds) for
// transform tracks. Passing nullptr performs a size-measuring pass only.
// Returns the number of bytes written (or required).
inline uint32_t write_track_descriptions(const track_array& tracks, const uint32_t* track_output_indices, uint32_t num_output_tracks, uint8_t* out_track_descriptions)
{
ACL_ASSERT(out_track_descriptions == nullptr || out_track_descriptions[0] == 0, "Buffer overrun detected");
uint8_t* output_buffer = out_track_descriptions;
const uint8_t* output_buffer_start = output_buffer;
const bool is_scalar = tracks.get_track_type() != track_type8::qvvf;
for (uint32_t output_index = 0; output_index < num_output_tracks; ++output_index)
{
const uint32_t track_index = track_output_indices[output_index];
if (is_scalar)
{
const track_desc_scalarf& desc = tracks[track_index].get_description<track_desc_scalarf>();
if (out_track_descriptions != nullptr)
{
// We don't write out the output index since the track has already been properly sorted or stripped
float* data = reinterpret_cast<float*>(output_buffer);
data[0] = desc.precision;
}
// Scalar descriptions serialize a single float.
output_buffer += sizeof(float) * 1;
}
else
{
const track_desc_transformf& desc = tracks[track_index].get_description<track_desc_transformf>();
if (out_track_descriptions != nullptr)
{
// We don't write out the output index since the track has already been properly sorted or stripped
// We don't write out the parent index since it has already been included separately
float* data = reinterpret_cast<float*>(output_buffer);
data[0] = desc.precision;
data[1] = desc.shell_distance;
data[2] = desc.constant_rotation_threshold_angle;
data[3] = desc.constant_translation_threshold;
data[4] = desc.constant_scale_threshold;
}
// Transform descriptions serialize five floats, in the order written above.
output_buffer += sizeof(float) * 5;
}
}
return safe_static_cast<uint32_t>(output_buffer - output_buffer_start);
}
}
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,55 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2017 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/enum_utils.h"
namespace sjson { class ObjectWriter; }
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
//////////////////////////////////////////////////////////////////////////
// Bit flags controlling how much statistics output is produced.
// Combine with the flag operators declared below.
//////////////////////////////////////////////////////////////////////////
enum class stat_logging
{
none = 0x0000,
summary = 0x0001,
detailed = 0x0002 | summary, // Detailed implies summary
exhaustive = 0x0004 | detailed, // Exhaustive implies detailed (and summary)
summary_decompression = 0x0010,
exhaustive_decompression = 0x0020,
};
// Enables bitwise operators (|, &, ~, ...) on stat_logging.
ACL_IMPL_ENUM_FLAGS_OPERATORS(stat_logging)
//////////////////////////////////////////////////////////////////////////
// Bundles the requested logging level with the sjson writer that receives the stats.
// A null writer with a non-none logging level means stats are computed but not emitted.
// NOTE(review): the "computed but not emitted" behavior is presumed from the nullable
// pointer — confirm against the consumers of this struct.
//////////////////////////////////////////////////////////////////////////
struct output_stats
{
// Which statistics to gather (see stat_logging flags above).
stat_logging logging = stat_logging::none;
// Destination writer for the stats, may be null.
sjson::ObjectWriter* writer = nullptr;
};
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,359 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2019 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/iallocator.h"
#include "acl/core/string.h"
#include "acl/core/track_desc.h"
#include "acl/core/track_traits.h"
#include "acl/core/track_types.h"
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
#if defined(ACL_COMPILER_MSVC)
#pragma warning(push)
// warning C4582: 'union': constructor is not implicitly called (/Wall)
// This is fine because a track is empty until it is constructed with a valid description.
// Afterwards, access is typesafe.
#pragma warning(disable : 4582)
#endif
namespace acl
{
//////////////////////////////////////////////////////////////////////////
// An untyped track of data. A track is a time series of values sampled
// uniformly over time at a specific sample rate. Tracks can either own
// their memory or reference an external buffer.
// For convenience, this type can be cast with the `track_cast(..)` family
// of functions. Each track type has the same size as every track description
// is contained within a union.
//////////////////////////////////////////////////////////////////////////
class track
{
public:
//////////////////////////////////////////////////////////////////////////
// Creates an empty, untyped track.
track() noexcept;
//////////////////////////////////////////////////////////////////////////
// Move constructor for a track.
track(track&& other) noexcept;
//////////////////////////////////////////////////////////////////////////
// Destroys the track. If it owns the memory referenced, it will be freed.
~track();
//////////////////////////////////////////////////////////////////////////
// Move assignment for a track.
track& operator=(track&& other) noexcept;
//////////////////////////////////////////////////////////////////////////
// Returns a pointer to an untyped sample at the specified index.
void* operator[](uint32_t index);
//////////////////////////////////////////////////////////////////////////
// Returns a pointer to an untyped sample at the specified index.
const void* operator[](uint32_t index) const;
//////////////////////////////////////////////////////////////////////////
// Returns true if the track owns its memory, false otherwise.
bool is_owner() const { return m_allocator != nullptr; }
//////////////////////////////////////////////////////////////////////////
// Returns true if the track owns its memory, false otherwise.
bool is_ref() const { return m_allocator == nullptr; }
//////////////////////////////////////////////////////////////////////////
// Returns true if the track doesn't contain any data, false otherwise.
bool is_empty() const { return m_num_samples == 0; }
//////////////////////////////////////////////////////////////////////////
// Returns a pointer to the allocator instance or nullptr if there is none present.
iallocator* get_allocator() const { return m_allocator; }
//////////////////////////////////////////////////////////////////////////
// Returns the number of samples contained within the track.
uint32_t get_num_samples() const { return m_num_samples; }
//////////////////////////////////////////////////////////////////////////
// Returns the stride in bytes in between samples as laid out in memory.
// This is always sizeof(sample_type) unless the memory isn't owned internally.
uint32_t get_stride() const { return m_stride; }
//////////////////////////////////////////////////////////////////////////
// Returns the track type.
track_type8 get_type() const { return m_type; }
//////////////////////////////////////////////////////////////////////////
// Returns the track category.
track_category8 get_category() const { return m_category; }
//////////////////////////////////////////////////////////////////////////
// Returns the size in bytes of each track sample.
uint32_t get_sample_size() const { return m_sample_size; }
//////////////////////////////////////////////////////////////////////////
// Returns the track sample rate.
// A track has its samples uniformly distributed in time at a fixed rate (e.g. 30 samples per second).
float get_sample_rate() const { return m_sample_rate; }
//////////////////////////////////////////////////////////////////////////
// Returns the track name.
const string& get_name() const { return m_name; }
//////////////////////////////////////////////////////////////////////////
// Sets the track name (the provided name is copied).
void set_name(const string& name) { m_name = name.get_copy(); }
//////////////////////////////////////////////////////////////////////////
// Returns the track output index.
// When compressing, it is often desirable to strip or re-order the tracks we output.
// This can be used to sort by LOD or to strip stale tracks. Tracks with an invalid
// track index are stripped in the output.
uint32_t get_output_index() const;
//////////////////////////////////////////////////////////////////////////
// Returns the track description (mutable overload).
template<typename desc_type>
desc_type& get_description();
//////////////////////////////////////////////////////////////////////////
// Returns the track description (read-only overload).
template<typename desc_type>
const desc_type& get_description() const;
//////////////////////////////////////////////////////////////////////////
// Returns a copy of the track where the memory will be owned by the copy.
track get_copy(iallocator& allocator) const;
//////////////////////////////////////////////////////////////////////////
// Returns a reference to the track where the memory isn't owned.
track get_ref() const;
//////////////////////////////////////////////////////////////////////////
// Returns whether a track is valid or not.
// A track is valid if:
// - It is empty
// - It has a positive and finite sample rate
// - A valid description
error_result is_valid() const;
protected:
//////////////////////////////////////////////////////////////////////////
// We prohibit copying, use get_copy() and get_ref() instead.
track(const track&) = delete;
track& operator=(const track&) = delete;
//////////////////////////////////////////////////////////////////////////
// Internal constructor.
// Creates an empty, untyped track.
track(track_type8 type, track_category8 category) noexcept;
//////////////////////////////////////////////////////////////////////////
// Internal constructor.
// NOTE(review): 'sample_size' arrives as uint8_t, is stored as uint16_t
// (m_sample_size), and is returned as uint32_t from get_sample_size().
track(iallocator* allocator, uint8_t* data, uint32_t num_samples, uint32_t stride, size_t data_size, float sample_rate, track_type8 type, track_category8 category, uint8_t sample_size) noexcept;
//////////////////////////////////////////////////////////////////////////
// Internal helper for get_copy().
void get_copy_impl(iallocator& allocator, track& out_track) const;
//////////////////////////////////////////////////////////////////////////
// Internal helper for get_ref().
void get_ref_impl(track& out_track) const;
iallocator* m_allocator; // Optional allocator that owns the memory
uint8_t* m_data; // Pointer to the samples
uint32_t m_num_samples; // The number of samples
uint32_t m_stride; // The stride in bytes in between samples as laid out in memory
size_t m_data_size; // The total size of the buffer used by the samples
float m_sample_rate; // The track sample rate
track_type8 m_type; // The track type
track_category8 m_category; // The track category
uint16_t m_sample_size; // The size in bytes of each sample
//////////////////////////////////////////////////////////////////////////
// A union of every track description.
// This ensures every track has the same size regardless of its type.
union track_desc_untyped
{
track_desc_scalarf scalar;
track_desc_transformf transform;
track_desc_untyped() {}
explicit track_desc_untyped(const track_desc_scalarf& desc) : scalar(desc) {}
explicit track_desc_untyped(const track_desc_transformf& desc) : transform(desc) {}
};
track_desc_untyped m_desc; // The track description
string m_name; // An optional name
};
//////////////////////////////////////////////////////////////////////////
// A typed track of data. See `track` for details.
//////////////////////////////////////////////////////////////////////////
template<track_type8 track_type_>
class track_typed final : public track
{
public:
//////////////////////////////////////////////////////////////////////////
// The track type.
static constexpr track_type8 type = track_type_;
//////////////////////////////////////////////////////////////////////////
// The track category.
static constexpr track_category8 category = track_traits<track_type_>::category;
//////////////////////////////////////////////////////////////////////////
// The type of each sample in this track.
using sample_type = typename track_traits<track_type_>::sample_type;
//////////////////////////////////////////////////////////////////////////
// The type of the track description.
using desc_type = typename track_traits<track_type_>::desc_type;
//////////////////////////////////////////////////////////////////////////
// Constructs an empty typed track.
track_typed() noexcept : track(type, category) { static_assert(sizeof(track_typed) == sizeof(track), "You cannot add member variables to this class"); }
//////////////////////////////////////////////////////////////////////////
// Destroys the track and potentially frees any memory it might own.
~track_typed() = default;
//////////////////////////////////////////////////////////////////////////
// Move constructor for a track.
track_typed(track_typed&& other) noexcept : track(static_cast<track&&>(other)) {}
//////////////////////////////////////////////////////////////////////////
// Move assignment for a track.
track_typed& operator=(track_typed&& other) noexcept { return static_cast<track_typed&>(track::operator=(static_cast<track&&>(other))); }
//////////////////////////////////////////////////////////////////////////
// Returns the sample at the specified index.
// If this track does not own the memory, mutable references aren't allowed and an
// invalid reference will be returned, leading to a crash.
sample_type& operator[](uint32_t index);
//////////////////////////////////////////////////////////////////////////
// Returns the sample at the specified index.
const sample_type& operator[](uint32_t index) const;
//////////////////////////////////////////////////////////////////////////
// Returns the track description (mutable overload).
desc_type& get_description();
//////////////////////////////////////////////////////////////////////////
// Returns the track description (read-only overload).
const desc_type& get_description() const;
//////////////////////////////////////////////////////////////////////////
// Returns the track type.
// Note: hides the non-virtual track::get_type(); the default constructor sets
// the base type from the same constant.
track_type8 get_type() const { return type; }
//////////////////////////////////////////////////////////////////////////
// Returns the track category.
// Note: hides the non-virtual track::get_category().
track_category8 get_category() const { return category; }
//////////////////////////////////////////////////////////////////////////
// Returns a copy of the track where the memory will be owned by the copy.
track_typed get_copy(iallocator& allocator) const;
//////////////////////////////////////////////////////////////////////////
// Returns a reference to the track where the memory isn't owned.
track_typed get_ref() const;
//////////////////////////////////////////////////////////////////////////
// Creates a track that copies the data and owns the memory.
static track_typed<track_type_> make_copy(const desc_type& desc, iallocator& allocator, const sample_type* data, uint32_t num_samples, float sample_rate, uint32_t stride = sizeof(sample_type));
//////////////////////////////////////////////////////////////////////////
// Creates a track and preallocates but does not initialize the memory that it owns.
static track_typed<track_type_> make_reserve(const desc_type& desc, iallocator& allocator, uint32_t num_samples, float sample_rate);
//////////////////////////////////////////////////////////////////////////
// Creates a track and takes ownership of the already allocated memory.
static track_typed<track_type_> make_owner(const desc_type& desc, iallocator& allocator, sample_type* data, uint32_t num_samples, float sample_rate, uint32_t stride = sizeof(sample_type));
//////////////////////////////////////////////////////////////////////////
// Creates a track that just references the data without owning it.
static track_typed<track_type_> make_ref(const desc_type& desc, sample_type* data, uint32_t num_samples, float sample_rate, uint32_t stride = sizeof(sample_type));
private:
//////////////////////////////////////////////////////////////////////////
// We prohibit copying, use get_copy() and get_ref() instead.
track_typed(const track_typed&) = delete;
track_typed& operator=(const track_typed&) = delete;
//////////////////////////////////////////////////////////////////////////
// Internal constructor.
track_typed(iallocator* allocator, uint8_t* data, uint32_t num_samples, uint32_t stride, size_t data_size, float sample_rate, const desc_type& desc) noexcept;
};
//////////////////////////////////////////////////////////////////////////
// Casts an untyped track into the desired track type while asserting for safety.
// Mutable reference overload.
template<typename track_type>
track_type& track_cast(track& track_);
//////////////////////////////////////////////////////////////////////////
// Casts an untyped track into the desired track type while asserting for safety.
// Read-only reference overload.
template<typename track_type>
const track_type& track_cast(const track& track_);
//////////////////////////////////////////////////////////////////////////
// Casts an untyped track into the desired track type. Returns nullptr if the types
// are not compatible or if the input is nullptr.
template<typename track_type>
track_type* track_cast(track* track_);
//////////////////////////////////////////////////////////////////////////
// Casts an untyped track into the desired track type. Returns nullptr if the types
// are not compatible or if the input is nullptr.
template<typename track_type>
const track_type* track_cast(const track* track_);
//////////////////////////////////////////////////////////////////////////
// Aliases for each supported typed track specialization.
using track_float1f = track_typed<track_type8::float1f>;
using track_float2f = track_typed<track_type8::float2f>;
using track_float3f = track_typed<track_type8::float3f>;
using track_float4f = track_typed<track_type8::float4f>;
using track_vector4f = track_typed<track_type8::vector4f>;
using track_qvvf = track_typed<track_type8::qvvf>;
}
#include "acl/compression/impl/track.impl.h"
#if defined(ACL_COMPILER_MSVC)
#pragma warning(pop)
#endif
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,272 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2019 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/error_result.h"
#include "acl/core/iallocator.h"
#include "acl/core/interpolation_utils.h"
#include "acl/core/track_types.h"
#include "acl/core/track_writer.h"
#include "acl/core/utils.h"
#include "acl/compression/track.h"
#include <rtm/scalarf.h>
#include <rtm/vector4f.h>
#include <cstdint>
#include <limits>
#include <type_traits>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
//////////////////////////////////////////////////////////////////////////
// An array of tracks.
// Although each track contained within is untyped, each track must have
// the same type. They must all have the same sample rate and the same
// number of samples.
//////////////////////////////////////////////////////////////////////////
class track_array
{
public:
//////////////////////////////////////////////////////////////////////////
// Constructs an empty track array.
track_array() noexcept;
//////////////////////////////////////////////////////////////////////////
// Constructs an array with the specified number of tracks.
// Tracks will be empty and untyped by default.
track_array(iallocator& allocator, uint32_t num_tracks);
//////////////////////////////////////////////////////////////////////////
// Move constructor for a track array.
track_array(track_array&& other) noexcept;
//////////////////////////////////////////////////////////////////////////
// Destroys a track array.
~track_array();
//////////////////////////////////////////////////////////////////////////
// Move assignment for a track array.
track_array& operator=(track_array&& other) noexcept;
//////////////////////////////////////////////////////////////////////////
// Returns a pointer to the allocator instance or nullptr if there is none present.
iallocator* get_allocator() const { return m_allocator; }
//////////////////////////////////////////////////////////////////////////
// Returns the number of tracks contained in this array.
uint32_t get_num_tracks() const { return m_num_tracks; }
//////////////////////////////////////////////////////////////////////////
// Returns the number of samples per track in this array (0 when empty).
uint32_t get_num_samples_per_track() const { return m_allocator != nullptr && m_num_tracks != 0 ? m_tracks->get_num_samples() : 0; }
//////////////////////////////////////////////////////////////////////////
// Returns the track type for tracks in this array (float1f when empty).
track_type8 get_track_type() const { return m_allocator != nullptr && m_num_tracks != 0 ? m_tracks->get_type() : track_type8::float1f; }
//////////////////////////////////////////////////////////////////////////
// Returns the track category for tracks in this array (scalarf when empty).
track_category8 get_track_category() const { return m_allocator != nullptr && m_num_tracks != 0 ? m_tracks->get_category() : track_category8::scalarf; }
//////////////////////////////////////////////////////////////////////////
// Returns the sample rate for tracks in this array (0.0 when empty).
float get_sample_rate() const { return m_allocator != nullptr && m_num_tracks != 0 ? m_tracks->get_sample_rate() : 0.0F; }
//////////////////////////////////////////////////////////////////////////
// Returns the duration for tracks in this array (0.0 when empty).
float get_duration() const { return m_allocator != nullptr && m_num_tracks != 0 ? calculate_duration(m_tracks->get_num_samples(), m_tracks->get_sample_rate()) : 0.0F; }
//////////////////////////////////////////////////////////////////////////
// Returns the track name.
const string& get_name() const { return m_name; }
//////////////////////////////////////////////////////////////////////////
// Sets the track name (the provided name is copied).
void set_name(const string& name) { m_name = name.get_copy(); }
//////////////////////////////////////////////////////////////////////////
// Returns the track at the specified index.
track& operator[](uint32_t index);
//////////////////////////////////////////////////////////////////////////
// Returns the track at the specified index.
const track& operator[](uint32_t index) const;
//////////////////////////////////////////////////////////////////////////
// Iterator begin() and end() implementations.
// Fix: the non-const end() overload previously returned 'const track*' while
// the non-const begin() overload returned 'track*'; both now return mutable
// pointers for consistent const-correctness.
track* begin() { return m_tracks; }
const track* begin() const { return m_tracks; }
track* end() { return m_tracks + m_num_tracks; }
const track* end() const { return m_tracks + m_num_tracks; }
//////////////////////////////////////////////////////////////////////////
// Returns true if the track array doesn't contain any data, false otherwise.
bool is_empty() const { return m_num_tracks == 0; }
//////////////////////////////////////////////////////////////////////////
// Returns whether a track array is valid or not.
// An array is valid if:
// - It is empty
// - All tracks have the same type
// - All tracks have the same number of samples
// - All tracks have the same sample rate
// - All tracks are valid
error_result is_valid() const;
//////////////////////////////////////////////////////////////////////////
// Sample all tracks within this array at the specified sample time and
// desired rounding policy. Track samples are written out using the `track_writer` provided.
template<class track_writer_type>
void sample_tracks(float sample_time, sample_rounding_policy rounding_policy, track_writer_type& writer) const;
//////////////////////////////////////////////////////////////////////////
// Sample a single track within this array at the specified sample time and
// desired rounding policy. The track sample is written out using the `track_writer` provided.
template<class track_writer_type>
void sample_track(uint32_t track_index, float sample_time, sample_rounding_policy rounding_policy, track_writer_type& writer) const;
//////////////////////////////////////////////////////////////////////////
// Returns the raw size for this track array. Note that this differs from the actual
// memory used by an instance of this class. It is meant for comparison against
// the compressed size.
uint32_t get_raw_size() const;
protected:
//////////////////////////////////////////////////////////////////////////
// We prohibit copying
track_array(const track_array&) = delete;
track_array& operator=(const track_array&) = delete;
iallocator* m_allocator; // The allocator used to allocate our tracks
track* m_tracks; // The track list
uint32_t m_num_tracks; // The number of tracks
string m_name; // An optional name
};
//////////////////////////////////////////////////////////////////////////
// A typed track array. See `track_array` for details.
//////////////////////////////////////////////////////////////////////////
template<track_type8 track_type_>
class track_array_typed final : public track_array
{
public:
//////////////////////////////////////////////////////////////////////////
// The track type.
static constexpr track_type8 type = track_type_;
//////////////////////////////////////////////////////////////////////////
// The track category.
static constexpr track_category8 category = track_traits<track_type_>::category;
//////////////////////////////////////////////////////////////////////////
// The track member type.
using track_member_type = track_typed<track_type_>;
//////////////////////////////////////////////////////////////////////////
// Constructs an empty track array.
track_array_typed() noexcept : track_array() { static_assert(sizeof(track_array_typed) == sizeof(track_array), "You cannot add member variables to this class"); }
//////////////////////////////////////////////////////////////////////////
// Constructs an array with the specified number of tracks.
// Tracks will be empty and untyped by default.
track_array_typed(iallocator& allocator, uint32_t num_tracks) : track_array(allocator, num_tracks) {}
//////////////////////////////////////////////////////////////////////////
// Move constructor for a track array.
track_array_typed(track_array_typed&& other) noexcept : track_array(static_cast<track_array&&>(other)) {}
//////////////////////////////////////////////////////////////////////////
// Destroys a track array.
~track_array_typed() = default;
//////////////////////////////////////////////////////////////////////////
// Move assignment for a track array.
track_array_typed& operator=(track_array_typed&& other) noexcept { return static_cast<track_array_typed&>(track_array::operator=(static_cast<track_array&&>(other))); }
//////////////////////////////////////////////////////////////////////////
// Returns the track type for tracks in this array.
// Note: hides the non-virtual track_array::get_track_type().
track_type8 get_track_type() const { return type; }
//////////////////////////////////////////////////////////////////////////
// Returns the track category for tracks in this array.
// Note: hides the non-virtual track_array::get_track_category().
track_category8 get_track_category() const { return category; }
//////////////////////////////////////////////////////////////////////////
// Returns the track at the specified index.
track_member_type& operator[](uint32_t index);
//////////////////////////////////////////////////////////////////////////
// Returns the track at the specified index.
const track_member_type& operator[](uint32_t index) const;
//////////////////////////////////////////////////////////////////////////
// Iterator begin() and end() implementations.
// Fix: the non-const end() overload previously returned a 'const' pointer
// while the non-const begin() overload returned a mutable one; both now
// return mutable pointers for consistent const-correctness.
track_member_type* begin() { return track_cast<track_member_type>(m_tracks); }
const track_member_type* begin() const { return track_cast<track_member_type>(m_tracks); }
track_member_type* end() { return track_cast<track_member_type>(m_tracks) + m_num_tracks; }
const track_member_type* end() const { return track_cast<track_member_type>(m_tracks) + m_num_tracks; }
};
//////////////////////////////////////////////////////////////////////////
// Casts an untyped track array into the desired track array type while asserting for safety.
// Mutable reference overload.
template<typename track_array_type>
track_array_type& track_array_cast(track_array& track_array_);
//////////////////////////////////////////////////////////////////////////
// Casts an untyped track array into the desired track array type while asserting for safety.
// Read-only reference overload.
template<typename track_array_type>
const track_array_type& track_array_cast(const track_array& track_array_);
//////////////////////////////////////////////////////////////////////////
// Casts an untyped track array into the desired track array type. Returns nullptr if the types
// are not compatible or if the input is nullptr.
template<typename track_array_type>
track_array_type* track_array_cast(track_array* track_array_);
//////////////////////////////////////////////////////////////////////////
// Casts an untyped track array into the desired track array type. Returns nullptr if the types
// are not compatible or if the input is nullptr.
template<typename track_array_type>
const track_array_type* track_array_cast(const track_array* track_array_);
//////////////////////////////////////////////////////////////////////////
// Aliases for each supported typed track array specialization.
using track_array_float1f = track_array_typed<track_type8::float1f>;
using track_array_float2f = track_array_typed<track_type8::float2f>;
using track_array_float3f = track_array_typed<track_type8::float3f>;
using track_array_float4f = track_array_typed<track_type8::float4f>;
using track_array_vector4f = track_array_typed<track_type8::vector4f>;
using track_array_qvvf = track_array_typed<track_type8::qvvf>;
#include "acl/compression/impl/track_array.impl.h"
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,108 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2019 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/compressed_tracks.h"
#include "acl/core/error_result.h"
#include "acl/core/iallocator.h"
#include "acl/core/impl/compiler_utils.h"
#include "acl/compression/track_array.h"
#include "acl/compression/transform_error_metrics.h"
#include "acl/decompression/decompress.h"
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
//////////////////////////////////////////////////////////////////////////
// A struct that contains the track index that has the worst error,
// its error, and the sample time at which it happens.
//////////////////////////////////////////////////////////////////////////
struct track_error
{
//////////////////////////////////////////////////////////////////////////
// The track index with the worst error.
// Defaults to k_invalid_track_index when no track has been measured.
uint32_t index = k_invalid_track_index;
//////////////////////////////////////////////////////////////////////////
// The worst error for the track index.
float error = 0.0F;
//////////////////////////////////////////////////////////////////////////
// The sample time that has the worst error.
float sample_time = 0.0F;
};
//////////////////////////////////////////////////////////////////////////
// Calculates the worst compression error between a raw track array and its
// compressed tracks.
// Supports scalar tracks only.
//
// Note: This function uses SFINAE to prevent it from matching when it shouldn't
// (it only participates in overload resolution for decompression context types).
template<class decompression_context_type, acl_impl::is_decompression_context<decompression_context_type> = nullptr>
track_error calculate_compression_error(iallocator& allocator, const track_array& raw_tracks, decompression_context_type& context);
//////////////////////////////////////////////////////////////////////////
// Calculates the worst compression error between a raw track array and its
// compressed tracks, using the provided error metric.
// Supports scalar and transform tracks.
//
// Note: This function uses SFINAE to prevent it from matching when it shouldn't.
template<class decompression_context_type, acl_impl::is_decompression_context<decompression_context_type> = nullptr>
track_error calculate_compression_error(iallocator& allocator, const track_array& raw_tracks, decompression_context_type& context, const itransform_error_metric& error_metric);
//////////////////////////////////////////////////////////////////////////
// Calculates the worst compression error between a raw track array and its
// compressed tracks.
// Supports transform tracks with an additive base.
//
// Note: This function uses SFINAE to prevent it from matching when it shouldn't.
template<class decompression_context_type, acl_impl::is_decompression_context<decompression_context_type> = nullptr>
track_error calculate_compression_error(iallocator& allocator, const track_array_qvvf& raw_tracks, decompression_context_type& context, const itransform_error_metric& error_metric, const track_array_qvvf& additive_base_tracks);
//////////////////////////////////////////////////////////////////////////
// Calculates the worst compression error between two compressed tracks instances.
// Supports scalar tracks only.
//
// Note: This function uses SFINAE to prevent it from matching when it shouldn't.
template<class decompression_context_type0, class decompression_context_type1, acl_impl::is_decompression_context<decompression_context_type0> = nullptr, acl_impl::is_decompression_context<decompression_context_type1> = nullptr>
track_error calculate_compression_error(iallocator& allocator, decompression_context_type0& context0, decompression_context_type1& context1);
//////////////////////////////////////////////////////////////////////////
// Calculates the worst compression error between two raw track arrays.
// Supports scalar tracks only.
track_error calculate_compression_error(iallocator& allocator, const track_array& raw_tracks0, const track_array& raw_tracks1);
//////////////////////////////////////////////////////////////////////////
// Calculates the worst compression error between two raw track arrays,
// using the provided error metric.
// Supports scalar and transform tracks.
track_error calculate_compression_error(iallocator& allocator, const track_array& raw_tracks0, const track_array& raw_tracks1, const itransform_error_metric& error_metric);
#include "acl/compression/impl/track_error.impl.h"
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,508 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2017 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/additive_utils.h"
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/hash.h"
#include <rtm/matrix3x4f.h>
#include <rtm/qvvf.h>
#include <rtm/scalarf.h>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
//////////////////////////////////////////////////////////////////////////
// Interface for all skeletal error metrics.
// An error metric is responsible for a few things:
// - converting from rtm::qvvf into whatever transform type the metric uses (optional)
// - applying local space transforms on top of base transforms (optional)
// - transforming local space transforms into object space
// - evaluating the error function
//
// Most functions require two implementations: with and without scale support.
// This is entirely for performance reasons as most clips do not have any scale.
//////////////////////////////////////////////////////////////////////////
class itransform_error_metric
{
public:
virtual ~itransform_error_metric() {}
//////////////////////////////////////////////////////////////////////////
// Returns the unique name of the error metric.
virtual const char* get_name() const = 0;
//////////////////////////////////////////////////////////////////////////
// Returns a unique hash to represent the error metric.
// Defaults to a 32-bit hash of the metric's name.
virtual uint32_t get_hash() const { return hash32(get_name()); }
//////////////////////////////////////////////////////////////////////////
// Returns the transform size used by the error metric.
virtual size_t get_transform_size(bool has_scale) const = 0;
//////////////////////////////////////////////////////////////////////////
// Returns whether or not the error metric uses a transform that isn't rtm::qvvf.
// If this is the case, we need to convert from rtm::qvvf into the transform type
// used by the error metric.
// Defaults to false (no conversion required).
virtual bool needs_conversion(bool has_scale) const { (void)has_scale; return false; }
//////////////////////////////////////////////////////////////////////////
// Input arguments for the 'convert_transforms*' functions.
//////////////////////////////////////////////////////////////////////////
struct convert_transforms_args
{
//////////////////////////////////////////////////////////////////////////
// A list of transform indices that are dirty and need conversion.
const uint32_t* dirty_transform_indices;
//////////////////////////////////////////////////////////////////////////
// The number of dirty transforms that need conversion
// (the number of entries in 'dirty_transform_indices').
uint32_t num_dirty_transforms;
//////////////////////////////////////////////////////////////////////////
// The input transforms in rtm::qvvf format to be converted.
const rtm::qvvf* transforms;
//////////////////////////////////////////////////////////////////////////
// The number of transforms in the input and output buffers.
uint32_t num_transforms;
};
//////////////////////////////////////////////////////////////////////////
// Converts from rtm::qvvf into the transform type used by the error metric.
// Called when 'needs_conversion' returns true.
virtual void convert_transforms(const convert_transforms_args& args, void* out_transforms) const
{
(void)args;
(void)out_transforms;
ACL_ASSERT(false, "Not implemented");
}
//////////////////////////////////////////////////////////////////////////
// Converts from rtm::qvvf into the transform type used by the error metric.
// Called when 'needs_conversion' returns true.
virtual void convert_transforms_no_scale(const convert_transforms_args& args, void* out_transforms) const
{
(void)args;
(void)out_transforms;
ACL_ASSERT(false, "Not implemented");
}
//////////////////////////////////////////////////////////////////////////
// Input arguments for the 'local_to_object_space*' functions.
//////////////////////////////////////////////////////////////////////////
struct local_to_object_space_args
{
//////////////////////////////////////////////////////////////////////////
// A list of transform indices that are dirty and need transformation.
const uint32_t* dirty_transform_indices;
//////////////////////////////////////////////////////////////////////////
// The number of dirty transforms that need transformation.
uint32_t num_dirty_transforms;
//////////////////////////////////////////////////////////////////////////
// A list of parent transform indices for every transform.
// An index of 0xFFFF represents a root transform with no parent.
const uint32_t* parent_transform_indices;
//////////////////////////////////////////////////////////////////////////
// The input transforms in the type expected by the error metric to be transformed.
const void* local_transforms;
//////////////////////////////////////////////////////////////////////////
// The number of transforms in the input and output buffers.
uint32_t num_transforms;
};
//////////////////////////////////////////////////////////////////////////
// Takes local space transforms into object space.
virtual void local_to_object_space(const local_to_object_space_args& args, void* out_object_transforms) const
{
(void)args;
(void)out_object_transforms;
ACL_ASSERT(false, "Not implemented");
}
//////////////////////////////////////////////////////////////////////////
// Takes local space transforms into object space.
virtual void local_to_object_space_no_scale(const local_to_object_space_args& args, void* out_object_transforms) const
{
(void)args;
(void)out_object_transforms;
ACL_ASSERT(false, "Not implemented");
}
//////////////////////////////////////////////////////////////////////////
// Input arguments for the 'apply_additive_to_base*' functions.
//////////////////////////////////////////////////////////////////////////
struct apply_additive_to_base_args
{
//////////////////////////////////////////////////////////////////////////
// A list of transform indices that are dirty and need the base applied.
const uint32_t* dirty_transform_indices;
//////////////////////////////////////////////////////////////////////////
// The number of dirty transforms that need the base applied.
uint32_t num_dirty_transforms;
//////////////////////////////////////////////////////////////////////////
// The input local space transforms in the type expected by the error metric.
const void* local_transforms;
//////////////////////////////////////////////////////////////////////////
// The input base transforms in the type expected by the error metric.
const void* base_transforms;
//////////////////////////////////////////////////////////////////////////
// The number of transforms in the input and output buffers.
uint32_t num_transforms;
};
//////////////////////////////////////////////////////////////////////////
// Applies local space transforms on top of base transforms.
// This is called when a clip has an additive base.
virtual void apply_additive_to_base(const apply_additive_to_base_args& args, void* out_transforms) const
{
(void)args;
(void)out_transforms;
ACL_ASSERT(false, "Not implemented");
}
//////////////////////////////////////////////////////////////////////////
// Applies local space transforms on top of base transforms.
// This is called when a clip has an additive base.
virtual void apply_additive_to_base_no_scale(const apply_additive_to_base_args& args, void* out_transforms) const
{
(void)args;
(void)out_transforms;
ACL_ASSERT(false, "Not implemented");
}
//////////////////////////////////////////////////////////////////////////
// Input arguments for the 'calculate_error*' functions.
//////////////////////////////////////////////////////////////////////////
struct calculate_error_args
{
//////////////////////////////////////////////////////////////////////////
// A point on our rigid shell along the X axis.
rtm::vector4f shell_point_x;
//////////////////////////////////////////////////////////////////////////
// A point on our rigid shell along the Y axis.
rtm::vector4f shell_point_y;
//////////////////////////////////////////////////////////////////////////
// A point on our rigid shell along the Z axis.
rtm::vector4f shell_point_z;
//////////////////////////////////////////////////////////////////////////
// The first transform used to measure the error.
// In the type expected by the error metric.
// Could be in local or object space (same space as lossy).
const void* transform0;
//////////////////////////////////////////////////////////////////////////
// The second transform used to measure the error.
// In the type expected by the error metric.
// Could be in local or object space (same space as raw).
const void* transform1;
//////////////////////////////////////////////////////////////////////////
// We measure the error on a rigid shell around each transform.
// This shell takes the form of a sphere at a certain distance.
// When no scale is present, measuring any two points is sufficient
// but when there is scale, measuring all three is necessary.
// See ./docs/error_metrics.md for details.
void construct_sphere_shell(float shell_distance)
{
shell_point_x = rtm::vector_set(shell_distance, 0.0F, 0.0F, 0.0F);
shell_point_y = rtm::vector_set(0.0F, shell_distance, 0.0F, 0.0F);
shell_point_z = rtm::vector_set(0.0F, 0.0F, shell_distance, 0.0F);
}
};
//////////////////////////////////////////////////////////////////////////
// Measures the error between a raw and lossy transform.
virtual rtm::scalarf RTM_SIMD_CALL calculate_error(const calculate_error_args& args) const = 0;
//////////////////////////////////////////////////////////////////////////
// Measures the error between a raw and lossy transform.
virtual rtm::scalarf RTM_SIMD_CALL calculate_error_no_scale(const calculate_error_args& args) const = 0;
};
//////////////////////////////////////////////////////////////////////////
// Uses rtm::qvvf arithmetic for local and object space error.
// Note that this can cause inaccuracy when dealing with shear/skew.
//////////////////////////////////////////////////////////////////////////
class qvvf_transform_error_metric : public itransform_error_metric
{
public:
	// Error metric implemented entirely with rtm::qvvf arithmetic.
	// The transform type is rtm::qvvf whether or not scale is present,
	// so no conversion step is ever required.
	virtual const char* get_name() const override { return "qvvf_transform_error_metric"; }
	virtual size_t get_transform_size(bool has_scale) const override { (void)has_scale; return sizeof(rtm::qvvf); }
	virtual bool needs_conversion(bool has_scale) const override { (void)has_scale; return false; }

	// Chains every dirty local space transform with its parent's object space transform.
	// The parent's entry is read back from the output buffer, so parents are
	// presumably emitted before their children in the dirty list.
	virtual ACL_DISABLE_SECURITY_COOKIE_CHECK void local_to_object_space(const local_to_object_space_args& args, void* out_object_transforms) const override
	{
		const uint32_t* dirty_indices = args.dirty_transform_indices;
		const uint32_t* parent_indices = args.parent_transform_indices;
		const rtm::qvvf* local_transforms = static_cast<const rtm::qvvf*>(args.local_transforms);
		rtm::qvvf* object_transforms = static_cast<rtm::qvvf*>(out_object_transforms);

		const uint32_t num_dirty = args.num_dirty_transforms;
		for (uint32_t dirty_index = 0; dirty_index < num_dirty; ++dirty_index)
		{
			const uint32_t transform_index = dirty_indices[dirty_index];
			const uint32_t parent_index = parent_indices[transform_index];

			// A root has no parent: its local and object space transforms are one and the same.
			object_transforms[transform_index] = parent_index == k_invalid_track_index
				? local_transforms[transform_index]
				: rtm::qvv_mul(local_transforms[transform_index], object_transforms[parent_index]);
		}
	}

	// Same as local_to_object_space but ignores scale entirely.
	virtual ACL_DISABLE_SECURITY_COOKIE_CHECK void local_to_object_space_no_scale(const local_to_object_space_args& args, void* out_object_transforms) const override
	{
		const uint32_t* dirty_indices = args.dirty_transform_indices;
		const uint32_t* parent_indices = args.parent_transform_indices;
		const rtm::qvvf* local_transforms = static_cast<const rtm::qvvf*>(args.local_transforms);
		rtm::qvvf* object_transforms = static_cast<rtm::qvvf*>(out_object_transforms);

		const uint32_t num_dirty = args.num_dirty_transforms;
		for (uint32_t dirty_index = 0; dirty_index < num_dirty; ++dirty_index)
		{
			const uint32_t transform_index = dirty_indices[dirty_index];
			const uint32_t parent_index = parent_indices[transform_index];

			// A root has no parent: its local and object space transforms are one and the same.
			object_transforms[transform_index] = parent_index == k_invalid_track_index
				? local_transforms[transform_index]
				: rtm::qvv_mul_no_scale(local_transforms[transform_index], object_transforms[parent_index]);
		}
	}

	// Transforms the shell points by both inputs and returns the largest point-to-point distance.
	virtual ACL_DISABLE_SECURITY_COOKIE_CHECK rtm::scalarf RTM_SIMD_CALL calculate_error(const calculate_error_args& args) const override
	{
		const rtm::qvvf& raw = *static_cast<const rtm::qvvf*>(args.transform0);
		const rtm::qvvf& lossy = *static_cast<const rtm::qvvf*>(args.transform1);

		// Note that because we have scale, we must measure all three axes
		const rtm::scalarf error_x = rtm::vector_distance3(rtm::qvv_mul_point3(args.shell_point_x, raw), rtm::qvv_mul_point3(args.shell_point_x, lossy));
		const rtm::scalarf error_y = rtm::vector_distance3(rtm::qvv_mul_point3(args.shell_point_y, raw), rtm::qvv_mul_point3(args.shell_point_y, lossy));
		const rtm::scalarf error_z = rtm::vector_distance3(rtm::qvv_mul_point3(args.shell_point_z, raw), rtm::qvv_mul_point3(args.shell_point_z, lossy));

		return rtm::scalar_max(rtm::scalar_max(error_x, error_y), error_z);
	}

	// Without scale, measuring two shell points is sufficient (see calculate_error_args).
	virtual ACL_DISABLE_SECURITY_COOKIE_CHECK rtm::scalarf RTM_SIMD_CALL calculate_error_no_scale(const calculate_error_args& args) const override
	{
		const rtm::qvvf& raw = *static_cast<const rtm::qvvf*>(args.transform0);
		const rtm::qvvf& lossy = *static_cast<const rtm::qvvf*>(args.transform1);

		const rtm::scalarf error_x = rtm::vector_distance3(rtm::qvv_mul_point3_no_scale(args.shell_point_x, raw), rtm::qvv_mul_point3_no_scale(args.shell_point_x, lossy));
		const rtm::scalarf error_y = rtm::vector_distance3(rtm::qvv_mul_point3_no_scale(args.shell_point_y, raw), rtm::qvv_mul_point3_no_scale(args.shell_point_y, lossy));

		return rtm::scalar_max(error_x, error_y);
	}
};
//////////////////////////////////////////////////////////////////////////
// Uses a mix of rtm::qvvf and rtm::matrix3x4f arithmetic.
// The local space error is always calculated with rtm::qvvf arithmetic.
// The object space error is calculated with rtm::qvvf arithmetic if there is no scale
// and with rtm::matrix3x4f arithmetic if there is scale.
// Note that this can cause inaccuracy issues if there are very large or very small
// scale values.
//////////////////////////////////////////////////////////////////////////
class qvvf_matrix3x4f_transform_error_metric : public qvvf_transform_error_metric
{
public:
	// Uses matrix arithmetic when scale is present, otherwise falls back to the
	// rtm::qvvf implementations inherited from qvvf_transform_error_metric.
	virtual const char* get_name() const override { return "qvvf_matrix3x4f_transform_error_metric"; }
	virtual size_t get_transform_size(bool has_scale) const override { return has_scale ? sizeof(rtm::matrix3x4f) : sizeof(rtm::qvvf); }
	virtual bool needs_conversion(bool has_scale) const override { return has_scale; }

	// Converts every dirty rtm::qvvf transform into an affine matrix.
	// Only called on the scale path (needs_conversion returns true only with scale).
	virtual ACL_DISABLE_SECURITY_COOKIE_CHECK void convert_transforms(const convert_transforms_args& args, void* out_transforms) const override
	{
		const uint32_t* dirty_indices = args.dirty_transform_indices;
		const rtm::qvvf* input_transforms = args.transforms;
		rtm::matrix3x4f* output_transforms = static_cast<rtm::matrix3x4f*>(out_transforms);

		const uint32_t num_dirty = args.num_dirty_transforms;
		for (uint32_t dirty_index = 0; dirty_index < num_dirty; ++dirty_index)
		{
			const uint32_t transform_index = dirty_indices[dirty_index];
			output_transforms[transform_index] = rtm::matrix_from_qvv(input_transforms[transform_index]);
		}
	}

	// Matrix variant of local-to-object chaining; roots are copied unchanged.
	virtual ACL_DISABLE_SECURITY_COOKIE_CHECK void local_to_object_space(const local_to_object_space_args& args, void* out_object_transforms) const override
	{
		const uint32_t* dirty_indices = args.dirty_transform_indices;
		const uint32_t* parent_indices = args.parent_transform_indices;
		const rtm::matrix3x4f* local_transforms = static_cast<const rtm::matrix3x4f*>(args.local_transforms);
		rtm::matrix3x4f* object_transforms = static_cast<rtm::matrix3x4f*>(out_object_transforms);

		const uint32_t num_dirty = args.num_dirty_transforms;
		for (uint32_t dirty_index = 0; dirty_index < num_dirty; ++dirty_index)
		{
			const uint32_t transform_index = dirty_indices[dirty_index];
			const uint32_t parent_index = parent_indices[transform_index];

			// A root has no parent: its local and object space transforms are one and the same.
			object_transforms[transform_index] = parent_index == k_invalid_track_index
				? local_transforms[transform_index]
				: rtm::matrix_mul(local_transforms[transform_index], object_transforms[parent_index]);
		}
	}

	// Matrix variant of the shell-point error measurement.
	virtual ACL_DISABLE_SECURITY_COOKIE_CHECK rtm::scalarf RTM_SIMD_CALL calculate_error(const calculate_error_args& args) const override
	{
		const rtm::matrix3x4f& raw = *static_cast<const rtm::matrix3x4f*>(args.transform0);
		const rtm::matrix3x4f& lossy = *static_cast<const rtm::matrix3x4f*>(args.transform1);

		// Note that because we have scale, we must measure all three axes
		const rtm::scalarf error_x = rtm::vector_distance3(rtm::matrix_mul_point3(args.shell_point_x, raw), rtm::matrix_mul_point3(args.shell_point_x, lossy));
		const rtm::scalarf error_y = rtm::vector_distance3(rtm::matrix_mul_point3(args.shell_point_y, raw), rtm::matrix_mul_point3(args.shell_point_y, lossy));
		const rtm::scalarf error_z = rtm::vector_distance3(rtm::matrix_mul_point3(args.shell_point_z, raw), rtm::matrix_mul_point3(args.shell_point_z, lossy));

		return rtm::scalar_max(rtm::scalar_max(error_x, error_y), error_z);
	}
};
//////////////////////////////////////////////////////////////////////////
// Uses rtm::qvvf arithmetic for local and object space error.
// This error metric should be used whenever a clip is additive or relative.
// Note that this can cause inaccuracy when dealing with shear/skew.
//////////////////////////////////////////////////////////////////////////
template<additive_clip_format8 additive_format>
class additive_qvvf_transform_error_metric : public qvvf_transform_error_metric
{
public:
	// Name varies with the compile-time additive format.
	virtual const char* get_name() const override
	{
		switch (additive_format)
		{
		default:
		case additive_clip_format8::none:		return "additive_qvvf_transform_error_metric<none>";
		case additive_clip_format8::relative:	return "additive_qvvf_transform_error_metric<relative>";
		case additive_clip_format8::additive0:	return "additive_qvvf_transform_error_metric<additive0>";
		case additive_clip_format8::additive1:	return "additive_qvvf_transform_error_metric<additive1>";
		}
	}

	// Combines every dirty local (additive) transform with its matching base transform
	// using the format-specific rule in acl::apply_additive_to_base.
	virtual ACL_DISABLE_SECURITY_COOKIE_CHECK void apply_additive_to_base(const apply_additive_to_base_args& args, void* out_transforms) const override
	{
		const uint32_t* dirty_indices = args.dirty_transform_indices;
		const rtm::qvvf* additive_transforms = static_cast<const rtm::qvvf*>(args.local_transforms);
		const rtm::qvvf* base_transforms = static_cast<const rtm::qvvf*>(args.base_transforms);
		rtm::qvvf* output_transforms = static_cast<rtm::qvvf*>(out_transforms);

		const uint32_t num_dirty = args.num_dirty_transforms;
		for (uint32_t dirty_index = 0; dirty_index < num_dirty; ++dirty_index)
		{
			const uint32_t transform_index = dirty_indices[dirty_index];
			output_transforms[transform_index] = acl::apply_additive_to_base(additive_format, base_transforms[transform_index], additive_transforms[transform_index]);
		}
	}

	// Scale-less variant of the above; uses acl::apply_additive_to_base_no_scale.
	virtual ACL_DISABLE_SECURITY_COOKIE_CHECK void apply_additive_to_base_no_scale(const apply_additive_to_base_args& args, void* out_transforms) const override
	{
		const uint32_t* dirty_indices = args.dirty_transform_indices;
		const rtm::qvvf* additive_transforms = static_cast<const rtm::qvvf*>(args.local_transforms);
		const rtm::qvvf* base_transforms = static_cast<const rtm::qvvf*>(args.base_transforms);
		rtm::qvvf* output_transforms = static_cast<rtm::qvvf*>(out_transforms);

		const uint32_t num_dirty = args.num_dirty_transforms;
		for (uint32_t dirty_index = 0; dirty_index < num_dirty; ++dirty_index)
		{
			const uint32_t transform_index = dirty_indices[dirty_index];
			output_transforms[transform_index] = acl::apply_additive_to_base_no_scale(additive_format, base_transforms[transform_index], additive_transforms[transform_index]);
		}
	}
};
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,74 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2018 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/error.h"
#include <rtm/qvvf.h>
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
// Note: It is safe for both pose buffers to alias since the data is sorted parent first
inline void local_to_object_space(const uint32_t* parent_indices, const rtm::qvvf* local_pose, uint32_t num_transforms, rtm::qvvf* out_object_pose)
{
	if (num_transforms == 0)
		return;	// Nothing to do

	// Transform 0 is the root: its local and object space transforms are identical.
	out_object_pose[0] = local_pose[0];

	for (uint32_t index = 1; index < num_transforms; ++index)
	{
		const uint32_t parent_index = parent_indices[index];
		ACL_ASSERT(parent_index < num_transforms, "Invalid parent bone index: %u >= %u", parent_index, num_transforms);

		// Chain with the already-computed parent object space transform and re-normalize.
		out_object_pose[index] = rtm::qvv_normalize(rtm::qvv_mul(local_pose[index], out_object_pose[parent_index]));
	}
}
// Note: It is safe for both pose buffers to alias since the data is sorted parent first
inline void object_to_local_space(const uint32_t* parent_indices, const rtm::qvvf* object_pose, uint32_t num_transforms, rtm::qvvf* out_local_pose)
{
if (num_transforms == 0)
return; // Nothing to do
out_local_pose[0] = object_pose[0];
for (uint32_t bone_index = 1; bone_index < num_transforms; ++bone_index)
{
const uint32_t parent_bone_index = parent_indices[bone_index];
ACL_ASSERT(parent_bone_index < num_transforms, "Invalid parent bone index: %u >= %u", parent_bone_index, num_transforms);
const rtm::qvvf inv_parent_transform = rtm::qvv_inverse(object_pose[parent_bone_index]);
out_local_pose[bone_index] = rtm::qvv_normalize(rtm::qvv_mul(inv_parent_transform, object_pose[bone_index]));
}
}
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,191 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2018 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include <rtm/qvvf.h>
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
//////////////////////////////////////////////////////////////////////////
// Describes the format used by the additive clip.
enum class additive_clip_format8 : uint8_t
{
// The combine rule for each format is implemented by apply_additive_to_base().
//////////////////////////////////////////////////////////////////////////
// Clip is not additive
none = 0,
//////////////////////////////////////////////////////////////////////////
// Clip is in relative space, transform_mul or equivalent is used to combine them.
// transform = transform_mul(additive_transform, base_transform)
relative = 1,
//////////////////////////////////////////////////////////////////////////
// Clip is in additive space where scale is combined with: base_scale * additive_scale
// transform = transform_add0(additive_transform, base_transform)
additive0 = 2,
//////////////////////////////////////////////////////////////////////////
// Clip is in additive space where scale is combined with: base_scale * (1.0 + additive_scale)
// transform = transform_add1(additive_transform, base_transform)
additive1 = 3,
};
//////////////////////////////////////////////////////////////////////////
// TODO: constexpr
inline const char* get_additive_clip_format_name(additive_clip_format8 format)
{
	// Returns a display name for the format; unknown values map to "<Invalid>".
	if (format == additive_clip_format8::none)
		return "none";

	if (format == additive_clip_format8::relative)
		return "relative";

	if (format == additive_clip_format8::additive0)
		return "additive0";

	if (format == additive_clip_format8::additive1)
		return "additive1";

	return "<Invalid>";
}
inline bool get_additive_clip_format(const char* format, additive_clip_format8& out_format)
{
	// Parses a format name into its enum value. Both the legacy capitalized names
	// (ACL_DEPRECATED, remove in 3.0) and the new lowercase names are accepted.
	// NOTE(review): like the original, this is a prefix match — the input only
	// needs to start with a candidate name, not equal it exactly.
	const auto starts_with = [format](const char* candidate) { return std::strncmp(format, candidate, std::strlen(candidate)) == 0; };

	if (starts_with("None") || starts_with("none"))
	{
		out_format = additive_clip_format8::none;
		return true;
	}

	if (starts_with("Relative") || starts_with("relative"))
	{
		out_format = additive_clip_format8::relative;
		return true;
	}

	if (starts_with("Additive0") || starts_with("additive0"))
	{
		out_format = additive_clip_format8::additive0;
		return true;
	}

	if (starts_with("Additive1") || starts_with("additive1"))
	{
		out_format = additive_clip_format8::additive1;
		return true;
	}

	return false;
}
inline rtm::vector4f RTM_SIMD_CALL get_default_scale(additive_clip_format8 additive_format)
{
	// additive1 combines scale as base_scale * (1.0 + additive_scale), so its
	// identity scale is zero; every other format uses one.
	if (additive_format == additive_clip_format8::additive1)
		return rtm::vector_zero();

	return rtm::vector_set(1.0F);
}
inline rtm::qvvf RTM_SIMD_CALL transform_add0(rtm::qvvf_arg0 base, rtm::qvvf_arg1 additive)
{
	// additive0 combine rule: rotations and scales multiply, translations add.
	return rtm::qvv_set(
		rtm::quat_mul(additive.rotation, base.rotation),
		rtm::vector_add(additive.translation, base.translation),
		rtm::vector_mul(additive.scale, base.scale));
}
inline rtm::qvvf RTM_SIMD_CALL transform_add1(rtm::qvvf_arg0 base, rtm::qvvf_arg1 additive)
{
	// additive1 combine rule: same as additive0 except the additive scale is
	// stored as a delta, so scale combines as (1.0 + additive_scale) * base_scale.
	const rtm::vector4f scale_delta = rtm::vector_add(rtm::vector_set(1.0F), additive.scale);
	return rtm::qvv_set(
		rtm::quat_mul(additive.rotation, base.rotation),
		rtm::vector_add(additive.translation, base.translation),
		rtm::vector_mul(scale_delta, base.scale));
}
inline rtm::qvvf RTM_SIMD_CALL transform_add_no_scale(rtm::qvvf_arg0 base, rtm::qvvf_arg1 additive)
{
	// Rotation and translation combine like the scaled variants; scale is forced to identity.
	return rtm::qvv_set(
		rtm::quat_mul(additive.rotation, base.rotation),
		rtm::vector_add(additive.translation, base.translation),
		rtm::vector_set(1.0F));
}
inline rtm::qvvf RTM_SIMD_CALL apply_additive_to_base(additive_clip_format8 additive_format, rtm::qvvf_arg0 base, rtm::qvvf_arg1 additive)
{
	// Dispatches to the combine rule that matches the additive format.
	if (additive_format == additive_clip_format8::relative)
		return rtm::qvv_mul(additive, base);

	if (additive_format == additive_clip_format8::additive0)
		return transform_add0(base, additive);

	if (additive_format == additive_clip_format8::additive1)
		return transform_add1(base, additive);

	// none (and any unknown value, mirroring the original switch default): the
	// clip is not additive, the base is ignored.
	return additive;
}
inline rtm::qvvf RTM_SIMD_CALL apply_additive_to_base_no_scale(additive_clip_format8 additive_format, rtm::qvvf_arg0 base, rtm::qvvf_arg1 additive)
{
	// Scale-less dispatch: additive0 and additive1 share transform_add_no_scale
	// since their scale handling is the only thing that differs.
	if (additive_format == additive_clip_format8::relative)
		return rtm::qvv_mul_no_scale(additive, base);

	if (additive_format == additive_clip_format8::additive0 || additive_format == additive_clip_format8::additive1)
		return transform_add_no_scale(base, additive);

	// none (and any unknown value, mirroring the original switch default): the
	// clip is not additive, the base is ignored.
	return additive;
}
inline rtm::qvvf RTM_SIMD_CALL convert_to_relative(rtm::qvvf_arg0 base, rtm::qvvf_arg1 transform)
{
	// Expresses 'transform' relative to 'base' by multiplying with the base's inverse.
	const rtm::qvvf inv_base = rtm::qvv_inverse(base);
	return rtm::qvv_mul(transform, inv_base);
}
inline rtm::qvvf RTM_SIMD_CALL convert_to_additive0(rtm::qvvf_arg0 base, rtm::qvvf_arg1 transform)
{
	// Inverse of transform_add0: removes the base rotation (conjugate multiply),
	// translation (subtract), and scale (divide).
	return rtm::qvv_set(
		rtm::quat_mul(transform.rotation, rtm::quat_conjugate(base.rotation)),
		rtm::vector_sub(transform.translation, base.translation),
		rtm::vector_div(transform.scale, base.scale));
}
inline rtm::qvvf RTM_SIMD_CALL convert_to_additive1(rtm::qvvf_arg0 base, rtm::qvvf_arg1 transform)
{
	// Inverse of transform_add1: scale is stored as a delta, hence the final
	// subtraction of one after removing the base scale.
	const rtm::vector4f scale_ratio = rtm::vector_mul(transform.scale, rtm::vector_reciprocal(base.scale));
	return rtm::qvv_set(
		rtm::quat_mul(transform.rotation, rtm::quat_conjugate(base.rotation)),
		rtm::vector_sub(transform.translation, base.translation),
		rtm::vector_sub(scale_ratio, rtm::vector_set(1.0F)));
}
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,102 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2017 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include <cstdint>
#include <cstring>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
////////////////////////////////////////////////////////////////////////////////
// algorithm_type8 is an enum that represents every supported algorithm.
//
// BE CAREFUL WHEN CHANGING VALUES IN THIS ENUM
// The algorithm type is serialized in the compressed data, if you change a value
// the compressed clips will be invalid. If you do, bump the appropriate algorithm versions.
enum class algorithm_type8 : uint8_t
{
    // The only algorithm value currently accepted (see is_valid_algorithm_type below).
    uniformly_sampled = 0,

    // Reserved IDs from legacy algorithms; never reuse them since the
    // algorithm type is serialized in the compressed data.
    //LinearKeyReduction = 1,
    //SplineKeyReduction = 2,
};
//////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Returns true if the algorithm type is a valid value. Used to validate if
// memory has been corrupted.
// TODO: constexpr
inline bool is_valid_algorithm_type(algorithm_type8 type)
{
    // uniformly_sampled is the only live enum value; the key-reduction
    // entries are commented out in the enum and thus invalid.
    return type == algorithm_type8::uniformly_sampled;
}
////////////////////////////////////////////////////////////////////////////////
// Returns a string of the algorithm name suitable for display.
// TODO: constexpr
inline const char* get_algorithm_name(algorithm_type8 type)
{
    // Only one algorithm is currently defined; anything else is reported invalid.
    if (type == algorithm_type8::uniformly_sampled)
        return "uniformly_sampled";

    return "<Invalid>";
}
////////////////////////////////////////////////////////////////////////////////
// Returns true if the algorithm type was properly parsed from an input string.
//
// type: A string representing the algorithm name to parse. It must match the get_algorithm_name(..) output.
// out_type: On success, it will contain the the parsed algorithm type otherwise it is left untouched.
inline bool get_algorithm_type(const char* type, algorithm_type8& out_type)
{
    const char* uniformly_sampled_name = "UniformlySampled"; // ACL_DEPRECATED Legacy name, keep for backwards compatibility, remove in 3.0
    const char* uniformly_sampled_name_new = "uniformly_sampled";

    // Use an exact comparison. The previous strncmp(type, name, strlen(name))
    // form only tested a prefix and would incorrectly accept inputs such as
    // "uniformly_sampled_foo", contradicting the documented contract that the
    // input must match the get_algorithm_name(..) output.
    if (std::strcmp(type, uniformly_sampled_name) == 0
        || std::strcmp(type, uniformly_sampled_name_new) == 0)
    {
        out_type = algorithm_type8::uniformly_sampled;
        return true;
    }

    return false;
}
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,196 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2018 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/iallocator.h"
#include "acl/core/error.h"
#if defined(__APPLE__) || defined(__EMSCRIPTEN__)
#include <cstdlib> // For posix_memalign
#elif defined(_WIN32)
#include <malloc.h>
#endif
#if defined(ACL_HAS_ASSERT_CHECKS) && !defined(ACL_NO_ALLOCATOR_TRACKING)
#define ACL_ALLOCATOR_TRACK_NUM_ALLOCATIONS
#endif
// This is used for debugging memory leaks, double frees, etc.
// It should never be enabled in production!
//#define ACL_ALLOCATOR_TRACK_ALL_ALLOCATIONS
#if defined(ACL_ALLOCATOR_TRACK_NUM_ALLOCATIONS)
#include <atomic>
#endif
#if defined(ACL_ALLOCATOR_TRACK_ALL_ALLOCATIONS)
#include <unordered_map>
#endif
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
////////////////////////////////////////////////////////////////////////////////
// An ANSI allocator implementation. It uses the system malloc/free to manage
// memory as well as provides some debugging functionality to track memory leaks.
////////////////////////////////////////////////////////////////////////////////
class ansi_allocator : public iallocator
{
public:
    // Constructs an allocator with zeroed debug-tracking state (when enabled).
    ansi_allocator()
        : iallocator()
#if defined(ACL_ALLOCATOR_TRACK_NUM_ALLOCATIONS)
        , m_allocation_count(0)
#endif
#if defined(ACL_ALLOCATOR_TRACK_ALL_ALLOCATIONS)
        , m_debug_allocations()
#endif
    {}

    // On destruction, reports any live allocations (leaks) and asserts that
    // every allocation was matched by a deallocation.
    virtual ~ansi_allocator()
    {
#if defined(ACL_ALLOCATOR_TRACK_ALL_ALLOCATIONS)
        if (!m_debug_allocations.empty())
        {
            // Each remaining entry is a leak: allocated but never deallocated.
            for (const auto& pair : m_debug_allocations)
            {
                printf("Live allocation at the allocator destruction: 0x%p (%zu)\n", pair.second.ptr, pair.second.size);
            }
        }
#endif
#if defined(ACL_ALLOCATOR_TRACK_NUM_ALLOCATIONS)
        ACL_ASSERT(m_allocation_count == 0, "The number of allocations and deallocations does not match");
#endif
    }

    // Non-copyable: the tracking state must not be duplicated.
    ansi_allocator(const ansi_allocator&) = delete;
    ansi_allocator& operator=(const ansi_allocator&) = delete;

    // Allocates 'size' bytes aligned to 'alignment' (which must be a power of two),
    // dispatching to the platform's aligned allocation primitive.
    virtual void* allocate(size_t size, size_t alignment = k_default_alignment) override
    {
        /*
        * This is a common requirement for many of the aligned allocators, see
        * http://pubs.opengroup.org/onlinepubs/9699919799/functions/posix_memalign.html
        * https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/aligned-malloc?view=vs-2017
        */
        ACL_ASSERT(is_power_of_two(alignment), "The alignment must be power of two.");
        /*
        * Another common requirement is for size to be an integral multiple of alignment,
        * i.e. aligned to alignment. As this interface here is supposed to help the user out,
        * just silently align it for them, while keeping the tracked size intact.
        */
        size_t aligned_size = align_to(size, alignment);
#if defined(ACL_ALLOCATOR_TRACK_NUM_ALLOCATIONS)
        m_allocation_count.fetch_add(1, std::memory_order_relaxed);
#endif
        void* ptr;
#if defined(_WIN32)
        ptr = _aligned_malloc(aligned_size, alignment);
#elif defined(__APPLE__) || defined(__EMSCRIPTEN__)
        ptr = nullptr;
        // NOTE(review): the posix_memalign return value is ignored; on failure
        // 'ptr' remains nullptr, which callers presumably must handle — confirm.
        posix_memalign(&ptr, std::max<size_t>(alignment, sizeof(void*)), aligned_size);
#elif defined(__ANDROID__)
        // Android: no aligned allocation primitive is used; instead we over-allocate,
        // align the pointer manually, and stash the padding size just before the
        // returned pointer so deallocate() can recover the original allocation.
        // Don't bother using aligned_size here, as we're doing custom alignment, just mark the var as unused.
        (void)aligned_size;
        alignment = std::max<size_t>(std::max<size_t>(alignment, sizeof(void*)), sizeof(size_t));
        const size_t padded_size = size + alignment + sizeof(size_t);
        ptr = malloc(padded_size);
        if (ptr != nullptr)
        {
            const void* allocated_ptr = ptr;
            // Reserve room for the padding-size header, then align.
            ptr = align_to(add_offset_to_ptr<void>(ptr, sizeof(size_t)), alignment);
            const size_t padding_size = safe_static_cast<size_t>(reinterpret_cast<uintptr_t>(ptr) - reinterpret_cast<uintptr_t>(allocated_ptr));
            // NOTE(review): '-sizeof(size_t)' negates an unsigned value; this relies on
            // add_offset_to_ptr accepting a signed/wrapping offset — confirm its signature.
            size_t* padding_size_ptr = add_offset_to_ptr<size_t>(ptr, -sizeof(size_t));
            *padding_size_ptr = padding_size;
        }
#else
        ptr = aligned_alloc(alignment, aligned_size);
#endif
#if defined(ACL_ALLOCATOR_TRACK_ALL_ALLOCATIONS)
        // Track the ORIGINAL requested size (not aligned_size) for leak diagnostics.
        m_debug_allocations.insert({ {ptr, AllocationEntry{ptr, size}} });
#endif
        return ptr;
    }

    // Frees memory previously returned by allocate(). 'size' must match the
    // originally requested size; it is only checked when full tracking is enabled.
    virtual void deallocate(void* ptr, size_t size) override
    {
        if (ptr == nullptr)
            return;

        (void)size;
#if defined(_WIN32)
        _aligned_free(ptr);
#elif defined(__ANDROID__)
        // Recover the original malloc() pointer from the padding header stored
        // immediately before 'ptr' by allocate().
        const size_t* padding_size_ptr = add_offset_to_ptr<size_t>(ptr, -sizeof(size_t));
        void* allocated_ptr = add_offset_to_ptr<void>(ptr, -*padding_size_ptr);
        free(allocated_ptr);
#else
        free(ptr);
#endif
#if defined(ACL_ALLOCATOR_TRACK_ALL_ALLOCATIONS)
        const auto it = m_debug_allocations.find(ptr);
        ACL_ASSERT(it != m_debug_allocations.end(), "Attempting to deallocate a pointer that isn't allocated");
        ACL_ASSERT(it->second.size == size, "Allocation and deallocation size do not match");
        m_debug_allocations.erase(ptr);
#endif
#if defined(ACL_ALLOCATOR_TRACK_NUM_ALLOCATIONS)
        const int32_t old_value = m_allocation_count.fetch_sub(1, std::memory_order_relaxed);
        ACL_ASSERT(old_value > 0, "The number of allocations and deallocations does not match");
#endif
    }

#if defined(ACL_ALLOCATOR_TRACK_NUM_ALLOCATIONS)
    // Returns the number of live allocations (allocations minus deallocations).
    int32_t get_allocation_count() const { return m_allocation_count.load(std::memory_order_relaxed); }
#endif

private:
#if defined(ACL_ALLOCATOR_TRACK_NUM_ALLOCATIONS)
    // Net allocation counter; atomic so allocate/deallocate may race across threads.
    std::atomic<int32_t> m_allocation_count;
#endif
#if defined(ACL_ALLOCATOR_TRACK_ALL_ALLOCATIONS)
    // Per-allocation record kept while the allocation is live.
    struct AllocationEntry
    {
        void* ptr;
        size_t size;
    };
    // NOTE: unordered_map access here is NOT synchronized; full tracking is
    // debug-only (see the comment above its #define) and assumes single-threaded use.
    std::unordered_map<void*, AllocationEntry> m_debug_allocations;
#endif
};
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,136 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2018 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/error.h"
#include <rtm/math.h>
#include <cstdint>
#if !defined(ACL_USE_POPCOUNT) && !defined(RTM_NO_INTRINSICS)
// TODO: Enable this for PlayStation 4 as well, what is the define and can we use it in public code?
#if defined(_DURANGO) || defined(_XBOX_ONE)
// Enable pop-count type instructions on Xbox One
#define ACL_USE_POPCOUNT
#endif
#endif
#if defined(ACL_USE_POPCOUNT)
#include <nmmintrin.h>
#endif
// Note: It seems that the Clang toolchain with MSVC enables BMI only with AVX2 unlike
// MSVC which enables it with AVX
#if defined(RTM_AVX_INTRINSICS) && !(defined(_MSC_VER) && defined(__clang__))
// Use BMI
#include <ammintrin.h> // MSVC uses this header for _andn_u32 BMI intrinsic
#include <immintrin.h> // Intel documentation says _andn_u32 and others are here
#define ACL_BMI_INTRINSICS
#endif
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
inline uint8_t count_set_bits(uint8_t value)
{
#if defined(ACL_USE_POPCOUNT)
return (uint8_t)_mm_popcnt_u32(value);
#elif defined(RTM_NEON_INTRINSICS)
return (uint8_t)vget_lane_u64(vcnt_u8(vcreate_u8(value)), 0);
#else
value = value - ((value >> 1) & 0x55);
value = (value & 0x33) + ((value >> 2) & 0x33);
return ((value + (value >> 4)) & 0x0F);
#endif
}
inline uint16_t count_set_bits(uint16_t value)
{
#if defined(ACL_USE_POPCOUNT)
return (uint16_t)_mm_popcnt_u32(value);
#elif defined(RTM_NEON_INTRINSICS)
return (uint16_t)vget_lane_u64(vpaddl_u8(vcnt_u8(vcreate_u8(value))), 0);
#else
value = value - ((value >> 1) & 0x5555);
value = (value & 0x3333) + ((value >> 2) & 0x3333);
return uint16_t(((value + (value >> 4)) & 0x0F0F) * 0x0101) >> 8;
#endif
}
inline uint32_t count_set_bits(uint32_t value)
{
#if defined(ACL_USE_POPCOUNT)
return _mm_popcnt_u32(value);
#elif defined(RTM_NEON_INTRINSICS)
return (uint32_t)vget_lane_u64(vpaddl_u16(vpaddl_u8(vcnt_u8(vcreate_u8(value)))), 0);
#else
value = value - ((value >> 1) & 0x55555555);
value = (value & 0x33333333) + ((value >> 2) & 0x33333333);
return (((value + (value >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;
#endif
}
inline uint64_t count_set_bits(uint64_t value)
{
#if defined(ACL_USE_POPCOUNT)
return _mm_popcnt_u64(value);
#elif defined(RTM_NEON_INTRINSICS)
return vget_lane_u64(vpaddl_u32(vpaddl_u16(vpaddl_u8(vcnt_u8(vcreate_u8(value))))), 0);
#else
value = value - ((value >> 1) & 0x5555555555555555ULL);
value = (value & 0x3333333333333333ULL) + ((value >> 2) & 0x3333333333333333ULL);
return (((value + (value >> 4)) & 0x0F0F0F0F0F0F0F0FULL) * 0x0101010101010101ULL) >> 56;
#endif
}
inline uint32_t rotate_bits_left(uint32_t value, int32_t num_bits)
{
ACL_ASSERT(num_bits >= 0, "Attempting to rotate by negative bits");
ACL_ASSERT(num_bits < 32, "Attempting to rotate by too many bits");
const uint32_t mask = 32 - 1;
num_bits &= mask;
return (value << num_bits) | (value >> ((-num_bits) & mask));
}
inline uint32_t and_not(uint32_t not_value, uint32_t and_value)
{
#if defined(ACL_BMI_INTRINSICS)
// Use BMI
#if defined(__GNUC__) && !defined(__clang__) && !defined(_andn_u32)
return __andn_u32(not_value, and_value); // GCC doesn't define the right intrinsic symbol
#else
return _andn_u32(not_value, and_value);
#endif
#else
return ~not_value & and_value;
#endif
}
}
ACL_IMPL_FILE_PRAGMA_POP

216
sources/acl/core/bitset.h Normal file
View File

@ -0,0 +1,216 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2018 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/error.h"
#include "acl/core/bit_manip_utils.h"
#include <cstdint>
#include <limits>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
////////////////////////////////////////////////////////////////////////////////
// A bit set description holds the required information to ensure type and memory safety
// with the various bit set functions.
////////////////////////////////////////////////////////////////////////////////
class bitset_description
{
public:
////////////////////////////////////////////////////////////////////////////////
// Creates an invalid bit set description.
constexpr bitset_description() : m_size(0) {}
////////////////////////////////////////////////////////////////////////////////
// Creates a bit set description from a compile time known number of bits.
template<uint64_t num_bits>
static constexpr bitset_description make_from_num_bits()
{
static_assert(num_bits <= std::numeric_limits<uint32_t>::max() - 31, "Number of bits exceeds the maximum number allowed");
return bitset_description((uint32_t(num_bits) + 32 - 1) / 32);
}
////////////////////////////////////////////////////////////////////////////////
// Creates a bit set description from a runtime known number of bits.
inline static bitset_description make_from_num_bits(uint32_t num_bits)
{
ACL_ASSERT(num_bits <= std::numeric_limits<uint32_t>::max() - 31, "Number of bits exceeds the maximum number allowed");
return bitset_description((num_bits + 32 - 1) / 32);
}
////////////////////////////////////////////////////////////////////////////////
// Returns the number of 32 bit words used to represent the bitset.
// 1 == 32 bits, 2 == 64 bits, etc.
constexpr uint32_t get_size() const { return m_size; }
////////////////////////////////////////////////////////////////////////////////
// Returns the number of bits contained within the bit set.
constexpr uint32_t get_num_bits() const { return m_size * 32; }
////////////////////////////////////////////////////////////////////////////////
// Returns the number of bytes used by the bit set.
constexpr uint32_t get_num_bytes() const { return m_size * sizeof(uint32_t); }
////////////////////////////////////////////////////////////////////////////////
// Returns true if the index is valid within the bit set.
constexpr bool is_bit_index_valid(uint32_t index) const { return index < get_num_bits(); }
private:
////////////////////////////////////////////////////////////////////////////////
// Creates a bit set description from a specified size.
explicit constexpr bitset_description(uint32_t size) : m_size(size) {}
// Number of words required to hold the bit set
// 1 == 32 bits, 2 == 64 bits, etc.
uint32_t m_size;
};
//////////////////////////////////////////////////////////////////////////
// A bit set index reference is created from a bit set description and a bit index.
// It holds the bit set word offset as well as the bit mask required.
// This is useful if you sample multiple bit sets at the same index.
//////////////////////////////////////////////////////////////////////////
	struct bitset_index_ref
	{
		// Creates an invalid reference: empty description, word 0, zero mask
		// (a zero mask matches no bit).
		bitset_index_ref()
			: desc()
			, offset(0)
			, mask(0)
		{}

		// Precomputes the word offset and single-bit mask for 'bit_index'.
		// Bit 0 maps to the most significant bit of word 0 (MSB-first layout).
		bitset_index_ref(bitset_description desc_, uint32_t bit_index)
			: desc(desc_)
			, offset(bit_index / 32)
			, mask(1 << (31 - (bit_index % 32)))
		{
			ACL_ASSERT(desc_.is_bit_index_valid(bit_index), "Invalid bit index: %d", bit_index);
		}

		bitset_description desc;	// Description of the bit set this reference indexes into.
		uint32_t offset;			// Word index within the bit set.
		uint32_t mask;				// Single-bit mask within that word.
	};
////////////////////////////////////////////////////////////////////////////////
// Resets the entire bit set to the provided value.
inline void bitset_reset(uint32_t* bitset, bitset_description desc, bool value)
{
const uint32_t mask = value ? 0xFFFFFFFF : 0x00000000;
const uint32_t size = desc.get_size();
for (uint32_t offset = 0; offset < size; ++offset)
bitset[offset] = mask;
}
////////////////////////////////////////////////////////////////////////////////
// Sets a specific bit to its desired value.
inline void bitset_set(uint32_t* bitset, bitset_description desc, uint32_t bit_index, bool value)
{
ACL_ASSERT(desc.is_bit_index_valid(bit_index), "Invalid bit index: %d", bit_index);
(void)desc;
const uint32_t offset = bit_index / 32;
const uint32_t mask = 1 << (31 - (bit_index % 32));
if (value)
bitset[offset] |= mask;
else
bitset[offset] &= ~mask;
}
////////////////////////////////////////////////////////////////////////////////
// Sets a specific bit to its desired value.
inline void bitset_set(uint32_t* bitset, const bitset_index_ref& ref, bool value)
{
if (value)
bitset[ref.offset] |= ref.mask;
else
bitset[ref.offset] &= ~ref.mask;
}
////////////////////////////////////////////////////////////////////////////////
// Sets a specified range of bits to a specified value.
inline void bitset_set_range(uint32_t* bitset, bitset_description desc, uint32_t start_bit_index, uint32_t num_bits, bool value)
{
ACL_ASSERT(desc.is_bit_index_valid(start_bit_index), "Invalid start bit index: %d", start_bit_index);
ACL_ASSERT(start_bit_index + num_bits <= desc.get_num_bits(), "Invalid num bits: %d > %d", start_bit_index + num_bits, desc.get_num_bits());
const uint32_t end_bit_offset = start_bit_index + num_bits;
for (uint32_t offset = start_bit_index; offset < end_bit_offset; ++offset)
bitset_set(bitset, desc, offset, value);
}
////////////////////////////////////////////////////////////////////////////////
// Returns the bit value as a specific index.
inline bool bitset_test(const uint32_t* bitset, bitset_description desc, uint32_t bit_index)
{
ACL_ASSERT(desc.is_bit_index_valid(bit_index), "Invalid bit index: %d", bit_index);
(void)desc;
const uint32_t offset = bit_index / 32;
const uint32_t mask = 1 << (31 - (bit_index % 32));
return (bitset[offset] & mask) != 0;
}
////////////////////////////////////////////////////////////////////////////////
// Returns the bit value as a specific index.
	inline bool bitset_test(const uint32_t* bitset, const bitset_index_ref& ref)
	{
		// Uses the word offset and mask precomputed in 'ref'; 'ref' must have been
		// built from a description matching this bit set.
		return (bitset[ref.offset] & ref.mask) != 0;
	}
////////////////////////////////////////////////////////////////////////////////
// Counts the total number of set (true) bits within the bit set.
inline uint32_t bitset_count_set_bits(const uint32_t* bitset, bitset_description desc)
{
const uint32_t size = desc.get_size();
// TODO: Optimize for NEON by using the intrinsic directly and unrolling the loop to
// reduce the number of pairwise add instructions.
uint32_t num_set_bits = 0;
for (uint32_t offset = 0; offset < size; ++offset)
num_set_bits += count_set_bits(bitset[offset]);
return num_set_bits;
}
//////////////////////////////////////////////////////////////////////////
// Performs the operation: result = ~not_value & and_value
// Bit sets must have the same description
// Bit sets can alias
inline void bitset_and_not(uint32_t* bitset_result, const uint32_t* bitset_not_value, const uint32_t* bitset_and_value, bitset_description desc)
{
const uint32_t size = desc.get_size();
for (uint32_t offset = 0; offset < size; ++offset)
bitset_result[offset] = and_not(bitset_not_value[offset], bitset_and_value[offset]);
}
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,50 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2020 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
//////////////////////////////////////////////////////////////////////////
// This enum represents raw buffer identification tags
	enum class buffer_tag32 : uint32_t
	{
		//////////////////////////////////////////////////////////////////////////
		// Identifies a 'CompressedClip' buffer.
		// Deprecated, no longer used. Belonged to pre-2.0 file format.
		// Note: the tag is a repeated 16-bit pattern, easy to spot in a hex dump.
		compressed_clip = 0xac10ac10,

		//////////////////////////////////////////////////////////////////////////
		// Identifies a 'compressed_tracks' buffer.
		compressed_tracks = 0xac11ac11,
	};
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,160 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2019 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/algorithm_types.h"
#include "acl/core/buffer_tag.h"
#include "acl/core/compressed_tracks_version.h"
#include "acl/core/error_result.h"
#include "acl/core/hash.h"
#include "acl/core/track_desc.h"
#include "acl/core/track_types.h"
#include "acl/core/utils.h"
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/impl/compressed_headers.h"
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
////////////////////////////////////////////////////////////////////////////////
// An instance of a compressed tracks.
// The compressed data immediately follows this instance in memory.
// The total size of the buffer can be queried with `get_size()`.
////////////////////////////////////////////////////////////////////////////////
	class alignas(16) compressed_tracks final
	{
	public:
		////////////////////////////////////////////////////////////////////////////////
		// Returns the algorithm type used to compress the tracks.
		algorithm_type8 get_algorithm_type() const;

		////////////////////////////////////////////////////////////////////////////////
		// Returns the size in bytes of the compressed tracks.
		// Includes the 'compressed_tracks' instance size.
		uint32_t get_size() const { return m_buffer_header.size; }

		//////////////////////////////////////////////////////////////////////////
		// Returns the hash for the compressed tracks.
		// This is only used for sanity checking in case of memory corruption.
		uint32_t get_hash() const { return m_buffer_header.hash; }

		//////////////////////////////////////////////////////////////////////////
		// Returns the binary tag for the compressed tracks.
		// This uniquely identifies the buffer as a proper 'compressed_tracks' object.
		buffer_tag32 get_tag() const;

		//////////////////////////////////////////////////////////////////////////
		// Returns the binary format version.
		compressed_tracks_version16 get_version() const;

		//////////////////////////////////////////////////////////////////////////
		// Returns the number of tracks contained.
		uint32_t get_num_tracks() const;

		//////////////////////////////////////////////////////////////////////////
		// Returns the number of samples each track contains.
		uint32_t get_num_samples_per_track() const;

		//////////////////////////////////////////////////////////////////////////
		// Returns the type of the compressed tracks.
		track_type8 get_track_type() const;

		//////////////////////////////////////////////////////////////////////////
		// Returns the duration of each track.
		float get_duration() const;

		//////////////////////////////////////////////////////////////////////////
		// Returns the sample rate used by each track.
		float get_sample_rate() const;

		//////////////////////////////////////////////////////////////////////////
		// Returns the track list name if metadata is present, nullptr otherwise.
		const char* get_name() const;

		//////////////////////////////////////////////////////////////////////////
		// Returns the track name for the specified track index if metadata is present, nullptr otherwise.
		const char* get_track_name(uint32_t track_index) const;

		//////////////////////////////////////////////////////////////////////////
		// Returns the parent track index for the specified track index if metadata is present, k_invalid_track_index otherwise.
		uint32_t get_parent_track_index(uint32_t track_index) const;

		//////////////////////////////////////////////////////////////////////////
		// Returns the track description for the specified track index if metadata is present.
		// Returns true on success, false otherwise.
		bool get_track_description(uint32_t track_index, track_desc_scalarf& out_description) const;
		bool get_track_description(uint32_t track_index, track_desc_transformf& out_description) const;

		////////////////////////////////////////////////////////////////////////////////
		// Returns true if the compressed tracks are valid and usable.
		// This mainly validates some invariants as well as ensuring that the
		// memory has not been corrupted.
		//
		// check_hash: If true, the compressed tracks hash will also be compared.
		error_result is_valid(bool check_hash) const;

	private:
		////////////////////////////////////////////////////////////////////////////////
		// Hide everything: instances are only ever obtained by reinterpreting a raw
		// buffer (see make_compressed_tracks below), never constructed directly.
		compressed_tracks() = delete;
		compressed_tracks(const compressed_tracks&) = delete;
		compressed_tracks(compressed_tracks&&) = delete;
		compressed_tracks* operator=(const compressed_tracks&) = delete;
		compressed_tracks* operator=(compressed_tracks&&) = delete;

		////////////////////////////////////////////////////////////////////////////////
		// Raw buffer header that isn't included in the hash.
		////////////////////////////////////////////////////////////////////////////////
		acl_impl::raw_buffer_header m_buffer_header;

		////////////////////////////////////////////////////////////////////////////////
		// Everything starting here is included in the hash.
		////////////////////////////////////////////////////////////////////////////////

		//////////////////////////////////////////////////////////////////////////
		// Compressed data follows here in memory.
		//////////////////////////////////////////////////////////////////////////

		// Here we define some unspecified padding but the 'tracks_header' starts here.
		// This is done to ensure that this class is 16 byte aligned without requiring further padding
		// if the 'tracks_header' ends up causing us to be unaligned.
		uint32_t m_padding[2];
	};
//////////////////////////////////////////////////////////////////////////
// Create a compressed_tracks instance in place from a raw memory buffer.
// If the buffer does not contain a valid compressed_tracks instance, nullptr is returned
// along with an optional error result.
//////////////////////////////////////////////////////////////////////////
const compressed_tracks* make_compressed_tracks(const void* buffer, error_result* out_error_result = nullptr);
}
#include "acl/core/impl/compressed_tracks.impl.h"
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,76 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2020 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
//////////////////////////////////////////////////////////////////////////
// Enum for versions used by 'compressed_tracks'.
// These values are used by serialization, do not change them once assigned.
// To add a new version, create a new entry and follow the naming and numbering scheme
// described below.
// The enum is stored on 16 bits (uint16_t) inside the compressed data.
enum class compressed_tracks_version16 : uint16_t
{
//////////////////////////////////////////////////////////////////////////
// Special version identifier used when decompressing.
// This indicates that any version is supported by decompression and
// the code isn't optimized for any one version in particular.
// It is not a valid value for compressed tracks.
any = 0,
//////////////////////////////////////////////////////////////////////////
// Actual versions in sequential order.
// Keep the enum values sequential.
// Enum value name should be of the form: major, minor, patch version.
// Two digits are reserved for safety and future proofing.
// The commented-out entries below document retired pre-2.0 format versions;
// they are kept for historical reference and cannot be read by this code.
//v00_01_00 = 0, // ACL v0.1.0
//v00_06_00 = 1, // ACL v0.6.0
//v00_08_00 = 2, // ACL v0.8.0
//v01_01_00 = 3, // ACL v1.1.0
//v01_02_00 = 4, // ACL v1.2.0
//v01_03_00 = 5, // ACL v1.3.0
//v01_99_99 = 6, // ACL v2.0.0-wip
v02_00_00 = 7, // ACL v2.0.0
//////////////////////////////////////////////////////////////////////////
// First version marker, this is equal to the first version supported: ACL 2.0.0
// Versions prior to ACL 2.0 are not backwards compatible.
// It is used for range checks.
first = v02_00_00,
//////////////////////////////////////////////////////////////////////////
// Always assigned to the latest version supported.
latest = v02_00_00,
};
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,100 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2017 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include <type_traits>
ACL_IMPL_FILE_PRAGMA_PUSH
// This macro defines common operators for manipulating bit flags.
// For a given enum type it generates:
//   - the binary operators | & ^ and the unary operator ~ (constexpr)
//   - the compound assignment operators |= &= ^= (non-constexpr, they mutate lhs)
// Every operation is performed on the unsigned variant of the enum's underlying
// type before casting back to the enum type, so the bitwise semantics are well
// defined even when the declared underlying type is signed.
// Note: no comments can appear inside the macro body itself because a '//'
// comment would swallow the trailing line-continuation backslash.
#define ACL_IMPL_ENUM_FLAGS_OPERATORS(EnumType) \
constexpr EnumType operator|(EnumType lhs, EnumType rhs) \
{ \
typedef typename std::underlying_type<EnumType>::type IntegralType; \
typedef typename std::make_unsigned<IntegralType>::type RawType; \
return static_cast<EnumType>(static_cast<RawType>(lhs) | static_cast<RawType>(rhs)); \
} \
inline void operator|=(EnumType& lhs, EnumType rhs) \
{ \
typedef typename std::underlying_type<EnumType>::type IntegralType; \
typedef typename std::make_unsigned<IntegralType>::type RawType; \
lhs = static_cast<EnumType>(static_cast<RawType>(lhs) | static_cast<RawType>(rhs)); \
} \
constexpr EnumType operator&(EnumType lhs, EnumType rhs) \
{ \
typedef typename std::underlying_type<EnumType>::type IntegralType; \
typedef typename std::make_unsigned<IntegralType>::type RawType; \
return static_cast<EnumType>(static_cast<RawType>(lhs) & static_cast<RawType>(rhs)); \
} \
inline void operator&=(EnumType& lhs, EnumType rhs) \
{ \
typedef typename std::underlying_type<EnumType>::type IntegralType; \
typedef typename std::make_unsigned<IntegralType>::type RawType; \
lhs = static_cast<EnumType>(static_cast<RawType>(lhs) & static_cast<RawType>(rhs)); \
} \
constexpr EnumType operator^(EnumType lhs, EnumType rhs) \
{ \
typedef typename std::underlying_type<EnumType>::type IntegralType; \
typedef typename std::make_unsigned<IntegralType>::type RawType; \
return static_cast<EnumType>(static_cast<RawType>(lhs) ^ static_cast<RawType>(rhs)); \
} \
inline void operator^=(EnumType& lhs, EnumType rhs) \
{ \
typedef typename std::underlying_type<EnumType>::type IntegralType; \
typedef typename std::make_unsigned<IntegralType>::type RawType; \
lhs = static_cast<EnumType>(static_cast<RawType>(lhs) ^ static_cast<RawType>(rhs)); \
} \
constexpr EnumType operator~(EnumType rhs) \
{ \
typedef typename std::underlying_type<EnumType>::type IntegralType; \
typedef typename std::make_unsigned<IntegralType>::type RawType; \
return static_cast<EnumType>(~static_cast<RawType>(rhs)); \
}
namespace acl
{
	////////////////////////////////////////////////////////////////////////////////
	// Returns true when at least one flag from 'flags_to_test' is set in 'flags'.
	template<typename enum_type>
	constexpr bool are_any_enum_flags_set(enum_type flags, enum_type flags_to_test)
	{
		return static_cast<typename std::underlying_type<enum_type>::type>(flags & flags_to_test) != 0;
	}

	////////////////////////////////////////////////////////////////////////////////
	// Returns true only when every flag from 'flags_to_test' is set in 'flags'.
	template<typename enum_type>
	constexpr bool are_all_enum_flags_set(enum_type flags, enum_type flags_to_test)
	{
		return static_cast<typename std::underlying_type<enum_type>::type>(flags & flags_to_test)
			== static_cast<typename std::underlying_type<enum_type>::type>(flags_to_test);
	}
}
ACL_IMPL_FILE_PRAGMA_POP

180
sources/acl/core/error.h Normal file
View File

@ -0,0 +1,180 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2017 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
ACL_IMPL_FILE_PRAGMA_PUSH
//////////////////////////////////////////////////////////////////////////
// This library uses a simple system to handle asserts. Asserts are fatal and must terminate
// otherwise the behavior is undefined if execution continues.
//
// A total of 4 behaviors are supported:
// - We can print to stderr and abort
// - We can throw and exception
// - We can call a custom function
// - Do nothing and strip the check at compile time (default behavior)
//
// Aborting:
// In order to enable the aborting behavior, simply define the macro ACL_ON_ASSERT_ABORT:
// #define ACL_ON_ASSERT_ABORT
//
// Throwing:
// In order to enable the throwing behavior, simply define the macro ACL_ON_ASSERT_THROW:
// #define ACL_ON_ASSERT_THROW
// Note that the type of the exception thrown is std::runtime_error.
//
// Custom function:
// In order to enable the custom function calling behavior, define the macro ACL_ON_ASSERT_CUSTOM
// with the name of the function to call:
// #define ACL_ON_ASSERT_CUSTOM on_custom_assert_impl
// Note that the function signature is as follow:
// void on_custom_assert_impl(const char* expression, int line, const char* file, const char* format, ...) {}
//
// You can also define your own assert implementation by defining the ACL_ASSERT macro as well:
// #define ACL_ON_ASSERT_CUSTOM
// #define ACL_ASSERT(expression, format, ...) checkf(expression, ANSI_TO_TCHAR(format), #__VA_ARGS__)
//
// No checks:
// By default if no macro mentioned above is defined, all asserts will be stripped
// at compile time.
//////////////////////////////////////////////////////////////////////////
#if defined(ACL_ON_ASSERT_ABORT)
#include <cstdio>
#include <cstdarg>
#include <cstdlib>
namespace acl
{
namespace error_impl
{
// Assert handler for the 'abort' behavior: prints the formatted message to
// stderr followed by a newline, then terminates the process with std::abort().
//
// expression/line/file: Currently unused (explicitly discarded below); only
// the printf-style 'format' and its variadic arguments are printed.
inline void on_assert_abort(const char* expression, int line, const char* file, const char* format, ...)
{
(void)expression;
(void)line;
(void)file;
va_list args;
va_start(args, format);
std::vfprintf(stderr, format, args);
std::fprintf(stderr, "\n");
va_end(args);
std::abort();
}
}
}
// Evaluates 'expression' and invokes the abort handler when it is false.
#define ACL_ASSERT(expression, format, ...) if (!(expression)) acl::error_impl::on_assert_abort(#expression, __LINE__, __FILE__, (format), ## __VA_ARGS__)
// Marker: assert checks are compiled in for this configuration.
#define ACL_HAS_ASSERT_CHECKS
// noexcept is safe here: the abort handler never throws.
#define ACL_NO_EXCEPT noexcept
#elif defined(ACL_ON_ASSERT_THROW)
#include <cstdio>
#include <cstdarg>
#include <string>
#include <stdexcept>
namespace acl
{
// Exception type thrown when an assert fails under ACL_ON_ASSERT_THROW.
// Derives from std::runtime_error so callers can catch either type.
class runtime_assert final : public std::runtime_error
{
public:
	// Constructs the exception from a std::string message.
	explicit runtime_assert(const std::string& message) : std::runtime_error(message) {}
	// Constructs the exception from a null terminated C string message.
	explicit runtime_assert(const char* message) : std::runtime_error(message) {}
};
namespace error_impl
{
// Assert handler for the 'throw' behavior: formats the message into a local
// 64 KiB stack buffer and throws it as an acl::runtime_assert exception.
// If formatting fails or the message does not fit in the buffer, a generic
// fallback message is thrown instead.
//
// expression/line/file: Currently unused (explicitly discarded below).
inline void on_assert_throw(const char* expression, int line, const char* file, const char* format, ...)
{
(void)expression;
(void)line;
(void)file;
constexpr int buffer_size = 64 * 1024;
char buffer[buffer_size];
va_list args;
va_start(args, format);
const int count = vsnprintf(buffer, buffer_size, format, args);
va_end(args);
// vsnprintf returns a negative value on encoding errors and the would-be
// length on truncation; both cases fall through to the fallback message.
if (count >= 0 && count < buffer_size)
throw runtime_assert(std::string(&buffer[0], count));
else
throw runtime_assert("Failed to format assert message!\n");
}
}
}
// Evaluates 'expression' and invokes the throwing handler when it is false.
#define ACL_ASSERT(expression, format, ...) if (!(expression)) acl::error_impl::on_assert_throw(#expression, __LINE__, __FILE__, (format), ## __VA_ARGS__)
// Marker: assert checks are compiled in for this configuration.
#define ACL_HAS_ASSERT_CHECKS
// Functions cannot be noexcept in this configuration: the assert handler throws.
#define ACL_NO_EXCEPT
#elif defined(ACL_ON_ASSERT_CUSTOM)
// Custom behavior: forward failed asserts to the user-provided function,
// unless the user also supplied their own ACL_ASSERT macro.
#if !defined(ACL_ASSERT)
#define ACL_ASSERT(expression, format, ...) if (!(expression)) ACL_ON_ASSERT_CUSTOM(#expression, __LINE__, __FILE__, (format), ## __VA_ARGS__)
#endif
// Marker: assert checks are compiled in for this configuration.
#define ACL_HAS_ASSERT_CHECKS
// NOTE(review): left empty presumably because a custom handler may throw.
#define ACL_NO_EXCEPT
#else
// Default behavior: asserts compile away entirely.
#define ACL_ASSERT(expression, format, ...) ((void)0)
// With no assert handler, functions can safely be marked noexcept.
#define ACL_NO_EXCEPT noexcept
#endif
//////////////////////////////////////////////////////////////////////////
// Allow deprecation support
// Prefer the standard [[deprecated]] attribute when C++14 is available,
// otherwise fall back to compiler-specific spellings (which ignore 'msg'),
// and finally to a no-op when the compiler is unknown.
#if defined(__has_cpp_attribute) && __cplusplus >= 201402L
#if __has_cpp_attribute(deprecated)
#define ACL_DEPRECATED(msg) [[deprecated(msg)]]
#endif
#endif
#if !defined(ACL_DEPRECATED)
#if defined(__GNUC__) || defined(__clang__)
#define ACL_DEPRECATED(msg) __attribute__((deprecated))
#elif defined(_MSC_VER)
#define ACL_DEPRECATED(msg) __declspec(deprecated)
#else
#define ACL_DEPRECATED(msg)
#endif
#endif
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,56 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2018 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
//////////////////////////////////////////////////////////////////////////
// Holds an optional, statically provided error message and coerces easily
// into success/failure checks through empty()/any(), keeping error handling
// terse at call sites. The message is referenced, never copied.
//////////////////////////////////////////////////////////////////////////
class error_result
{
public:
	// Creates a result representing success (no error message).
	error_result()
		: m_error(nullptr)
	{}

	// Creates a result representing failure with the given message.
	// The string is referenced, not copied; it must outlive this object.
	explicit error_result(const char* error)
		: m_error(error)
	{}

	// True when no error message is present (success).
	bool empty() const { return m_error == nullptr; }

	// True when an error message is present (failure).
	bool any() const { return !empty(); }

	// The error message when present, an empty string otherwise.
	const char* c_str() const
	{
		if (m_error == nullptr)
			return "";

		return m_error;
	}

	// Clears any error, turning this result into a success.
	void reset() { m_error = nullptr; }

private:
	// The optional error message; nullptr means success.
	const char* m_error;
};
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,152 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2019 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include <rtm/math.h>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
//////////////////////////////////////////////////////////////////////////
// Stores the floating point environment information.
// On SSE2 builds this holds the MXCSR exception mask bits captured by
// enable/disable_fp_exceptions; on other platforms the struct is empty.
//////////////////////////////////////////////////////////////////////////
struct fp_environment
{
#if defined(RTM_SSE2_INTRINSICS)
// Exception mask previously returned by _MM_GET_EXCEPTION_MASK().
unsigned int exception_mask;
#elif defined(RTM_NEON_INTRINSICS)
// TODO: Implement on ARM. API to do this is not consistent across Android, Windows ARM, and iOS
// and on top of it, most ARM CPUs out there do not raise the SIGFPE trap so they are silent
#endif
};
//////////////////////////////////////////////////////////////////////////
// Enables floating point exceptions for invalid operations, division by zero, and overflow.
//
// out_old_env: Receives the previous exception mask so it can later be
// restored with restore_fp_exceptions. Left untouched on non-SSE2 builds.
//////////////////////////////////////////////////////////////////////////
inline void enable_fp_exceptions(fp_environment& out_old_env)
{
#if defined(RTM_SSE2_INTRINSICS)
// We only care about SSE and not x87
// Clear any exceptions that might have been raised already
_MM_SET_EXCEPTION_STATE(0);
// Cache the exception mask we had so we can restore it later
out_old_env.exception_mask = _MM_GET_EXCEPTION_MASK();
// Enable our exceptions
// In MXCSR a cleared mask bit means the exception traps: unmask the three
// we care about and keep everything else masked.
const unsigned int exception_flags = _MM_MASK_INVALID | _MM_MASK_DIV_ZERO | _MM_MASK_OVERFLOW;
_MM_SET_EXCEPTION_MASK(~exception_flags & _MM_MASK_MASK);
#else
// No-op on platforms without SSE2 intrinsics support.
(void)out_old_env;
#endif
}
//////////////////////////////////////////////////////////////////////////
// Disables all floating point exceptions.
//
// out_old_env: Receives the previous exception mask so it can later be
// restored with restore_fp_exceptions. Left untouched on non-SSE2 builds.
//////////////////////////////////////////////////////////////////////////
inline void disable_fp_exceptions(fp_environment& out_old_env)
{
#if defined(RTM_SSE2_INTRINSICS)
// We only care about SSE and not x87
// Cache the exception mask we had so we can restore it later
out_old_env.exception_mask = _MM_GET_EXCEPTION_MASK();
// Disable all exceptions
// Setting every MXCSR mask bit suppresses all SSE floating point exceptions.
_MM_SET_EXCEPTION_MASK(_MM_MASK_MASK);
#else
// No-op on platforms without SSE2 intrinsics support.
(void)out_old_env;
#endif
}
//////////////////////////////////////////////////////////////////////////
// Restores a previously set floating point environment.
//
// env: Environment captured earlier by enable_fp_exceptions or
// disable_fp_exceptions. No-op on non-SSE2 builds.
//////////////////////////////////////////////////////////////////////////
inline void restore_fp_exceptions(const fp_environment& env)
{
#if defined(RTM_SSE2_INTRINSICS)
// We only care about SSE and not x87
// Clear any exceptions that might have been raised already
_MM_SET_EXCEPTION_STATE(0);
// Restore our old mask value
_MM_SET_EXCEPTION_MASK(env.exception_mask);
#else
// No-op on platforms without SSE2 intrinsics support.
(void)env;
#endif
}
//////////////////////////////////////////////////////////////////////////
// Enables floating point exceptions in the parent scope for invalid operations, division by zero, and overflow.
// RAII helper: the constructor enables the exceptions and the destructor
// restores whatever environment was active beforehand.
//////////////////////////////////////////////////////////////////////////
class scope_enable_fp_exceptions
{
public:
// Captures the current environment and enables FP exceptions.
scope_enable_fp_exceptions()
{
enable_fp_exceptions(env);
}
// Restores the environment captured by the constructor.
~scope_enable_fp_exceptions()
{
restore_fp_exceptions(env);
}
private:
// Prevent copy or move
scope_enable_fp_exceptions(const scope_enable_fp_exceptions&) = delete;
scope_enable_fp_exceptions(scope_enable_fp_exceptions&&) = delete;
scope_enable_fp_exceptions& operator=(const scope_enable_fp_exceptions&) = delete;
scope_enable_fp_exceptions& operator=(scope_enable_fp_exceptions&&) = delete;
// The environment captured when this scope was entered.
fp_environment env;
};
//////////////////////////////////////////////////////////////////////////
// Disables all floating point exceptions in the parent scope.
// RAII helper: the constructor disables the exceptions and the destructor
// restores whatever environment was active beforehand.
//////////////////////////////////////////////////////////////////////////
class scope_disable_fp_exceptions
{
public:
// Captures the current environment and disables all FP exceptions.
scope_disable_fp_exceptions()
{
disable_fp_exceptions(env);
}
// Restores the environment captured by the constructor.
~scope_disable_fp_exceptions()
{
restore_fp_exceptions(env);
}
private:
// Prevent copy or move
scope_disable_fp_exceptions(const scope_disable_fp_exceptions&) = delete;
scope_disable_fp_exceptions(scope_disable_fp_exceptions&&) = delete;
scope_disable_fp_exceptions& operator=(const scope_disable_fp_exceptions&) = delete;
scope_disable_fp_exceptions& operator=(scope_disable_fp_exceptions&&) = delete;
// The environment captured when this scope was entered.
fp_environment env;
};
}
ACL_IMPL_FILE_PRAGMA_POP

133
sources/acl/core/hash.h Normal file
View File

@ -0,0 +1,133 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2017 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include <cstdint>
#include <cstring>
#include <type_traits>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
	namespace hash_impl
	{
		////////////////////////////////////////////////////////////////////////////////
		// Fowler-Noll-Vo 1a (FNV-1a) hash implementation, parameterized on the
		// digest width through its result type, offset basis, and prime constants.
		////////////////////////////////////////////////////////////////////////////////
		template <typename ResultType, ResultType OffsetBasis, ResultType Prime>
		class fnv1a_impl final
		{
		public:
			////////////////////////////////////////////////////////////////////////////////
			// Seeds the running digest with the offset basis.
			constexpr fnv1a_impl()
				: m_state(OffsetBasis)
			{}

			////////////////////////////////////////////////////////////////////////////////
			// Folds the provided memory buffer into the running digest, one byte at a time.
			//
			// data: A pointer to the memory buffer to hash.
			// size: The memory buffer size in bytes to hash.
			void update(const void* data, size_t size)
			{
				const uint8_t* bytes = static_cast<const uint8_t*>(data);
				ResultType digest_value = m_state;

				for (const uint8_t* it = bytes; it != bytes + size; ++it)
					digest_value = (digest_value ^ static_cast<ResultType>(*it)) * Prime;

				m_state = digest_value;
			}

			////////////////////////////////////////////////////////////////////////////////
			// Returns the current hash digest value.
			constexpr ResultType digest() const { return m_state; }

		private:
			static_assert(std::is_unsigned<ResultType>::value, "need unsigned integer");

			// The running hash digest value.
			ResultType m_state;
		};
	}

	////////////////////////////////////////////////////////////////////////////////
	// A 32 bit FNV-1a hash instance type.
	using fnv1a_32 = hash_impl::fnv1a_impl<uint32_t, 2166136261U, 16777619U>;

	////////////////////////////////////////////////////////////////////////////////
	// A 64 bit FNV-1a hash instance type.
	using fnv1a_64 = hash_impl::fnv1a_impl<uint64_t, 14695981039346656037ULL, 1099511628211ULL>;

	////////////////////////////////////////////////////////////////////////////////
	// Returns the 32 bit hash of the provided buffer and size in bytes.
	inline uint32_t hash32(const void* buffer, size_t buffer_size)
	{
		fnv1a_32 fn;
		fn.update(buffer, buffer_size);
		return fn.digest();
	}

	////////////////////////////////////////////////////////////////////////////////
	// Returns the 32 bit hash of the provided element's raw bytes.
	template<typename ElementType>
	inline uint32_t hash32(const ElementType& element) { return hash32(&element, sizeof(ElementType)); }

	////////////////////////////////////////////////////////////////////////////////
	// Returns the 32 bit hash of the provided string.
	// The null terminator is not included in the hash.
	inline uint32_t hash32(const char* str) { return hash32(str, std::strlen(str)); }

	////////////////////////////////////////////////////////////////////////////////
	// Returns the 64 bit hash of the provided buffer and size in bytes.
	inline uint64_t hash64(const void* buffer, size_t buffer_size)
	{
		fnv1a_64 fn;
		fn.update(buffer, buffer_size);
		return fn.digest();
	}

	////////////////////////////////////////////////////////////////////////////////
	// Returns the 64 bit hash of the provided element's raw bytes.
	template<typename ElementType>
	inline uint64_t hash64(const ElementType& element) { return hash64(&element, sizeof(ElementType)); }

	////////////////////////////////////////////////////////////////////////////////
	// Returns the 64 bit hash of the provided string.
	// The null terminator is not included in the hash.
	inline uint64_t hash64(const char* str) { return hash64(str, std::strlen(str)); }

	////////////////////////////////////////////////////////////////////////////////
	// Combines two hashes into a new one by xor-folding and multiplying with the
	// matching FNV prime.
	inline uint32_t hash_combine(uint32_t hash_a, uint32_t hash_b) { return (hash_a ^ hash_b) * 16777619U; }
	inline uint64_t hash_combine(uint64_t hash_a, uint64_t hash_b) { return (hash_a ^ hash_b) * 1099511628211ULL; }
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,146 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2018 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/error.h"
#include "acl/core/memory_utils.h"
#include "acl/core/impl/compiler_utils.h"
#include <type_traits>
#include <utility>
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
////////////////////////////////////////////////////////////////////////////////
// A simple memory allocator interface.
//
// In order to integrate this library into your own code base, you will
// need to provide some functions with an allocator instance that derives
// from this interface.
//
// See ansi_allocator.h for an implementation that uses the system malloc/free.
////////////////////////////////////////////////////////////////////////////////
class iallocator
{
public:
// Default alignment (in bytes) used when none is provided to allocate().
static constexpr size_t k_default_alignment = 16;
iallocator() {}
virtual ~iallocator() {}
// Non-copyable and non-assignable.
iallocator(const iallocator&) = delete;
iallocator& operator=(const iallocator&) = delete;
////////////////////////////////////////////////////////////////////////////////
// Allocates memory with the specified size and alignment.
//
// size: Size in bytes to allocate.
// alignment: Alignment to allocate the memory with.
virtual void* allocate(size_t size, size_t alignment = k_default_alignment) = 0;
////////////////////////////////////////////////////////////////////////////////
// Deallocates previously allocated memory and releases it.
//
// ptr: A pointer to memory previously allocated or nullptr.
// size: Size in bytes of the allocation. This will match the original size requested through 'allocate'.
virtual void deallocate(void* ptr, size_t size) = 0;
};
//////////////////////////////////////////////////////////////////////////
// Allocates memory for a single 'allocated_type' with its natural alignment
// and constructs it by forwarding 'arguments' to its constructor.
// NOTE(review): when 'allocated_type' is trivially default constructible the
// raw allocation is returned without running any constructor, so any
// 'arguments' provided are silently ignored in that case.
template<typename allocated_type, typename... args>
allocated_type* allocate_type(iallocator& allocator, args&&... arguments)
{
allocated_type* ptr = reinterpret_cast<allocated_type*>(allocator.allocate(sizeof(allocated_type), alignof(allocated_type)));
if (acl_impl::is_trivially_default_constructible<allocated_type>::value)
return ptr;
return new(ptr) allocated_type(std::forward<args>(arguments)...);
}
// Allocates memory for a single 'allocated_type' with the caller-provided
// alignment and constructs it by forwarding 'arguments' to its constructor.
//
// alignment: Must be a power of two at least equal to alignof(allocated_type).
// NOTE: when 'allocated_type' is trivially default constructible the raw
// allocation is returned without running any constructor and 'arguments'
// are ignored.
template<typename allocated_type, typename... args>
allocated_type* allocate_type_aligned(iallocator& allocator, size_t alignment, args&&... arguments)
{
	// Use %zu for both format arguments: they are size_t values and %u is
	// undefined behavior for size_t in varargs. This also matches the
	// specifiers already used by allocate_type_array_aligned below.
	ACL_ASSERT(is_alignment_valid<allocated_type>(alignment), "Invalid alignment: %zu. Expected a power of two at least equal to %zu", alignment, alignof(allocated_type));
	allocated_type* ptr = reinterpret_cast<allocated_type*>(allocator.allocate(sizeof(allocated_type), alignment));
	if (acl_impl::is_trivially_default_constructible<allocated_type>::value)
		return ptr;
	return new(ptr) allocated_type(std::forward<args>(arguments)...);
}
// Destroys and deallocates a single instance previously created through
// allocate_type/allocate_type_aligned. Passing nullptr is a safe no-op.
template<typename allocated_type>
void deallocate_type(iallocator& allocator, allocated_type* ptr)
{
	if (ptr != nullptr)
	{
		// Run the destructor only when it is non-trivial.
		if (!std::is_trivially_destructible<allocated_type>::value)
			ptr->~allocated_type();

		allocator.deallocate(ptr, sizeof(allocated_type));
	}
}
// Allocates memory for an array of 'num_elements' instances of
// 'allocated_type' with natural alignment and constructs each element by
// forwarding 'arguments' to its constructor.
// NOTE(review): when 'allocated_type' is trivially default constructible the
// raw allocation is returned unconstructed and 'arguments' are ignored.
// NOTE(review): 'sizeof(allocated_type) * num_elements' is not checked for
// overflow; callers must provide a sane element count.
template<typename allocated_type, typename... args>
allocated_type* allocate_type_array(iallocator& allocator, size_t num_elements, args&&... arguments)
{
allocated_type* ptr = reinterpret_cast<allocated_type*>(allocator.allocate(sizeof(allocated_type) * num_elements, alignof(allocated_type)));
if (acl_impl::is_trivially_default_constructible<allocated_type>::value)
return ptr;
for (size_t element_index = 0; element_index < num_elements; ++element_index)
new(&ptr[element_index]) allocated_type(std::forward<args>(arguments)...);
return ptr;
}
// Allocates memory for an array of 'num_elements' instances of
// 'allocated_type' with the caller-provided alignment and constructs each
// element by forwarding 'arguments' to its constructor.
//
// alignment: Must be a power of two at least equal to alignof(allocated_type).
// NOTE(review): when 'allocated_type' is trivially default constructible the
// raw allocation is returned unconstructed and 'arguments' are ignored.
template<typename allocated_type, typename... args>
allocated_type* allocate_type_array_aligned(iallocator& allocator, size_t num_elements, size_t alignment, args&&... arguments)
{
ACL_ASSERT(is_alignment_valid<allocated_type>(alignment), "Invalid alignment: %zu. Expected a power of two at least equal to %zu", alignment, alignof(allocated_type));
allocated_type* ptr = reinterpret_cast<allocated_type*>(allocator.allocate(sizeof(allocated_type) * num_elements, alignment));
if (acl_impl::is_trivially_default_constructible<allocated_type>::value)
return ptr;
for (size_t element_index = 0; element_index < num_elements; ++element_index)
new(&ptr[element_index]) allocated_type(std::forward<args>(arguments)...);
return ptr;
}
template<typename allocated_type>
void deallocate_type_array(iallocator& allocator, allocated_type* elements, size_t num_elements)
{
	// Deallocating a null array is a no-op.
	if (elements == nullptr)
		return;

	// Destroy every element (in forward order) unless destruction is trivial.
	if (!std::is_trivially_destructible<allocated_type>::value)
	{
		allocated_type* const end_element = elements + num_elements;
		for (allocated_type* element = elements; element != end_element; ++element)
			element->~allocated_type();
	}

	allocator.deallocate(elements, sizeof(allocated_type) * num_elements);
}
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,134 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2018 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include <type_traits>
#if !defined(__GNUG__) || defined(_LIBCPP_VERSION) || defined(_GLIBCXX_USE_CXX11_ABI)
#include <cstdlib>
#else
#include <stdlib.h>
#endif
//////////////////////////////////////////////////////////////////////////
// Macro to identify GCC
// Note: clang also defines __GNUG__, hence the explicit __clang__ exclusion.
//////////////////////////////////////////////////////////////////////////
#if defined(__GNUG__) && !defined(__clang__)
#define ACL_COMPILER_GCC
#endif
//////////////////////////////////////////////////////////////////////////
// Macro to identify Clang
//////////////////////////////////////////////////////////////////////////
#if defined(__clang__)
#define ACL_COMPILER_CLANG
#endif
//////////////////////////////////////////////////////////////////////////
// Macro to identify MSVC
// Note: clang-cl also defines _MSC_VER, hence the explicit __clang__ exclusion.
//////////////////////////////////////////////////////////////////////////
#if defined(_MSC_VER) && !defined(__clang__)
#define ACL_COMPILER_MSVC
#endif
//////////////////////////////////////////////////////////////////////////
// Because this library is made entirely of headers, we have no control over the
// compilation flags used. However, in some cases, certain options must be forced.
// To do this, every header is wrapped in two macros to push and pop the necessary
// pragmas.
//////////////////////////////////////////////////////////////////////////
#if defined(ACL_COMPILER_MSVC)
#define ACL_IMPL_FILE_PRAGMA_PUSH \
/* Disable fast math, it can hurt precision for little to no performance gain due to the high level of hand tuned optimizations. */ \
__pragma(float_control(precise, on, push))
#define ACL_IMPL_FILE_PRAGMA_POP \
__pragma(float_control(pop))
#else
// Non-MSVC compilers: no pragma is needed, both macros expand to nothing.
#define ACL_IMPL_FILE_PRAGMA_PUSH
#define ACL_IMPL_FILE_PRAGMA_POP
#endif
//////////////////////////////////////////////////////////////////////////
// In some cases, for performance reasons, we wish to disable stack security
// check cookies. This macro serves this purpose.
//////////////////////////////////////////////////////////////////////////
#if defined(ACL_COMPILER_MSVC)
#define ACL_DISABLE_SECURITY_COOKIE_CHECK __declspec(safebuffers)
#else
// Only MSVC exposes a per-function opt-out; elsewhere the macro expands to nothing.
#define ACL_DISABLE_SECURITY_COOKIE_CHECK
#endif
//////////////////////////////////////////////////////////////////////////
// Force inline macros for when it is necessary.
//////////////////////////////////////////////////////////////////////////
#if defined(ACL_COMPILER_MSVC)
#define ACL_FORCE_INLINE __forceinline
#elif defined(ACL_COMPILER_GCC) || defined(ACL_COMPILER_CLANG)
#define ACL_FORCE_INLINE __attribute__((always_inline)) inline
#else
// Unknown compiler: fall back to a plain inline hint.
#define ACL_FORCE_INLINE inline
#endif
namespace acl
{
	//////////////////////////////////////////////////////////////////////////
	// The version of the STL shipped with versions of GCC older than 5.1 are missing a number of type traits and functions,
	// such as std::is_trivially_default_constructible.
	// In this case, we polyfill the proper standard names using the deprecated std::has_trivial_default_constructor.
	// This must also be done when the compiler is clang when it makes use of the GCC implementation of the STL,
	// which is the default behavior on linux. Properly detecting the version of the GCC STL used by clang cannot
	// be done with the __GNUC__ macro, which are overridden by clang. Instead, we check for the definition
	// of the macro ``_GLIBCXX_USE_CXX11_ABI`` which is only defined with GCC versions greater than 5.
	//////////////////////////////////////////////////////////////////////////
	namespace acl_impl
	{
#if !defined(__GNUG__) || defined(_LIBCPP_VERSION) || defined(_GLIBCXX_USE_CXX11_ABI)
		// Modern STL: alias the standard names directly.
		using std::strtoull;
		template <class Type>
		using is_trivially_default_constructible = std::is_trivially_default_constructible<Type>;
#else
		// Old libstdc++ (pre GCC 5.1): use the global C function and the deprecated trait.
		using ::strtoull;
		template <class Type>
		using is_trivially_default_constructible = std::has_trivial_default_constructor<Type>;
#endif
	}
}
//////////////////////////////////////////////////////////////////////////
// Silence compiler warnings within switch cases that fall through
// Note: C++17 has [[fallthrough]];
//////////////////////////////////////////////////////////////////////////
#if defined(ACL_COMPILER_GCC) || defined(ACL_COMPILER_CLANG)
#if defined(__has_attribute) && __has_attribute(fallthrough)
#define ACL_SWITCH_CASE_FALLTHROUGH_INTENTIONAL __attribute__ ((fallthrough))
#else
#define ACL_SWITCH_CASE_FALLTHROUGH_INTENTIONAL (void)0
#endif
#else
// MSVC and unknown compilers: a harmless no-op statement.
#define ACL_SWITCH_CASE_FALLTHROUGH_INTENTIONAL (void)0
#endif

View File

@ -0,0 +1,299 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2019 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/algorithm_types.h"
#include "acl/core/compressed_tracks_version.h"
#include "acl/core/ptr_offset.h"
#include "acl/core/range_reduction_types.h"
#include "acl/core/track_formats.h"
#include "acl/core/track_types.h"
#include "acl/core/impl/compiler_utils.h"
#include <cstdint>
// This is a bit slower because of the added bookkeeping when we unpack
//#define ACL_IMPL_USE_CONSTANT_GROUPS
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
class compressed_tracks;
namespace acl_impl
{
// Common header to all binary formats
struct raw_buffer_header
{
// Total size in bytes of the raw buffer.
uint32_t size;
// Hash of the raw buffer. Covers the bytes that follow this header: validation
// hashes 'size - sizeof(raw_buffer_header)' bytes (see compressed_tracks::is_valid).
uint32_t hash;
};
// Header for 'compressed_tracks'
struct tracks_header
{
	// Serialization tag used to distinguish raw buffer types.
	uint32_t tag;
	// Serialization version used to compress the tracks.
	compressed_tracks_version16 version;
	// Algorithm type used to compress the tracks.
	algorithm_type8 algorithm_type;
	// Type of the tracks contained in this compressed stream.
	track_type8 track_type;
	// The total number of tracks.
	uint32_t num_tracks;
	// The total number of samples per track.
	uint32_t num_samples;
	// The sample rate our tracks use.
	float sample_rate; // TODO: Store duration as float instead?
	// Miscellaneous packed values
	uint32_t misc_packed;

	//////////////////////////////////////////////////////////////////////////
	// Accessors for 'misc_packed'
	// Scalar tracks use it like this (listed from LSB):
	// Bits [0, 31): unused (31 bits)
	// Bit [31, 32): has metadata?
	// Transform tracks use it like this (listed from LSB):
	// Bit 0: has scale?
	// Bit 1: default scale: 0,0,0 or 1,1,1 (bool/bit)
	// Bit 2: scale format
	// Bit 3: translation format
	// Bits [4, 8): rotation format (4 bits)
	// Bits [8, 31): unused (23 bits)
	// Bit [31, 32): has metadata?
	//
	// Note: mask constants use unsigned literals. In particular '1 << 31' shifts a
	// signed int into the sign bit which is undefined/implementation-defined
	// behavior; '1u << 31' is well defined and produces the same mask.
	rotation_format8 get_rotation_format() const { return static_cast<rotation_format8>((misc_packed >> 4) & 15); }
	void set_rotation_format(rotation_format8 format) { misc_packed = (misc_packed & ~(15u << 4)) | (static_cast<uint32_t>(format) << 4); }
	vector_format8 get_translation_format() const { return static_cast<vector_format8>((misc_packed >> 3) & 1); }
	void set_translation_format(vector_format8 format) { misc_packed = (misc_packed & ~(1u << 3)) | (static_cast<uint32_t>(format) << 3); }
	vector_format8 get_scale_format() const { return static_cast<vector_format8>((misc_packed >> 2) & 1); }
	void set_scale_format(vector_format8 format) { misc_packed = (misc_packed & ~(1u << 2)) | (static_cast<uint32_t>(format) << 2); }
	int32_t get_default_scale() const { return (misc_packed >> 1) & 1; }
	void set_default_scale(uint32_t scale) { ACL_ASSERT(scale == 0 || scale == 1, "Invalid default scale"); misc_packed = (misc_packed & ~(1u << 1)) | (scale << 1); }
	bool get_has_scale() const { return (misc_packed & 1) != 0; }
	void set_has_scale(bool has_scale) { misc_packed = (misc_packed & ~1u) | static_cast<uint32_t>(has_scale); }
	bool get_has_metadata() const { return (misc_packed >> 31) != 0; }
	void set_has_metadata(bool has_metadata) { misc_packed = (misc_packed & ~(1u << 31)) | (static_cast<uint32_t>(has_metadata) << 31); }
};
// Scalar track metadata
struct track_metadata
{
// The per-track bit rate selected during compression.
uint8_t bit_rate;
};
// Header for scalar 'compressed_tracks'
struct scalar_tracks_header
{
// The number of bits used for a whole frame of data.
// The sum of one sample per track with all bit rates taken into account.
uint32_t num_bits_per_frame;
// Various data offsets relative to the start of this header.
ptr_offset32<track_metadata> metadata_per_track;
ptr_offset32<float> track_constant_values;
ptr_offset32<float> track_range_values;
ptr_offset32<uint8_t> track_animated_values;
//////////////////////////////////////////////////////////////////////////
// Utility accessors resolving the offsets above relative to this header.
// Each comes in a mutable and a const flavor.
track_metadata* get_track_metadata() { return metadata_per_track.add_to(this); }
const track_metadata* get_track_metadata() const { return metadata_per_track.add_to(this); }
float* get_track_constant_values() { return track_constant_values.add_to(this); }
const float* get_track_constant_values() const { return track_constant_values.add_to(this); }
float* get_track_range_values() { return track_range_values.add_to(this); }
const float* get_track_range_values() const { return track_range_values.add_to(this); }
uint8_t* get_track_animated_values() { return track_animated_values.add_to(this); }
const uint8_t* get_track_animated_values() const { return track_animated_values.add_to(this); }
};
////////////////////////////////////////////////////////////////////////////////
// A compressed clip segment header. Each segment is built from a uniform number
// of samples per track. A clip is split into one or more segments.
////////////////////////////////////////////////////////////////////////////////
struct segment_header
{
// Number of bits used by a fully animated pose (excludes default/constant tracks).
uint32_t animated_pose_bit_size;
// Offset to the animated segment data
// Segment data is partitioned as follows:
//    - format per variable track (no alignment)
//    - range data per variable track (only when more than one segment) (2 byte alignment)
//    - track data sorted per sample then per track (4 byte alignment)
// See transform_tracks_header::get_segment_data for the code that carves this up.
ptr_offset32<uint8_t> segment_data;
};
//////////////////////////////////////////////////////////////////////////
// A packed structure with metadata for animated groups.
//////////////////////////////////////////////////////////////////////////
struct animated_group_metadata
{
	// Bits [0, 14): the group size
	// Bits [14, 16): the group type
	uint16_t metadata;

	// A group is valid unless every bit is set (the 0xFFFF terminator sentinel).
	bool is_valid() const { return metadata != 0xFFFF; }

	// The group type lives in the top two bits [14, 16) per the layout above.
	// The previous shift of 6 overlapped the size field (bits [0, 14), see
	// get_size's mask) and corrupted both fields; shift by 14 instead.
	animation_track_type8 get_type() const { return static_cast<animation_track_type8>(metadata >> 14); }
	void set_type(animation_track_type8 type) { metadata = static_cast<uint16_t>((metadata & ~(3u << 14)) | (static_cast<uint32_t>(type) << 14)); }

	uint32_t get_size() const { return static_cast<uint32_t>(metadata) & ((1 << 14) - 1); }
	void set_size(uint32_t size) { ACL_ASSERT(size < (1 << 14), "Group size too large"); metadata = (metadata & ~((1 << 14) - 1)) | static_cast<uint16_t>(size); }
};
// Header for transform 'compressed_tracks'
struct transform_tracks_header
{
// The number of segments contained.
uint32_t num_segments;
// The number of animated rot/trans/scale tracks.
uint32_t num_animated_variable_sub_tracks; // Might be padded with dummy tracks for alignment
uint32_t num_animated_rotation_sub_tracks;
uint32_t num_animated_translation_sub_tracks;
uint32_t num_animated_scale_sub_tracks; // TODO: Not needed?
// The number of constant sub-track samples stored, does not include default samples
uint32_t num_constant_rotation_samples;
uint32_t num_constant_translation_samples;
uint32_t num_constant_scale_samples; // TODO: Not needed?
// Offset to the segment headers data.
ptr_offset32<segment_header> segment_headers_offset;
// Offsets to the default/constant tracks bitsets.
ptr_offset32<uint32_t> default_tracks_bitset_offset;
ptr_offset32<uint32_t> constant_tracks_bitset_offset;
// Offset to the constant tracks data.
ptr_offset32<uint8_t> constant_track_data_offset;
// Offset to the clip range data.
ptr_offset32<uint8_t> clip_range_data_offset; // TODO: Make this offset optional? Only present if normalized
// Offset to the animated group types. Ends with an invalid group type of 0xFF.
ptr_offset32<animation_track_type8> animated_group_types_offset;
//////////////////////////////////////////////////////////////////////////
// Utility functions that return pointers from their respective offsets.
uint32_t* get_segment_start_indices() { return num_segments > 1 ? add_offset_to_ptr<uint32_t>(this, align_to(sizeof(transform_tracks_header), 4)) : 0; }
const uint32_t* get_segment_start_indices() const { return num_segments > 1 ? add_offset_to_ptr<const uint32_t>(this, align_to(sizeof(transform_tracks_header), 4)) : 0; }
segment_header* get_segment_headers() { return segment_headers_offset.add_to(this); }
const segment_header* get_segment_headers() const { return segment_headers_offset.add_to(this); }
animation_track_type8* get_animated_group_types() { return animated_group_types_offset.add_to(this); }
const animation_track_type8* get_animated_group_types() const { return animated_group_types_offset.add_to(this); }
uint32_t* get_default_tracks_bitset() { return default_tracks_bitset_offset.add_to(this); }
const uint32_t* get_default_tracks_bitset() const { return default_tracks_bitset_offset.add_to(this); }
uint32_t* get_constant_tracks_bitset() { return constant_tracks_bitset_offset.add_to(this); }
const uint32_t* get_constant_tracks_bitset() const { return constant_tracks_bitset_offset.add_to(this); }
uint8_t* get_constant_track_data() { return constant_track_data_offset.safe_add_to(this); }
const uint8_t* get_constant_track_data() const { return constant_track_data_offset.safe_add_to(this); }
uint8_t* get_clip_range_data() { return clip_range_data_offset.safe_add_to(this); }
const uint8_t* get_clip_range_data() const { return clip_range_data_offset.safe_add_to(this); }
void get_segment_data(const segment_header& header, uint8_t*& out_format_per_track_data, uint8_t*& out_range_data, uint8_t*& out_animated_data)
{
uint8_t* segment_data = header.segment_data.add_to(this);
uint8_t* format_per_track_data = segment_data;
uint8_t* range_data = align_to(format_per_track_data + num_animated_variable_sub_tracks, 2);
const uint32_t range_data_size = num_segments > 1 ? (k_segment_range_reduction_num_bytes_per_component * 6 * num_animated_variable_sub_tracks) : 0;
uint8_t* animated_data = align_to(range_data + range_data_size, 4);
out_format_per_track_data = format_per_track_data;
out_range_data = range_data;
out_animated_data = animated_data;
}
void get_segment_data(const segment_header& header, const uint8_t*& out_format_per_track_data, const uint8_t*& out_range_data, const uint8_t*& out_animated_data) const
{
const uint8_t* segment_data = header.segment_data.add_to(this);
const uint8_t* format_per_track_data = segment_data;
const uint8_t* range_data = align_to(format_per_track_data + num_animated_variable_sub_tracks, 2);
const uint32_t range_data_size = num_segments > 1 ? (k_segment_range_reduction_num_bytes_per_component * 6 * num_animated_variable_sub_tracks) : 0;
const uint8_t* animated_data = align_to(range_data + range_data_size, 4);
out_format_per_track_data = format_per_track_data;
out_range_data = range_data;
out_animated_data = animated_data;
}
};
// Header for optional track metadata, must be at least 15 bytes
struct optional_metadata_header
{
// Offsets relative to the start of the compressed_tracks buffer (not this header).
// Individual entries may be invalid when that piece of metadata was not written.
ptr_offset32<char> track_list_name;
ptr_offset32<uint32_t> track_name_offsets;
ptr_offset32<uint32_t> parent_track_indices;
ptr_offset32<uint8_t> track_descriptions;
//////////////////////////////////////////////////////////////////////////
// Utility functions that return pointers from their respective offsets.
// Each uses safe_add_to — presumably returning nullptr for an invalid offset
// (callers such as compressed_tracks::get_name rely on that; verify in ptr_offset).
char* get_track_list_name(compressed_tracks& tracks) { return track_list_name.safe_add_to(&tracks); }
const char* get_track_list_name(const compressed_tracks& tracks) const { return track_list_name.safe_add_to(&tracks); }
uint32_t* get_track_name_offsets(compressed_tracks& tracks) { return track_name_offsets.safe_add_to(&tracks); }
const uint32_t* get_track_name_offsets(const compressed_tracks& tracks) const { return track_name_offsets.safe_add_to(&tracks); }
uint32_t* get_parent_track_indices(compressed_tracks& tracks) { return parent_track_indices.safe_add_to(&tracks); }
const uint32_t* get_parent_track_indices(const compressed_tracks& tracks) const { return parent_track_indices.safe_add_to(&tracks); }
uint8_t* get_track_descriptions(compressed_tracks& tracks) { return track_descriptions.safe_add_to(&tracks); }
const uint8_t* get_track_descriptions(const compressed_tracks& tracks) const { return track_descriptions.safe_add_to(&tracks); }
};
static_assert(sizeof(optional_metadata_header) >= 15, "Optional metadata must be at least 15 bytes");
}
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,229 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2020 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
// Included only once from compressed_tracks.h
#include "acl/core/track_desc.h"
namespace acl
{
namespace acl_impl
{
// Hide these implementations, they shouldn't be needed in user-space

// Returns the tracks_header located immediately after the raw buffer header.
inline const tracks_header& get_tracks_header(const compressed_tracks& tracks)
{
	const uint8_t* buffer = reinterpret_cast<const uint8_t*>(&tracks);
	return *reinterpret_cast<const tracks_header*>(buffer + sizeof(raw_buffer_header));
}

// Returns the scalar header which follows the common tracks_header.
inline const scalar_tracks_header& get_scalar_tracks_header(const compressed_tracks& tracks)
{
	const uint8_t* buffer = reinterpret_cast<const uint8_t*>(&tracks);
	return *reinterpret_cast<const scalar_tracks_header*>(buffer + sizeof(raw_buffer_header) + sizeof(tracks_header));
}

// Returns the transform header which follows the common tracks_header.
inline const transform_tracks_header& get_transform_tracks_header(const compressed_tracks& tracks)
{
	const uint8_t* buffer = reinterpret_cast<const uint8_t*>(&tracks);
	return *reinterpret_cast<const transform_tracks_header*>(buffer + sizeof(raw_buffer_header) + sizeof(tracks_header));
}

// The optional metadata header lives at the very end of the compressed buffer.
inline const optional_metadata_header& get_optional_metadata_header(const compressed_tracks& tracks)
{
	const uint8_t* buffer = reinterpret_cast<const uint8_t*>(&tracks);
	return *reinterpret_cast<const optional_metadata_header*>(buffer + tracks.get_size() - sizeof(optional_metadata_header));
}
}
inline algorithm_type8 compressed_tracks::get_algorithm_type() const { return acl_impl::get_tracks_header(*this).algorithm_type; }
inline buffer_tag32 compressed_tracks::get_tag() const { return static_cast<buffer_tag32>(acl_impl::get_tracks_header(*this).tag); }
inline compressed_tracks_version16 compressed_tracks::get_version() const { return acl_impl::get_tracks_header(*this).version; }
inline uint32_t compressed_tracks::get_num_tracks() const { return acl_impl::get_tracks_header(*this).num_tracks; }
inline uint32_t compressed_tracks::get_num_samples_per_track() const { return acl_impl::get_tracks_header(*this).num_samples; }
inline track_type8 compressed_tracks::get_track_type() const { return acl_impl::get_tracks_header(*this).track_type; }
inline float compressed_tracks::get_duration() const
{
const acl_impl::tracks_header& header = acl_impl::get_tracks_header(*this);
return calculate_duration(header.num_samples, header.sample_rate);
}
inline float compressed_tracks::get_sample_rate() const { return acl_impl::get_tracks_header(*this).sample_rate; }
// Returns the optional name of the track list, or nullptr when no metadata
// (or no name) was serialized.
inline const char* compressed_tracks::get_name() const
{
	const acl_impl::tracks_header& header = acl_impl::get_tracks_header(*this);
	if (!header.get_has_metadata())
		return nullptr; // No metadata is stored
	const acl_impl::optional_metadata_header& metadata_header = acl_impl::get_optional_metadata_header(*this);
	// Fix: test the track list name offset itself. The previous check looked at
	// 'track_name_offsets' which describes the per-track names, not the list name,
	// so a stored list name could incorrectly be reported as missing.
	if (!metadata_header.track_list_name.is_valid())
		return nullptr; // Metadata isn't stored
	return metadata_header.get_track_list_name(*this);
}
inline const char* compressed_tracks::get_track_name(uint32_t track_index) const
{
const acl_impl::tracks_header& header = acl_impl::get_tracks_header(*this);
if (!header.get_has_metadata())
return nullptr; // No metadata is stored
ACL_ASSERT(track_index < header.num_tracks, "Invalid track index");
if (track_index >= header.num_tracks)
return nullptr; // Invalid track index
const acl_impl::optional_metadata_header& metadata_header = acl_impl::get_optional_metadata_header(*this);
if (!metadata_header.track_name_offsets.is_valid())
return nullptr; // Metadata isn't stored
const uint32_t* track_names_offsets = metadata_header.get_track_name_offsets(*this);
const ptr_offset32<char> offset = track_names_offsets[track_index];
return offset.add_to(track_names_offsets);
}
inline uint32_t compressed_tracks::get_parent_track_index(uint32_t track_index) const
{
const acl_impl::tracks_header& header = acl_impl::get_tracks_header(*this);
if (!header.get_has_metadata())
return k_invalid_track_index; // No metadata is stored
ACL_ASSERT(track_index < header.num_tracks, "Invalid track index");
if (track_index >= header.num_tracks)
return k_invalid_track_index; // Invalid track index
const acl_impl::optional_metadata_header& metadata_header = acl_impl::get_optional_metadata_header(*this);
if (!metadata_header.parent_track_indices.is_valid())
return k_invalid_track_index; // Metadata isn't stored
const uint32_t* parent_track_indices = metadata_header.get_parent_track_indices(*this);
return parent_track_indices[track_index];
}
inline bool compressed_tracks::get_track_description(uint32_t track_index, track_desc_scalarf& out_description) const
{
const acl_impl::tracks_header& header = acl_impl::get_tracks_header(*this);
if (!header.get_has_metadata())
return false; // No metadata is stored
ACL_ASSERT(track_index < header.num_tracks, "Invalid track index");
if (track_index >= header.num_tracks)
return false; // Invalid track index
const acl_impl::optional_metadata_header& metadata_header = acl_impl::get_optional_metadata_header(*this);
if (!metadata_header.track_descriptions.is_valid())
return false; // Metadata isn't stored
const uint8_t* descriptions = metadata_header.get_track_descriptions(*this);
const float* description_data = reinterpret_cast<const float*>(descriptions + (track_index * sizeof(float) * 1));
out_description.output_index = track_index;
out_description.precision = description_data[0];
return true;
}
inline bool compressed_tracks::get_track_description(uint32_t track_index, track_desc_transformf& out_description) const
{
const acl_impl::tracks_header& header = acl_impl::get_tracks_header(*this);
if (!header.get_has_metadata())
return false; // No metadata is stored
ACL_ASSERT(track_index < header.num_tracks, "Invalid track index");
if (track_index >= header.num_tracks)
return false; // Invalid track index
const acl_impl::optional_metadata_header& metadata_header = acl_impl::get_optional_metadata_header(*this);
if (!metadata_header.track_descriptions.is_valid())
return false; // Metadata isn't stored
if (!metadata_header.parent_track_indices.is_valid())
return false; // Metadata isn't stored
const uint32_t* parent_track_indices = metadata_header.get_parent_track_indices(*this);
const uint8_t* descriptions = metadata_header.get_track_descriptions(*this);
const float* description_data = reinterpret_cast<const float*>(descriptions + (track_index * sizeof(float) * 5));
out_description.output_index = track_index;
out_description.parent_index = parent_track_indices[track_index];
out_description.precision = description_data[0];
out_description.shell_distance = description_data[1];
out_description.constant_rotation_threshold_angle = description_data[2];
out_description.constant_translation_threshold = description_data[3];
out_description.constant_scale_threshold = description_data[4];
return true;
}
// Validates the compressed buffer: alignment, tag, algorithm type, version and
// optionally the content hash. Returns an empty error_result on success.
inline error_result compressed_tracks::is_valid(bool check_hash) const
{
if (!is_aligned_to(this, alignof(compressed_tracks)))
return error_result("Invalid alignment");
const acl_impl::tracks_header& header = acl_impl::get_tracks_header(*this);
// The serialized tag distinguishes compressed_tracks from other raw buffer types.
if (header.tag != static_cast<uint32_t>(buffer_tag32::compressed_tracks))
return error_result("Invalid tag");
if (!is_valid_algorithm_type(header.algorithm_type))
return error_result("Invalid algorithm type");
if (header.version < compressed_tracks_version16::first || header.version > compressed_tracks_version16::latest)
return error_result("Invalid algorithm version");
// Hashing is optional because it touches the whole buffer and is comparatively slow.
if (check_hash)
{
// The hash covers everything after the raw_buffer_header (starting at m_padding).
const uint32_t hash = hash32(safe_ptr_cast<const uint8_t>(&m_padding[0]), m_buffer_header.size - sizeof(acl_impl::raw_buffer_header));
if (hash != m_buffer_header.hash)
return error_result("Invalid hash");
}
return error_result();
}
// Binds a raw memory buffer as a compressed_tracks instance.
// When 'out_error_result' is provided, the buffer is validated (without hashing)
// and nullptr is returned on failure. When it is null, validation is skipped
// entirely and the caller takes responsibility for the buffer's integrity.
inline const compressed_tracks* make_compressed_tracks(const void* buffer, error_result* out_error_result)
{
if (buffer == nullptr)
{
if (out_error_result != nullptr)
*out_error_result = error_result("Buffer is not a valid pointer");
return nullptr;
}
const compressed_tracks* clip = static_cast<const compressed_tracks*>(buffer);
if (out_error_result != nullptr)
{
// Shallow validation only: check_hash is false to avoid touching the whole buffer.
const error_result result = clip->is_valid(false);
*out_error_result = result;
if (result.any())
return nullptr;
}
return clip;
}
}

View File

@ -0,0 +1,184 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2019 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/iallocator.h"
#include "acl/core/track_types.h"
#include "acl/core/track_writer.h"
#include <rtm/scalarf.h>
#include <rtm/vector4f.h>
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
namespace acl_impl
{
//////////////////////////////////////////////////////////////////////////
// A track_writer implementation used by debug/validation code paths.
// It owns one raw buffer sized for 'num_tracks_' entries of the largest
// supported sample type (rtm::qvvf) and exposes typed read/write accessors
// for every track type. The active union member is dictated by 'type';
// the asserts guard against mismatched access.
struct debug_track_writer final : public track_writer
{
debug_track_writer(iallocator& allocator_, track_type8 type_, uint32_t num_tracks_)
: allocator(allocator_)
, tracks_typed{ nullptr }
, buffer_size(0)
, num_tracks(num_tracks_)
, type(type_)
{
// Large enough to accommodate the largest type
buffer_size = sizeof(rtm::qvvf) * num_tracks_;
tracks_typed.any = allocator_.allocate(buffer_size, alignof(rtm::qvvf));
}
// Returns the sample buffer to the allocator that created it.
~debug_track_writer()
{
allocator.deallocate(tracks_typed.any, buffer_size);
}
//////////////////////////////////////////////////////////////////////////
// Called by the decoder to write out a value for a specified track index.
void RTM_SIMD_CALL write_float1(uint32_t track_index, rtm::scalarf_arg0 value)
{
ACL_ASSERT(type == track_type8::float1f, "Unexpected track type access");
rtm::scalar_store(value, &tracks_typed.float1f[track_index]);
}
// Reads back a previously written float1 sample.
float RTM_SIMD_CALL read_float1(uint32_t track_index) const
{
ACL_ASSERT(type == track_type8::float1f, "Unexpected track type access");
return tracks_typed.float1f[track_index];
}
//////////////////////////////////////////////////////////////////////////
// Called by the decoder to write out a value for a specified track index.
void RTM_SIMD_CALL write_float2(uint32_t track_index, rtm::vector4f_arg0 value)
{
ACL_ASSERT(type == track_type8::float2f, "Unexpected track type access");
rtm::vector_store2(value, &tracks_typed.float2f[track_index]);
}
// Reads back a previously written float2 sample (in the XY components).
rtm::vector4f RTM_SIMD_CALL read_float2(uint32_t track_index) const
{
ACL_ASSERT(type == track_type8::float2f, "Unexpected track type access");
return rtm::vector_load2(&tracks_typed.float2f[track_index]);
}
//////////////////////////////////////////////////////////////////////////
// Called by the decoder to write out a value for a specified track index.
void RTM_SIMD_CALL write_float3(uint32_t track_index, rtm::vector4f_arg0 value)
{
ACL_ASSERT(type == track_type8::float3f, "Unexpected track type access");
rtm::vector_store3(value, &tracks_typed.float3f[track_index]);
}
// Reads back a previously written float3 sample (in the XYZ components).
rtm::vector4f RTM_SIMD_CALL read_float3(uint32_t track_index) const
{
ACL_ASSERT(type == track_type8::float3f, "Unexpected track type access");
return rtm::vector_load3(&tracks_typed.float3f[track_index]);
}
//////////////////////////////////////////////////////////////////////////
// Called by the decoder to write out a value for a specified track index.
void RTM_SIMD_CALL write_float4(uint32_t track_index, rtm::vector4f_arg0 value)
{
ACL_ASSERT(type == track_type8::float4f, "Unexpected track type access");
rtm::vector_store(value, &tracks_typed.float4f[track_index]);
}
// Reads back a previously written float4 sample.
rtm::vector4f RTM_SIMD_CALL read_float4(uint32_t track_index) const
{
ACL_ASSERT(type == track_type8::float4f, "Unexpected track type access");
return rtm::vector_load(&tracks_typed.float4f[track_index]);
}
//////////////////////////////////////////////////////////////////////////
// Called by the decoder to write out a value for a specified track index.
void RTM_SIMD_CALL write_vector4(uint32_t track_index, rtm::vector4f_arg0 value)
{
ACL_ASSERT(type == track_type8::vector4f, "Unexpected track type access");
tracks_typed.vector4f[track_index] = value;
}
// Reads back a previously written vector4 sample.
rtm::vector4f RTM_SIMD_CALL read_vector4(uint32_t track_index) const
{
ACL_ASSERT(type == track_type8::vector4f, "Unexpected track type access");
return tracks_typed.vector4f[track_index];
}
//////////////////////////////////////////////////////////////////////////
// Called by the decoder to write out a quaternion rotation value for a specified bone index.
void RTM_SIMD_CALL write_rotation(uint32_t track_index, rtm::quatf_arg0 rotation)
{
ACL_ASSERT(type == track_type8::qvvf, "Unexpected track type access");
tracks_typed.qvvf[track_index].rotation = rotation;
}
//////////////////////////////////////////////////////////////////////////
// Called by the decoder to write out a translation value for a specified bone index.
void RTM_SIMD_CALL write_translation(uint32_t track_index, rtm::vector4f_arg0 translation)
{
ACL_ASSERT(type == track_type8::qvvf, "Unexpected track type access");
tracks_typed.qvvf[track_index].translation = translation;
}
//////////////////////////////////////////////////////////////////////////
// Called by the decoder to write out a scale value for a specified bone index.
void RTM_SIMD_CALL write_scale(uint32_t track_index, rtm::vector4f_arg0 scale)
{
ACL_ASSERT(type == track_type8::qvvf, "Unexpected track type access");
tracks_typed.qvvf[track_index].scale = scale;
}
// Reads back a previously written full qvv transform.
const rtm::qvvf& RTM_SIMD_CALL read_qvv(uint32_t track_index) const
{
ACL_ASSERT(type == track_type8::qvvf, "Unexpected track type access");
return tracks_typed.qvvf[track_index];
}
// Typed views over the single raw sample buffer; only the member matching
// 'type' is ever valid.
union ptr_union
{
void* any;
float* float1f;
rtm::float2f* float2f;
rtm::float3f* float3f;
rtm::float4f* float4f;
rtm::vector4f* vector4f;
rtm::qvvf* qvvf;
};
iallocator& allocator;	// allocator that owns the sample buffer
ptr_union tracks_typed;	// sample buffer, aliased by track type
size_t buffer_size;	// allocated size in bytes, kept for deallocate()
uint32_t num_tracks;	// number of tracks the buffer was sized for
track_type8 type;	// type of every track in the buffer
};
}
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,121 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2017 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/iallocator.h"
#include <rtm/vector4f.h>
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
//////////////////////////////////////////////////////////////////////////
// Simple helper to flush the CPU cache
//////////////////////////////////////////////////////////////////////////
class alignas(16) CPUCacheFlusher
{
public:
CPUCacheFlusher()
: m_is_flushing(false)
{
(void)m_padding;	// touch the padding so it isn't flagged as unused
}
//////////////////////////////////////////////////////////////////////////
// Marks the beginning of a cache flushing operation
void begin_flushing()
{
ACL_ASSERT(!m_is_flushing, "begin_flushing() already called");
m_is_flushing = true;
}
//////////////////////////////////////////////////////////////////////////
// Flush the buffer data from the CPU cache.
// On SSE2 targets every cache line in [buffer, buffer + buffer_size) is
// explicitly evicted with CLFLUSH. On other targets this is a no-op and
// eviction happens in end_flushing() instead.
void flush_buffer(const void* buffer, size_t buffer_size)
{
ACL_ASSERT(m_is_flushing, "begin_flushing() not called");
(void)buffer;
(void)buffer_size;
#if defined(RTM_SSE2_INTRINSICS)
constexpr size_t k_cache_line_size = 64;	// assumes 64 byte cache lines — TODO confirm for all targeted CPUs
const uint8_t* buffer_start = reinterpret_cast<const uint8_t*>(buffer);
const uint8_t* buffer_ptr = buffer_start;
const uint8_t* buffer_end = buffer_start + buffer_size;
while (buffer_ptr < buffer_end)
{
_mm_clflush(buffer_ptr);
buffer_ptr += k_cache_line_size;
}
#endif
}
//////////////////////////////////////////////////////////////////////////
// Marks the end of a cache flushing operation.
// Without SSE2 there is no explicit flush intrinsic used here; instead we
// read and write a member buffer larger than the cache, which evicts
// previously cached data.
void end_flushing()
{
ACL_ASSERT(m_is_flushing, "begin_flushing() not called");
m_is_flushing = false;
#if !defined(RTM_SSE2_INTRINSICS)
const rtm::vector4f one = rtm::vector_set(1.0F);
for (size_t entry_index = 0; entry_index < k_num_buffer_entries; ++entry_index)
m_buffer[entry_index] = rtm::vector_add(m_buffer[entry_index], one);
#endif
}
private:
CPUCacheFlusher(const CPUCacheFlusher& other) = delete;
CPUCacheFlusher& operator=(const CPUCacheFlusher& other) = delete;
#if !defined(RTM_SSE2_INTRINSICS)
// TODO: get an official CPU cache size
// The buffer is deliberately larger than the cache it targets so that
// touching all of it evicts everything else.
#if defined(__ANDROID__)
// Nexus 5X has 2MB cache
static constexpr size_t k_cache_size = 3 * 1024 * 1024;
#else
// iPad Pro has 8MB cache
static constexpr size_t k_cache_size = 9 * 1024 * 1024;
#endif
static constexpr size_t k_num_buffer_entries = k_cache_size / sizeof(rtm::vector4f);
rtm::vector4f m_buffer[k_num_buffer_entries];
#endif
bool m_is_flushing;	// true between begin_flushing() and end_flushing()
// Unused memory left as padding
uint8_t m_padding[15];
};
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,64 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2020 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
// Included only once from track_desc.h
#include "acl/core/error_result.h"
#include "acl/core/track_types.h"
#include <rtm/scalarf.h>
#include <cstdint>
namespace acl
{
// Validates the scalar track description.
// A usable precision must be a finite, non-negative value (NaN fails both tests).
inline error_result track_desc_scalarf::is_valid() const
{
const bool precision_ok = precision >= 0.0F && rtm::scalar_is_finite(precision);
if (!precision_ok)
return error_result("Invalid precision");
return error_result();
}
// Validates the transform track description.
// Every threshold must be a finite, non-negative value; the first offending
// field determines the error message returned.
inline error_result track_desc_transformf::is_valid() const
{
const auto is_invalid = [](float value) { return value < 0.0F || !rtm::scalar_is_finite(value); };
if (is_invalid(precision))
return error_result("Invalid precision");
if (is_invalid(shell_distance))
return error_result("Invalid shell_distance");
if (is_invalid(constant_rotation_threshold_angle))
return error_result("Invalid constant_rotation_threshold_angle");
if (is_invalid(constant_translation_threshold))
return error_result("Invalid constant_translation_threshold");
if (is_invalid(constant_scale_threshold))
return error_result("Invalid constant_scale_threshold");
return error_result();
}
}

View File

@ -0,0 +1,133 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2020 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
// Included only once from track_formats.h
#include <cstdint>
#include <cstring>
namespace acl
{
// Maps each known rotation format to its canonical string name.
// Unknown values yield "<Invalid>".
inline const char* get_rotation_format_name(rotation_format8 format)
{
if (format == rotation_format8::quatf_full)
return "quatf_full";
if (format == rotation_format8::quatf_drop_w_full)
return "quatf_drop_w_full";
if (format == rotation_format8::quatf_drop_w_variable)
return "quatf_drop_w_variable";
return "<Invalid>";
}
// Parses a rotation format name into its enum value.
// Returns true and writes out_format on success; legacy pre-2.0 names are
// accepted for backwards compatibility. Note the comparison is a prefix
// match (strncmp over the candidate's length), matching the original behavior.
inline bool get_rotation_format(const char* format, rotation_format8& out_format)
{
const auto matches = [format](const char* candidate) { return std::strncmp(format, candidate, std::strlen(candidate)) == 0; };
// "Quat_128" is an ACL_DEPRECATED legacy name, kept for backwards compatibility, remove in 3.0
if (matches("Quat_128") || matches("quatf_full"))
{
out_format = rotation_format8::quatf_full;
return true;
}
// "QuatDropW_96" is an ACL_DEPRECATED legacy name, kept for backwards compatibility, remove in 3.0
if (matches("QuatDropW_96") || matches("quatf_drop_w_full"))
{
out_format = rotation_format8::quatf_drop_w_full;
return true;
}
// "QuatDropW_Variable" is an ACL_DEPRECATED legacy name, kept for backwards compatibility, remove in 3.0
if (matches("QuatDropW_Variable") || matches("quatf_drop_w_variable"))
{
out_format = rotation_format8::quatf_drop_w_variable;
return true;
}
return false;
}
// Returns the string name of a vector format.
// Note: any value other than vector3f_full maps to "vector3f_variable".
constexpr const char* get_vector_format_name(vector_format8 format)
{
return format == vector_format8::vector3f_full ? "vector3f_full" : "vector3f_variable";
}
// Parses a vector format name into its enum value.
// Returns true and writes out_format on success; legacy pre-2.0 names are
// accepted for backwards compatibility. Note the comparison is a prefix
// match (strncmp over the candidate's length), matching the original behavior.
inline bool get_vector_format(const char* format, vector_format8& out_format)
{
const auto matches = [format](const char* candidate) { return std::strncmp(format, candidate, std::strlen(candidate)) == 0; };
// "Vector3_96" is an ACL_DEPRECATED legacy name, kept for backwards compatibility, remove in 3.0
if (matches("Vector3_96") || matches("vector3f_full"))
{
out_format = vector_format8::vector3f_full;
return true;
}
// "Vector3_Variable" is an ACL_DEPRECATED legacy name, kept for backwards compatibility, remove in 3.0
if (matches("Vector3_Variable") || matches("vector3f_variable"))
{
out_format = vector_format8::vector3f_variable;
return true;
}
return false;
}
// Maps a rotation format to the variant it belongs to (full quat vs drop-W).
constexpr rotation_variant8 get_rotation_variant(rotation_format8 rotation_format)
{
return rotation_format == rotation_format8::quatf_full ? rotation_variant8::quat : rotation_variant8::quat_drop_w;
}
// Returns the highest precision (full) format of a rotation variant.
constexpr rotation_format8 get_highest_variant_precision(rotation_variant8 variant)
{
return variant == rotation_variant8::quat ? rotation_format8::quatf_full : rotation_format8::quatf_drop_w_full;
}
// True when the rotation format uses a variable bit rate.
constexpr bool is_rotation_format_variable(rotation_format8 format)
{
return format == rotation_format8::quatf_drop_w_variable;
}
// True when the rotation format stores full precision floats.
constexpr bool is_rotation_format_full_precision(rotation_format8 format)
{
return format == rotation_format8::quatf_full || format == rotation_format8::quatf_drop_w_full;
}
// True when the vector format uses a variable bit rate.
constexpr bool is_vector_format_variable(vector_format8 format)
{
return format == vector_format8::vector3f_variable;
}
// True when the vector format stores full precision floats.
constexpr bool is_vector_format_full_precision(vector_format8 format)
{
return format == vector_format8::vector3f_full;
}
}

View File

@ -0,0 +1,158 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2018 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/error.h"
#include "acl/core/impl/compiler_utils.h"
#include <rtm/scalarf.h>
#include <cstdint>
#include <algorithm>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
//////////////////////////////////////////////////////////////////////////
// This enum dictates how interpolation samples are calculated based on the sample time.
enum class sample_rounding_policy
{
//////////////////////////////////////////////////////////////////////////
// If the sample time lies between two samples, both sample indices
// are returned and the interpolation alpha lies in between.
// This performs true linear interpolation.
none,
//////////////////////////////////////////////////////////////////////////
// If the sample time lies between two samples, both sample indices
// are returned and the interpolation will be 0.0.
// This snaps to the earlier of the two samples.
floor,
//////////////////////////////////////////////////////////////////////////
// If the sample time lies between two samples, both sample indices
// are returned and the interpolation will be 1.0.
// This snaps to the later of the two samples.
ceil,
//////////////////////////////////////////////////////////////////////////
// If the sample time lies between two samples, both sample indices
// are returned and the interpolation will be 0.0 or 1.0 depending
// on which sample is nearest.
nearest,
};
//////////////////////////////////////////////////////////////////////////
// Calculates the sample indices and the interpolation required to linearly
// interpolate when the samples are uniform.
// The returned sample indices are clamped and do not loop.
// If the sample rate is available, prefer using find_linear_interpolation_samples_with_sample_rate
// instead. It is faster and more accurate.
inline void find_linear_interpolation_samples_with_duration(uint32_t num_samples, float duration, float sample_time, sample_rounding_policy rounding_policy,
uint32_t& out_sample_index0, uint32_t& out_sample_index1, float& out_interpolation_alpha)
{
// Samples are evenly spaced, trivially calculate the indices that we need
ACL_ASSERT(duration >= 0.0F, "Invalid duration: %f", duration);
ACL_ASSERT(sample_time >= 0.0F && sample_time <= duration, "Invalid sample time: 0.0 <= %f <= %f", sample_time, duration);
ACL_ASSERT(num_samples > 0, "Invalid num_samples: %u", num_samples);
// A zero duration means a single static pose; guard the division.
const float sample_rate = duration == 0.0F ? 0.0F : (float(num_samples - 1) / duration);
ACL_ASSERT(sample_rate >= 0.0F && rtm::scalar_is_finite(sample_rate), "Invalid sample_rate: %f", sample_rate);
// Fractional index of the sample time; its integer part picks the first
// sample, its fractional part is the interpolation alpha.
const float fractional_index = sample_time * sample_rate;
const uint32_t index0 = static_cast<uint32_t>(fractional_index);
const uint32_t index1 = std::min(index0 + 1, num_samples - 1);
ACL_ASSERT(index0 <= index1 && index1 < num_samples, "Invalid sample indices: 0 <= %u <= %u < %u", index0, index1, num_samples);
const float alpha = fractional_index - float(index0);
ACL_ASSERT(alpha >= 0.0F && alpha <= 1.0F, "Invalid interpolation alpha: 0.0 <= %f <= 1.0", alpha);
out_sample_index0 = index0;
out_sample_index1 = index1;
// Adjust the interpolation alpha for the requested rounding policy.
if (rounding_policy == sample_rounding_policy::floor)
out_interpolation_alpha = 0.0F;
else if (rounding_policy == sample_rounding_policy::ceil)
out_interpolation_alpha = 1.0F;
else if (rounding_policy == sample_rounding_policy::nearest)
out_interpolation_alpha = rtm::scalar_floor(alpha + 0.5F);
else
out_interpolation_alpha = alpha;	// sample_rounding_policy::none (and any unknown value)
}
//////////////////////////////////////////////////////////////////////////
// Calculates the sample indices and the interpolation required to linearly
// interpolate when the samples are uniform.
// The returned sample indices are clamped and do not loop.
inline void find_linear_interpolation_samples_with_sample_rate(uint32_t num_samples, float sample_rate, float sample_time, sample_rounding_policy rounding_policy,
uint32_t& out_sample_index0, uint32_t& out_sample_index1, float& out_interpolation_alpha)
{
// Samples are evenly spaced, trivially calculate the indices that we need
ACL_ASSERT(sample_rate >= 0.0F, "Invalid sample rate: %f", sample_rate);
ACL_ASSERT(num_samples > 0, "Invalid num_samples: %u", num_samples);
// TODO: Would it be faster to do the index calculation entirely with floating point?
// SSE4 can floor with a single instruction. Keeping index0 as a float and
// flooring it as a float would let index1 and the interpolation alpha be
// computed in parallel, breaking the current int/float dependency chain.
const float fractional_index = sample_time * sample_rate;
const uint32_t index0 = static_cast<uint32_t>(fractional_index);
const uint32_t index1 = std::min(index0 + 1, num_samples - 1);
ACL_ASSERT(index0 <= index1 && index1 < num_samples, "Invalid sample indices: 0 <= %u <= %u < %u", index0, index1, num_samples);
const float alpha = fractional_index - float(index0);
ACL_ASSERT(alpha >= 0.0F && alpha <= 1.0F, "Invalid interpolation alpha: 0.0 <= %f <= 1.0", alpha);
out_sample_index0 = index0;
out_sample_index1 = index1;
// Adjust the interpolation alpha for the requested rounding policy.
if (rounding_policy == sample_rounding_policy::floor)
out_interpolation_alpha = 0.0F;
else if (rounding_policy == sample_rounding_policy::ceil)
out_interpolation_alpha = 1.0F;
else if (rounding_policy == sample_rounding_policy::nearest)
out_interpolation_alpha = rtm::scalar_floor(alpha + 0.5F);
else
out_interpolation_alpha = alpha;	// sample_rounding_policy::none (and any unknown value)
}
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,62 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2019 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include <cstdint>
#include <type_traits>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
namespace acl_impl
{
//////////////////////////////////////////////////////////////////////////
// Lightweight begin()/end() pair over a raw array, usable with
// range-based for loops. When 'is_const' is true, iteration yields
// pointers to const items.
template <class item_type, bool is_const>
class iterator_impl
{
public:
// Public iterator/pointer type; const-qualified when is_const is true.
typedef typename std::conditional<is_const, const item_type*, item_type*>::type ItemPtr;
constexpr iterator_impl(ItemPtr items, size_t num_items) : m_ptr(items), m_count(num_items) {}
constexpr ItemPtr begin() const { return m_ptr; }
constexpr ItemPtr end() const { return m_ptr + m_count; }
private:
ItemPtr m_ptr;	// first item
size_t m_count;	// number of items
};
}
// Iterates over a mutable array of items.
template <class item_type>
using iterator = acl_impl::iterator_impl<item_type, false>;
// Iterates over an immutable array of items.
template <class item_type>
using const_iterator = acl_impl::iterator_impl<item_type, true>;
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,363 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2017 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/error.h"
#include <rtm/math.h>
#include <cstdint>
#include <cstring>
#include <type_traits>
#include <limits>
#include <memory>
#include <algorithm>
// For byte swapping intrinsics
#if defined(_MSC_VER)
#include <cstdlib>
#elif defined(__APPLE__)
#include <libkern/OSByteOrder.h>
#endif
// For __prefetch
#if defined(RTM_NEON64_INTRINSICS) && defined(ACL_COMPILER_MSVC)
#include <intrin.h>
#endif
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
//////////////////////////////////////////////////////////////////////////
// Allows static branching without any warnings
// static_condition<X>::test() returns the compile-time bool X as a runtime
// value, letting callers branch on compile-time facts without triggering
// "condition is always true/false" warnings.
template<bool expression_result>
struct static_condition { static constexpr bool test() { return true; } };
template<>
struct static_condition<false> { static constexpr bool test() { return false; } };
//////////////////////////////////////////////////////////////////////////
// Various miscellaneous utilities related to alignment
// Returns true when 'input' is a non-zero power of two.
constexpr bool is_power_of_two(size_t input)
{
return input != 0 && (input & (input - 1)) == 0;
}
// Returns true when 'alignment' is a power of two no smaller than Type's natural alignment.
template<typename Type>
constexpr bool is_alignment_valid(size_t alignment)
{
return is_power_of_two(alignment) && alignment >= alignof(Type);
}
// Returns true when the pointer 'value' is aligned on an 'alignment' byte boundary.
template<typename PtrType>
inline bool is_aligned_to(PtrType* value, size_t alignment)
{
ACL_ASSERT(is_power_of_two(alignment), "Alignment value must be a power of two");
return (reinterpret_cast<intptr_t>(value) & (alignment - 1)) == 0;
}
// Returns true when the integral 'value' is a multiple of 'alignment'.
template<typename IntegralType>
inline bool is_aligned_to(IntegralType value, size_t alignment)
{
ACL_ASSERT(is_power_of_two(alignment), "Alignment value must be a power of two");
return (static_cast<size_t>(value) & (alignment - 1)) == 0;
}
// Returns true when 'value' is aligned to its own type's natural alignment.
template<typename PtrType>
constexpr bool is_aligned(PtrType* value)
{
return is_aligned_to(value, alignof(PtrType));
}
// Rounds the pointer 'value' up to the next 'alignment' byte boundary.
template<typename PtrType>
inline PtrType* align_to(PtrType* value, size_t alignment)
{
ACL_ASSERT(is_power_of_two(alignment), "Alignment value must be a power of two");
return reinterpret_cast<PtrType*>((reinterpret_cast<intptr_t>(value) + (alignment - 1)) & ~(alignment - 1));
}
// Rounds the integral 'value' up to the next multiple of 'alignment'.
template<typename IntegralType>
inline IntegralType align_to(IntegralType value, size_t alignment)
{
ACL_ASSERT(is_power_of_two(alignment), "Alignment value must be a power of two");
return static_cast<IntegralType>((static_cast<size_t>(value) + (alignment - 1)) & ~(alignment - 1));
}
// Number of padding bytes needed between a PreviousMemberType member and a
// following NextMemberType member; constexpr-friendly form of align_to.
template<typename PreviousMemberType, typename NextMemberType>
constexpr size_t get_required_padding()
{
// align_to(sizeof(PreviousMemberType), alignof(NextMemberType)) - sizeof(PreviousMemberType)
return ((sizeof(PreviousMemberType) + (alignof(NextMemberType) - 1)) & ~(alignof(NextMemberType)- 1)) - sizeof(PreviousMemberType);
}
// Returns the number of elements in a C array at compile time.
template<typename ElementType, size_t num_elements>
constexpr size_t get_array_size(ElementType const (&)[num_elements]) { return num_elements; }
//////////////////////////////////////////////////////////////////////////
// Type safe casting
namespace memory_impl
{
// Casts a pointer to another pointer type, asserting in debug builds
// that the result is properly aligned for the destination type.
template<typename DestPtrType, typename SrcType>
struct safe_ptr_to_ptr_cast_impl
{
inline static DestPtrType* cast(SrcType* input)
{
ACL_ASSERT(is_aligned_to(input, alignof(DestPtrType)), "reinterpret_cast would result in an unaligned pointer");
return reinterpret_cast<DestPtrType*>(input);
}
};
// void* carries no alignment requirement; the cast is always safe.
template<typename SrcType>
struct safe_ptr_to_ptr_cast_impl<void, SrcType>
{
static constexpr void* cast(SrcType* input) { return input; }
};
// Casts an integral value to a pointer type, asserting in debug builds
// that the resulting address is properly aligned for the destination type.
template<typename DestPtrType, typename SrcType>
struct safe_int_to_ptr_cast_impl
{
inline static DestPtrType* cast(SrcType input)
{
ACL_ASSERT(is_aligned_to(input, alignof(DestPtrType)), "reinterpret_cast would result in an unaligned pointer");
return reinterpret_cast<DestPtrType*>(input);
}
};
// void* carries no alignment requirement; the cast is always safe.
template<typename SrcType>
struct safe_int_to_ptr_cast_impl<void, SrcType>
{
static constexpr void* cast(SrcType input) { return reinterpret_cast<void*>(input); }
};
}
// Casts a pointer to another pointer type; debug builds assert the result
// is properly aligned for DestPtrType.
template<typename DestPtrType, typename SrcType>
inline DestPtrType* safe_ptr_cast(SrcType* input)
{
return memory_impl::safe_ptr_to_ptr_cast_impl<DestPtrType, SrcType>::cast(input);
}
// Casts an integral address to a pointer type; debug builds assert the
// resulting pointer is properly aligned for DestPtrType.
template<typename DestPtrType, typename SrcType>
inline DestPtrType* safe_ptr_cast(SrcType input)
{
return memory_impl::safe_int_to_ptr_cast_impl<DestPtrType, SrcType>::cast(input);
}
#if defined(ACL_COMPILER_GCC)
// GCC sometimes complains about comparisons being always true due to partial template
// evaluation. Disable that warning since we know it is safe.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wtype-limits"
#endif
namespace memory_impl
{
// Yields the underlying integral type for enums, the type itself otherwise.
template<typename Type, bool is_enum = true>
struct safe_underlying_type { using type = typename std::underlying_type<Type>::type; };
template<typename Type>
struct safe_underlying_type<Type, false> { using type = Type; };
// Tests whether 'input' survives a round-trip through DstType unchanged.
// Integral case: also guards against signed/unsigned mismatches between
// the source and destination types.
template<typename DstType, typename SrcType, bool is_floating_point = false>
struct is_static_cast_safe_s
{
static bool test(SrcType input)
{
using SrcRealType = typename safe_underlying_type<SrcType, std::is_enum<SrcType>::value>::type;
if (static_condition<(std::is_signed<DstType>::value == std::is_signed<SrcRealType>::value)>::test())
return SrcType(DstType(input)) == input;
else if (static_condition<(std::is_signed<SrcRealType>::value)>::test())
return int64_t(input) >= 0 && SrcType(DstType(input)) == input;
else
return uint64_t(input) <= uint64_t(std::numeric_limits<DstType>::max());
};
};
// Floating-point case: a simple round-trip comparison is sufficient.
template<typename DstType, typename SrcType>
struct is_static_cast_safe_s<DstType, SrcType, true>
{
static bool test(SrcType input)
{
return SrcType(DstType(input)) == input;
}
};
template<typename DstType, typename SrcType>
inline bool is_static_cast_safe(SrcType input)
{
// TODO: In C++17 this should be folded to constexpr if
return is_static_cast_safe_s<DstType, SrcType, static_condition<(std::is_floating_point<SrcType>::value || std::is_floating_point<DstType>::value)>::test()>::test(input);
}
}
// static_cast that asserts in debug builds when the conversion loses data.
template<typename DstType, typename SrcType>
inline DstType safe_static_cast(SrcType input)
{
#if defined(ACL_HAS_ASSERT_CHECKS)
const bool is_safe = memory_impl::is_static_cast_safe<DstType, SrcType>(input);
ACL_ASSERT(is_safe, "Unsafe static cast resulted in data loss");
#endif
return static_cast<DstType>(input);
}
#if defined(ACL_COMPILER_GCC)
#pragma GCC diagnostic pop
#endif
//////////////////////////////////////////////////////////////////////////
// Endian and raw memory support
////////////////////////////////////////////////////////////////////////////////
// Returns 'ptr' advanced by 'offset' bytes, cast to the requested pointer type.
template<typename OutputPtrType, typename InputPtrType, typename offset_type>
inline OutputPtrType* add_offset_to_ptr(InputPtrType* ptr, offset_type offset)
{
	const uintptr_t base_address = reinterpret_cast<uintptr_t>(ptr);
	return safe_ptr_cast<OutputPtrType>(base_address + offset);
}
////////////////////////////////////////////////////////////////////////////////
// Reverses the byte order of a 16 bit unsigned integer.
inline uint16_t byte_swap(uint16_t value)
{
#if defined(_MSC_VER)
	return _byteswap_ushort(value);
#elif defined(__APPLE__)
	return OSSwapInt16(value);
#elif defined(__GNUC__) || defined(__clang__)
	return __builtin_bswap16(value);
#else
	// Portable fallback: exchange the two bytes manually.
	const uint16_t low_byte = static_cast<uint16_t>(value >> 8);
	const uint16_t high_byte = static_cast<uint16_t>(value << 8);
	return static_cast<uint16_t>(high_byte | low_byte);
#endif
}
////////////////////////////////////////////////////////////////////////////////
// Reverses the byte order of a 32 bit unsigned integer.
inline uint32_t byte_swap(uint32_t value)
{
#if defined(_MSC_VER)
	return _byteswap_ulong(value);
#elif defined(__APPLE__)
	return OSSwapInt32(value);
#elif defined(__GNUC__) || defined(__clang__)
	return __builtin_bswap32(value);
#else
	// Portable fallback: swap the 16-bit halves, then the bytes within each half.
	uint32_t result = (value >> 16) | (value << 16);
	result = ((result & 0x00FF00FF) << 8) | ((result & 0xFF00FF00) >> 8);
	return result;
#endif
}
////////////////////////////////////////////////////////////////////////////////
// Reverses the byte order of a 64 bit unsigned integer.
inline uint64_t byte_swap(uint64_t value)
{
#if defined(_MSC_VER)
	return _byteswap_uint64(value);
#elif defined(__APPLE__)
	return OSSwapInt64(value);
#elif defined(__GNUC__) || defined(__clang__)
	return __builtin_bswap64(value);
#else
	// Portable fallback: swap 32-bit halves, then 16-bit groups, then bytes.
	uint64_t result = (value >> 32) | (value << 32);
	result = ((result & 0x0000FFFF0000FFFFULL) << 16) | ((result & 0xFFFF0000FFFF0000ULL) >> 16);
	result = ((result & 0x00FF00FF00FF00FFULL) << 8) | ((result & 0xFF00FF00FF00FF00ULL) >> 8);
	return result;
#endif
}
// We copy bits assuming big-endian ordering for 'dest' and 'src'
// Copies 'num_bits_to_copy' bits from 'src' (starting at 'src_bit_offset') into
// 'dest' (starting at 'dest_bit_offset'). Bit offsets count from the most
// significant bit of byte 0. Each loop iteration copies at most as many bits as
// fit inside both the current source byte and the current destination byte.
inline void memcpy_bits(void* dest, uint64_t dest_bit_offset, const void* src, uint64_t src_bit_offset, uint64_t num_bits_to_copy)
{
while (true)
{
// Locate the current byte and the bit position within it for both cursors.
uint64_t src_byte_offset = src_bit_offset / 8;
uint8_t src_byte_bit_offset = safe_static_cast<uint8_t>(src_bit_offset % 8);
uint64_t dest_byte_offset = dest_bit_offset / 8;
uint8_t dest_byte_bit_offset = safe_static_cast<uint8_t>(dest_bit_offset % 8);
const uint8_t* src_bytes = add_offset_to_ptr<const uint8_t>(src, src_byte_offset);
uint8_t* dest_byte = add_offset_to_ptr<uint8_t>(dest, dest_byte_offset);
// We'll copy only as many bits as there fits within 'dest' or as there are left
uint8_t num_bits_dest_remain_in_byte = 8 - dest_byte_bit_offset;
uint8_t num_bits_src_remain_in_byte = 8 - src_byte_bit_offset;
uint64_t num_bits_copied = std::min<uint64_t>(std::min<uint8_t>(num_bits_dest_remain_in_byte, num_bits_src_remain_in_byte), num_bits_to_copy);
uint8_t num_bits_copied_u8 = safe_static_cast<uint8_t>(num_bits_copied);
// We'll shift and mask to retain the 'dest' bits prior to our offset and whatever remains after the copy
uint8_t dest_shift_offset = dest_byte_bit_offset;
uint8_t dest_byte_mask = ~(0xFF >> dest_shift_offset) | ~(0xFF << (8 - num_bits_copied_u8 - dest_byte_bit_offset));
// Source bits are shifted down to be right-aligned, then masked to just the copied width.
uint8_t src_shift_offset = 8 - src_byte_bit_offset - num_bits_copied_u8;
uint8_t src_byte_mask = 0xFF >> (8 - num_bits_copied_u8);
// Shift needed to place the extracted bits at their position inside the destination byte.
uint8_t src_insert_shift_offset = 8 - num_bits_copied_u8 - dest_byte_bit_offset;
uint8_t partial_dest_value = *dest_byte & dest_byte_mask;
uint8_t partial_src_value = (*src_bytes >> src_shift_offset) & src_byte_mask;
// Merge the preserved destination bits with the inserted source bits.
*dest_byte = partial_dest_value | (partial_src_value << src_insert_shift_offset);
if (num_bits_to_copy <= num_bits_copied)
break; // Done
// Advance both bit cursors and continue with the next partial byte.
num_bits_to_copy -= num_bits_copied;
dest_bit_offset += num_bits_copied;
src_bit_offset += num_bits_copied;
}
}
////////////////////////////////////////////////////////////////////////////////
// Reads a 'data_type' value from a memory location that may not be suitably aligned.
template<typename data_type>
inline data_type unaligned_load(const void* input)
{
	data_type value;
	std::memcpy(&value, input, sizeof(value));
	return value;
}
////////////////////////////////////////////////////////////////////////////////
// Reads a 'data_type' value from memory assumed to be suitably aligned.
// NOTE(review): safe_ptr_cast is defined elsewhere — presumably it validates the
// alignment of 'input' when assert checks are enabled; confirm in memory_utils.
template<typename data_type>
inline data_type aligned_load(const void* input)
{
return *safe_ptr_cast<const data_type, const void*>(input);
}
////////////////////////////////////////////////////////////////////////////////
// Writes a 'data_type' value to a memory location that may not be suitably aligned.
template<typename data_type>
inline void unaligned_write(data_type input, void* output)
{
	std::memcpy(output, &input, sizeof(input));
}
// TODO: Add support for streaming prefetch (ptr, 0, 0) for arm
////////////////////////////////////////////////////////////////////////////////
// Hints the CPU to prefetch the cache line containing 'ptr' when an intrinsic
// is available on the current compiler/target; otherwise this is a no-op.
inline void memory_prefetch(const void* ptr)
{
#if defined(RTM_SSE2_INTRINSICS)
_mm_prefetch(reinterpret_cast<const char*>(ptr), _MM_HINT_T0);	// x86: prefetch into all cache levels (T0 hint)
#elif defined(ACL_COMPILER_GCC) || defined(ACL_COMPILER_CLANG)
__builtin_prefetch(ptr, 0, 3);	// read access (0) with high temporal locality (3)
#elif defined(RTM_NEON64_INTRINSICS) && defined(ACL_COMPILER_MSVC)
__prefetch(ptr);	// MSVC ARM64 intrinsic
#else
(void)ptr;	// no prefetch intrinsic available: no-op
#endif
}
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,124 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2018 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/memory_utils.h"
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
////////////////////////////////////////////////////////////////////////////////
// Represents an invalid pointer offset, used by 'ptr_offset'.
////////////////////////////////////////////////////////////////////////////////
struct invalid_ptr_offset final {};	// tag type: passing it to ptr_offset's constructor marks the offset invalid
////////////////////////////////////////////////////////////////////////////////
// A type safe pointer offset.
//
// This class only wraps an integer of the 'offset_type' type and adds type safety
// by only casting to 'data_type'.
////////////////////////////////////////////////////////////////////////////////
template<typename data_type, typename offset_type>
class ptr_offset
{
public:
////////////////////////////////////////////////////////////////////////////////
// Constructs a valid but empty offset.
constexpr ptr_offset() : m_value(0) {}
////////////////////////////////////////////////////////////////////////////////
// Constructs a valid offset with the specified value.
constexpr ptr_offset(size_t value) : m_value(safe_static_cast<offset_type>(value)) {}
////////////////////////////////////////////////////////////////////////////////
// Constructs an invalid offset.
constexpr ptr_offset(invalid_ptr_offset) : m_value(k_invalid_value) {}
////////////////////////////////////////////////////////////////////////////////
// Adds this offset to the provided pointer.
template<typename BaseType>
inline data_type* add_to(BaseType* ptr) const
{
ACL_ASSERT(is_valid(), "Invalid ptr_offset!");
return add_offset_to_ptr<data_type>(ptr, m_value);
}
////////////////////////////////////////////////////////////////////////////////
// Adds this offset to the provided pointer.
template<typename BaseType>
inline const data_type* add_to(const BaseType* ptr) const
{
ACL_ASSERT(is_valid(), "Invalid ptr_offset!");
return add_offset_to_ptr<const data_type>(ptr, m_value);
}
////////////////////////////////////////////////////////////////////////////////
// Adds this offset to the provided pointer or returns nullptr if the offset is invalid.
template<typename BaseType>
inline data_type* safe_add_to(BaseType* ptr) const
{
return is_valid() ? add_offset_to_ptr<data_type>(ptr, m_value) : nullptr;
}
////////////////////////////////////////////////////////////////////////////////
// Adds this offset to the provided pointer or returns nullptr if the offset is invalid.
template<typename BaseType>
inline const data_type* safe_add_to(const BaseType* ptr) const
{
return is_valid() ? add_offset_to_ptr<data_type>(ptr, m_value) : nullptr;
}
////////////////////////////////////////////////////////////////////////////////
// Coercion operator to the underlying 'offset_type'.
constexpr operator offset_type() const { return m_value; }
////////////////////////////////////////////////////////////////////////////////
// Returns true if the offset is valid.
constexpr bool is_valid() const { return m_value != k_invalid_value; }
private:
// Value representing an invalid offset
static constexpr offset_type k_invalid_value = std::numeric_limits<offset_type>::max();
// Actual offset value.
offset_type m_value;
};
////////////////////////////////////////////////////////////////////////////////
// A 16 bit offset.
// Valid values go up to 0xFFFE; 0xFFFF is reserved to mark an invalid offset.
template<typename data_type>
using ptr_offset16 = ptr_offset<data_type, uint16_t>;
////////////////////////////////////////////////////////////////////////////////
// A 32 bit offset.
// Valid values go up to 0xFFFFFFFE; 0xFFFFFFFF is reserved to mark an invalid offset.
template<typename data_type>
using ptr_offset32 = ptr_offset<data_type, uint32_t>;
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,92 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2017 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/enum_utils.h"
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
////////////////////////////////////////////////////////////////////////////////
// Various constants for range reduction.
// Per segment, range reduction stores 8 bits (1 byte) per vector component.
constexpr uint8_t k_segment_range_reduction_num_bits_per_component = 8;
constexpr uint8_t k_segment_range_reduction_num_bytes_per_component = 1;
// Size in bytes of a clip's vector3 range entry: 6 floats
// (presumably min + extent for the 3 components — confirm against the range writer).
constexpr uint32_t k_clip_range_reduction_vector3_range_size = sizeof(float) * 6;
////////////////////////////////////////////////////////////////////////////////
// range_reduction_flags8 represents the types of range reduction we support as a bit field.
//
// BE CAREFUL WHEN CHANGING VALUES IN THIS ENUM
// The range reduction strategy is serialized in the compressed data, if you change a value
// the compressed clips will be invalid. If you do, bump the appropriate algorithm versions.
enum class range_reduction_flags8 : uint8_t
{
none = 0x00,
// Flags to determine which tracks have range reduction applied
rotations = 0x01,
translations = 0x02,
scales = 0x04,
//Properties = 0x08, // TODO: Implement this
// Union of the three track flags above.
all_tracks = 0x07, // rotations | translations | scales
};
// Enables bitwise operators (|, &, ~, ...) on the flags enum.
ACL_IMPL_ENUM_FLAGS_OPERATORS(range_reduction_flags8)
//////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Returns a string of the algorithm name suitable for display.
// TODO: constexpr
inline const char* get_range_reduction_name(range_reduction_flags8 flags)
{
	// Switch on the raw underlying value rather than comparing enum combinations:
	// some compilers have trouble with constexpr operator| on enums in case labels,
	// and plain integer labels sidestep that issue entirely.
	switch (static_cast<uint8_t>(flags))
	{
	case 0x00:	// none
		return "range_reduction::none";
	case 0x01:	// rotations
		return "range_reduction::rotations";
	case 0x02:	// translations
		return "range_reduction::translations";
	case 0x04:	// scales
		return "range_reduction::scales";
	case 0x03:	// rotations | translations
		return "range_reduction::rotations | range_reduction::translations";
	case 0x05:	// rotations | scales
		return "range_reduction::rotations | range_reduction::scales";
	case 0x06:	// translations | scales
		return "range_reduction::translations | range_reduction::scales";
	case 0x07:	// rotations | translations | scales
		return "range_reduction::rotations | range_reduction::translations | range_reduction::scales";
	default:
		return "<Invalid>";
	}
}
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,95 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2017 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include <chrono>
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
////////////////////////////////////////////////////////////////////////////////
// A scope activated profiler.
////////////////////////////////////////////////////////////////////////////////
class scope_profiler
{
public:
////////////////////////////////////////////////////////////////////////////////
// Creates a scope profiler and automatically starts it.
scope_profiler();
////////////////////////////////////////////////////////////////////////////////
// Destroys the scope profiler.
// NOTE(review): the destructor is defaulted and does NOT call stop();
// timing only ends when stop() is called explicitly.
~scope_profiler() = default;
////////////////////////////////////////////////////////////////////////////////
// Manually stops the profiler. Subsequent calls are no-ops once the end time
// differs from the start time (see the definition below).
void stop();
////////////////////////////////////////////////////////////////////////////////
// Returns the time elapsed between construction and the stop() call, in nanoseconds.
// Returns zero if stop() has not been called (start and end times are equal).
std::chrono::nanoseconds get_elapsed_time() const { return std::chrono::duration_cast<std::chrono::nanoseconds>(m_end_time - m_start_time); }
////////////////////////////////////////////////////////////////////////////////
// Returns the elapsed time (as defined above) in microseconds.
double get_elapsed_microseconds() const { return std::chrono::duration<double, std::chrono::microseconds::period>(get_elapsed_time()).count(); }
////////////////////////////////////////////////////////////////////////////////
// Returns the elapsed time (as defined above) in milliseconds.
double get_elapsed_milliseconds() const { return std::chrono::duration<double, std::chrono::milliseconds::period>(get_elapsed_time()).count(); }
////////////////////////////////////////////////////////////////////////////////
// Returns the elapsed time (as defined above) in seconds.
double get_elapsed_seconds() const { return std::chrono::duration<double, std::chrono::seconds::period>(get_elapsed_time()).count(); }
private:
// Non-copyable: copying a running profiler has no meaningful semantics.
scope_profiler(const scope_profiler&) = delete;
scope_profiler& operator=(const scope_profiler&) = delete;
// The time at which the profiler started.
std::chrono::time_point<std::chrono::high_resolution_clock> m_start_time;
// The time at which the profiler stopped (equals m_start_time until stop() is called).
std::chrono::time_point<std::chrono::high_resolution_clock> m_end_time;
};
//////////////////////////////////////////////////////////////////////////
// Starts the profiler: both timestamps are initialized to the same instant so
// that the elapsed time reads as zero until stop() is called.
inline scope_profiler::scope_profiler()
{
	const auto start_instant = std::chrono::high_resolution_clock::now();
	m_start_time = start_instant;
	m_end_time = start_instant;
}
// Records the end time. Once the end time differs from the start time,
// further calls leave the recorded duration untouched.
inline void scope_profiler::stop()
{
	if (m_end_time == m_start_time)
		m_end_time = std::chrono::high_resolution_clock::now();
}
}
ACL_IMPL_FILE_PRAGMA_POP

150
sources/acl/core/string.h Normal file
View File

@ -0,0 +1,150 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2017 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/iallocator.h"
#include "acl/core/error.h"
#include <memory>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
//////////////////////////////////////////////////////////////////////////
// A basic string class that uses our custom allocator.
// It is used exclusively for debug purposes.
//
// Strings are immutable.
//////////////////////////////////////////////////////////////////////////
class string
{
public:
// Constructs an empty string owning no memory and holding no allocator.
string() noexcept : m_allocator(nullptr), m_c_str(nullptr) {}
// Copies 'length' characters from 'c_str' into a NUL-terminated buffer allocated
// from 'allocator'. The input must not contain embedded NUL characters: the
// destructor and get_copy() recover the length with std::strlen.
// NOTE(review): std::memcpy/std::strlen are used but <cstring> is not included
// directly by this header — presumably pulled in transitively; confirm.
string(iallocator& allocator, const char* c_str, size_t length)
: m_allocator(&allocator)
{
if (length > 0)
{
#if defined(ACL_HAS_ASSERT_CHECKS) && !defined(NDEBUG)
for (size_t i = 0; i < length; ++i)
ACL_ASSERT(c_str[i] != '\0', "String cannot contain NULL terminators");
#endif
m_c_str = allocate_type_array<char>(allocator, length + 1);
std::memcpy(m_c_str, c_str, length);
m_c_str[length] = '\0';
}
else
{
m_c_str = nullptr;
}
}
// Constructs from a NUL-terminated C string (nullptr is treated as empty).
string(iallocator& allocator, const char* c_str)
: string(allocator, c_str, c_str != nullptr ? std::strlen(c_str) : 0)
{}
// Constructs a copy of 'str' using the provided allocator.
string(iallocator& allocator, const string& str)
: string(allocator, str.c_str(), str.size())
{}
// Frees the owned buffer. The length is recomputed with strlen, which is why
// embedded NUL characters are forbidden at construction.
~string()
{
if (m_c_str != nullptr)
deallocate_type_array(*m_allocator, m_c_str, std::strlen(m_c_str) + 1);
}
// Move constructor: steals the buffer and resets 'other' to the empty state
// by re-running the default constructor in place.
string(string&& other) noexcept
: m_allocator(other.m_allocator)
, m_c_str(other.m_c_str)
{
new(&other) string();
}
// Move assignment: swaps state; 'other' frees our old buffer when it is destroyed.
string& operator=(string&& other) noexcept
{
std::swap(m_allocator, other.m_allocator);
std::swap(m_c_str, other.m_c_str);
return *this;
}
// Compares contents with a C string; nullptr and the empty string compare equal.
bool operator==(const char* c_str) const noexcept
{
const size_t this_length = m_c_str == nullptr ? 0 : std::strlen(m_c_str);
const size_t other_length = c_str == nullptr ? 0 : std::strlen(c_str);
if (this_length != other_length)
return false;
if (this_length == 0)
return true;
return std::memcmp(m_c_str, c_str, other_length) == 0;
}
bool operator!=(const char* c_str) const noexcept { return !(*this == c_str); }
bool operator==(const string& other) const noexcept { return (*this == other.c_str()); }
bool operator!=(const string& other) const noexcept { return !(*this == other.c_str()); }
//////////////////////////////////////////////////////////////////////////
// Returns a pointer to the allocator instance or nullptr if there is none present.
iallocator* get_allocator() const noexcept { return m_allocator; }
//////////////////////////////////////////////////////////////////////////
// Returns a copy of the current string using the same allocator.
// Explicit instead of using the assignment operator or copy constructor
string get_copy() const
{
if (m_c_str == nullptr)
return string();
return string(*m_allocator, m_c_str, std::strlen(m_c_str));
}
//////////////////////////////////////////////////////////////////////////
// Returns a copy of the current string using the provided allocator.
// Explicit instead of using the assignment operator or copy constructor
string get_copy(iallocator& allocator) const
{
if (m_c_str == nullptr)
return string();
return string(allocator, m_c_str, std::strlen(m_c_str));
}
// Returns the contents as a NUL-terminated C string; never returns nullptr.
const char* c_str() const noexcept { return m_c_str != nullptr ? m_c_str : ""; }
// Returns the length in characters (recomputed on every call via strlen).
size_t size() const noexcept { return m_c_str != nullptr ? std::strlen(m_c_str) : 0; }
bool empty() const noexcept { return m_c_str != nullptr ? (std::strlen(m_c_str) == 0) : true; }
private:
// Allocator that owns m_c_str, or nullptr for an empty string.
iallocator* m_allocator;
// Owned NUL-terminated buffer, or nullptr for an empty string.
char* m_c_str;
};
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,140 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2020 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/error_result.h"
#include "acl/core/track_types.h"
#include "acl/core/impl/compiler_utils.h"
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
//////////////////////////////////////////////////////////////////////////
// This structure describes the various settings for floating point scalar tracks.
// Used by: float1f, float2f, float3f, float4f, vector4f
struct track_desc_scalarf
{
//////////////////////////////////////////////////////////////////////////
// The track category for this description.
static constexpr track_category8 category = track_category8::scalarf;
//////////////////////////////////////////////////////////////////////////
// The track output index. When writing out the compressed data stream, this index
// will be used instead of the track index. This allows custom reordering for things
// like LOD sorting or skeleton remapping. A value of 'k_invalid_track_index' will strip the track
// from the compressed data stream. Output indices must be unique and contiguous.
// (k_invalid_track_index is declared in track_types.h, included above.)
uint32_t output_index = k_invalid_track_index;
//////////////////////////////////////////////////////////////////////////
// The per component precision threshold to try and attain when optimizing the bit rate.
// If the error is below the precision threshold, we will remove bits until we reach it without
// exceeding it. If the error is above the precision threshold, we will add more bits until
// we lower it underneath.
// Defaults to '0.00001'
float precision = 0.00001F;
//////////////////////////////////////////////////////////////////////////
// Returns whether a scalar track description is valid or not.
// It is valid if:
//    - The precision is positive or zero and finite
// Implemented in impl/track_desc.impl.h (included at the bottom of this header).
error_result is_valid() const;
};
//////////////////////////////////////////////////////////////////////////
// This structure describes the various settings for transform tracks.
// Used by: quatf, qvvf
struct track_desc_transformf
{
//////////////////////////////////////////////////////////////////////////
// The track category for this description.
static constexpr track_category8 category = track_category8::transformf;
//////////////////////////////////////////////////////////////////////////
// The track output index. When writing out the compressed data stream, this index
// will be used instead of the track index. This allows custom reordering for things
// like LOD sorting or skeleton remapping. A value of 'k_invalid_track_index' will strip the track
// from the compressed data stream. Output indices must be unique and contiguous.
uint32_t output_index = k_invalid_track_index;
//////////////////////////////////////////////////////////////////////////
// The index of the parent transform track or `k_invalid_track_index` if it has no parent.
uint32_t parent_index = k_invalid_track_index;
//////////////////////////////////////////////////////////////////////////
// The shell precision threshold to try and attain when optimizing the bit rate.
// If the error is below the precision threshold, we will remove bits until we reach it without
// exceeding it. If the error is above the precision threshold, we will add more bits until
// we lower it underneath.
// Note that you will need to change this value if your units are not in centimeters.
// Defaults to '0.01' centimeters
float precision = 0.01F;
//////////////////////////////////////////////////////////////////////////
// The error is measured on a rigidly deformed shell around every transform at the specified distance.
// Defaults to '3.0' centimeters
float shell_distance = 3.0F;
//////////////////////////////////////////////////////////////////////////
// TODO: Use the precision and shell distance?
//////////////////////////////////////////////////////////////////////////
// Threshold angle when detecting if rotation tracks are constant or default.
// See the rtm::quatf quat_near_identity for details about how the default threshold
// was chosen. You will typically NEVER need to change this, the value has been
// selected to be as safe as possible and is independent of game engine units.
// Defaults to '0.00284714461' radians
float constant_rotation_threshold_angle = 0.00284714461F;
//////////////////////////////////////////////////////////////////////////
// Threshold value to use when detecting if translation tracks are constant or default.
// Note that you will need to change this value if your units are not in centimeters.
// Defaults to '0.001' centimeters.
float constant_translation_threshold = 0.001F;
//////////////////////////////////////////////////////////////////////////
// Threshold value to use when detecting if scale tracks are constant or default.
// There are no units for scale as such a value that was deemed safe was selected
// as a default.
// Defaults to '0.00001'
float constant_scale_threshold = 0.00001F;
//////////////////////////////////////////////////////////////////////////
// Returns whether a transform track description is valid or not.
// It is valid if:
//    - The precision is positive or zero and finite
//    - The shell distance is positive or zero and finite
//    - The constant rotation threshold angle is positive or zero and finite
//    - The constant translation threshold is positive or zero and finite
//    - The constant scale threshold is positive or zero and finite
// Implemented in impl/track_desc.impl.h (included at the bottom of this header).
error_result is_valid() const;
};
}
#include "acl/core/impl/track_desc.impl.h"
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,113 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2020 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
// BE CAREFUL WHEN CHANGING VALUES IN THIS ENUM
// The rotation format is serialized in the compressed data, if you change a value
// the compressed clips will be invalid. If you do, bump the appropriate algorithm versions.
// Storage formats supported for rotation (quaternion) tracks.
enum class rotation_format8 : uint8_t
{
quatf_full = 0,					// Full precision quaternion, [x,y,z,w] stored with float32
//quatf_variable = 1,			// TODO Quantized quaternion, [x,y,z,w] stored with [N,N,N,N] bits (same number of bits per component)
quatf_drop_w_full = 2,			// Full precision quaternion, [x,y,z] stored with float32 (w is dropped)
quatf_drop_w_variable = 3,		// Quantized quaternion, [x,y,z] stored with [N,N,N] bits (w is dropped, same number of bits per component)
//quatf_optimal = 15,			// Mix of quatf_variable and quatf_drop_w_variable
// TODO: Implement these?
//quatf_drop_largest_full		// Full precision quaternion, [a,b,c] stored with float32 (largest component dropped)
//quatf_drop_largest_variable	// Quantized quaternion, [a,b,c] stored with [N,N,N] bits (largest component dropped, same number of bits per component)
//quatf_log_full,				// Full precision quaternion logarithm, [x,y,z] stored with float32
//quatf_log_variable,			// Quantized quaternion logarithm, [x,y,z] stored with [N,N,N] bits (same number of bits per component)
};
// BE CAREFUL WHEN CHANGING VALUES IN THIS ENUM
// The vector format is serialized in the compressed data, if you change a value
// the compressed clips will be invalid. If you do, bump the appropriate algorithm versions.
// Storage formats supported for vector3 (translation/scale) tracks.
enum class vector_format8 : uint8_t
{
vector3f_full = 0,		// Full precision vector3f, [x,y,z] stored with float32
vector3f_variable = 1,	// Quantized vector3f, [x,y,z] stored with [N,N,N] bits (same number of bits per component)
};
union track_format8
{
rotation_format8 rotation;
vector_format8 vector;
track_format8() {}
explicit track_format8(rotation_format8 format) : rotation(format) {}
explicit track_format8(vector_format8 format) : vector(format) {}
};
enum class animation_track_type8 : uint8_t
{
rotation,
translation,
scale,
};
enum class rotation_variant8 : uint8_t
{
quat,
quat_drop_w,
//quat_drop_largest,
//quat_log,
};
//////////////////////////////////////////////////////////////////////////
const char* get_rotation_format_name(rotation_format8 format);
bool get_rotation_format(const char* format, rotation_format8& out_format);
constexpr const char* get_vector_format_name(vector_format8 format);
bool get_vector_format(const char* format, vector_format8& out_format);
constexpr rotation_variant8 get_rotation_variant(rotation_format8 rotation_format);
constexpr rotation_format8 get_highest_variant_precision(rotation_variant8 variant);
constexpr bool is_rotation_format_variable(rotation_format8 format);
constexpr bool is_rotation_format_full_precision(rotation_format8 format);
constexpr bool is_vector_format_variable(vector_format8 format);
constexpr bool is_vector_format_full_precision(vector_format8 format);
}
#include "acl/core/impl/track_formats.impl.h"
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,107 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2019 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include "acl/core/track_desc.h"
#include "acl/core/track_types.h"
#include <rtm/types.h>
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
	//////////////////////////////////////////////////////////////////////////
	// Type traits for tracks.
	// Each trait contains:
	//    - The category of the track
	//    - The type of each sample in the track
	//    - The type of the track description
	//////////////////////////////////////////////////////////////////////////
	// Primary template is intentionally empty: using an unsupported track type
	// fails to compile because the members below are missing.
	template<track_type8 track_type>
	struct track_traits {};

	//////////////////////////////////////////////////////////////////////////
	// Specializations for each track type.

	// Scalar track of 1 float per sample.
	template<>
	struct track_traits<track_type8::float1f>
	{
		static constexpr track_category8 category = track_category8::scalarf;

		using sample_type = float;
		using desc_type = track_desc_scalarf;
	};

	// Scalar track of 2 floats per sample.
	template<>
	struct track_traits<track_type8::float2f>
	{
		static constexpr track_category8 category = track_category8::scalarf;

		using sample_type = rtm::float2f;
		using desc_type = track_desc_scalarf;
	};

	// Scalar track of 3 floats per sample.
	template<>
	struct track_traits<track_type8::float3f>
	{
		static constexpr track_category8 category = track_category8::scalarf;

		using sample_type = rtm::float3f;
		using desc_type = track_desc_scalarf;
	};

	// Scalar track of 4 floats per sample.
	template<>
	struct track_traits<track_type8::float4f>
	{
		static constexpr track_category8 category = track_category8::scalarf;

		using sample_type = rtm::float4f;
		using desc_type = track_desc_scalarf;
	};

	// Scalar track of one SIMD vector4f per sample.
	template<>
	struct track_traits<track_type8::vector4f>
	{
		static constexpr track_category8 category = track_category8::scalarf;

		using sample_type = rtm::vector4f;
		using desc_type = track_desc_scalarf;
	};

	// Transform track: one QVV (rotation quat, translation, scale) per sample.
	template<>
	struct track_traits<track_type8::qvvf>
	{
		static constexpr track_category8 category = track_category8::transformf;

		using sample_type = rtm::qvvf;
		using desc_type = track_desc_transformf;
	};
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,215 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2017 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/error_result.h"
#include "acl/core/memory_utils.h"
#include "acl/core/impl/compiler_utils.h"
#include <rtm/scalarf.h>
#include <cstdint>
#include <cstring>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
	//////////////////////////////////////////////////////////////////////////
	// We only support up to 4294967295 tracks. We reserve 4294967295 for the invalid index
	constexpr uint32_t k_invalid_track_index = 0xFFFFFFFFU;

	//////////////////////////////////////////////////////////////////////////
	// The various supported track types.
	// Note: be careful when changing values here as they might be serialized.
	// Commented-out entries reserve their integral values for future use —
	// do not reuse them for new types.
	enum class track_type8 : uint8_t
	{
		float1f = 0,
		float2f = 1,
		float3f = 2,
		float4f = 3,
		vector4f = 4,

		//float1d = 5,
		//float2d = 6,
		//float3d = 7,
		//float4d = 8,
		//vector4d = 9,

		//quatf = 10,
		//quatd = 11,

		qvvf = 12,
		//qvvd = 13,

		//int1i = 14,
		//int2i = 15,
		//int3i = 16,
		//int4i = 17,
		//vector4i = 18,

		//int1q = 19,
		//int2q = 20,
		//int3q = 21,
		//int4q = 22,
		//vector4q = 23,
	};

	//////////////////////////////////////////////////////////////////////////
	// The categories of track types.
	// Commented-out entries reserve their integral values for future use.
	enum class track_category8 : uint8_t
	{
		scalarf = 0,			// float32 scalar tracks
		scalard = 1,			// float64 scalar tracks (reserved)
		//scalari = 2,
		//scalarq = 3,
		transformf = 4,			// float32 transform tracks
		transformd = 5,			// float64 transform tracks (reserved)
	};
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
// Returns the string representation for the provided track type.
// TODO: constexpr
inline const char* get_track_type_name(track_type8 type)
{
switch (type)
{
case track_type8::float1f: return "float1f";
case track_type8::float2f: return "float2f";
case track_type8::float3f: return "float3f";
case track_type8::float4f: return "float4f";
case track_type8::vector4f: return "vector4f";
case track_type8::qvvf: return "qvvf";
default: return "<Invalid>";
}
}
//////////////////////////////////////////////////////////////////////////
// Returns the track type from its string representation.
// Returns true on success, false otherwise.
inline bool get_track_type(const char* type, track_type8& out_type)
{
// Entries in the same order as the enum integral value
static const char* k_track_type_names[] =
{
"float1f",
"float2f",
"float3f",
"float4f",
"vector4f",
"float1d",
"float2d",
"float3d",
"float4d",
"vector4d",
"quatf",
"quatd",
"qvvf",
};
static_assert(get_array_size(k_track_type_names) == (size_t)track_type8::qvvf + 1, "Unexpected array size");
for (size_t type_index = 0; type_index < get_array_size(k_track_type_names); ++type_index)
{
const char* type_name = k_track_type_names[type_index];
if (std::strncmp(type, type_name, std::strlen(type_name)) == 0)
{
out_type = safe_static_cast<track_type8>(type_index);
return true;
}
}
return false;
}
	//////////////////////////////////////////////////////////////////////////
	// Returns the track category for the provided track type.
	// Out-of-range values assert and fall back to scalarf in release builds.
	inline track_category8 get_track_category(track_type8 type)
	{
		// Entries in the same order as the enum integral value.
		// Reserved (commented-out) types are mapped too so that any raw value
		// up to qvvf resolves to a category.
		static constexpr track_category8 k_track_type_to_category[]
		{
			track_category8::scalarf,		// float1f
			track_category8::scalarf,		// float2f
			track_category8::scalarf,		// float3f
			track_category8::scalarf,		// float4f
			track_category8::scalarf,		// vector4f
			track_category8::scalard,		// float1d
			track_category8::scalard,		// float2d
			track_category8::scalard,		// float3d
			track_category8::scalard,		// float4d
			track_category8::scalard,		// vector4d
			track_category8::transformf,	// quatf
			track_category8::transformd,	// quatd
			track_category8::transformf,	// qvvf
		};

		// Keep the table in sync with the enum.
		static_assert(get_array_size(k_track_type_to_category) == (size_t)track_type8::qvvf + 1, "Unexpected array size");

		ACL_ASSERT(type <= track_type8::qvvf, "Unexpected track type");
		return type <= track_type8::qvvf ? k_track_type_to_category[static_cast<uint32_t>(type)] : track_category8::scalarf;
	}
	//////////////////////////////////////////////////////////////////////////
	// Returns the num of elements within a sample for the provided track type.
	// Out-of-range values assert and return 0 in release builds.
	inline uint32_t get_track_num_sample_elements(track_type8 type)
	{
		// Entries in the same order as the enum integral value.
		// Reserved (commented-out) types are included so any raw value up to
		// qvvf resolves to a count.
		static constexpr uint32_t k_track_type_to_num_elements[]
		{
			1,	// float1f
			2,	// float2f
			3,	// float3f
			4,	// float4f
			4,	// vector4f
			1,	// float1d
			2,	// float2d
			3,	// float3d
			4,	// float4d
			4,	// vector4d
			4,	// quatf
			4,	// quatd
			12,	// qvvf (quat + translation + scale = 4 + 4 + 4 floats)
		};

		// Keep the table in sync with the enum.
		static_assert(get_array_size(k_track_type_to_num_elements) == (size_t)track_type8::qvvf + 1, "Unexpected array size");

		ACL_ASSERT(type <= track_type8::qvvf, "Unexpected track type");
		return type <= track_type8::qvvf ? k_track_type_to_num_elements[static_cast<uint32_t>(type)] : 0;
	}
}
ACL_IMPL_FILE_PRAGMA_POP

View File

@ -0,0 +1,133 @@
#pragma once
////////////////////////////////////////////////////////////////////////////////
// The MIT License (MIT)
//
// Copyright (c) 2019 Nicholas Frechette & Animation Compression Library contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
////////////////////////////////////////////////////////////////////////////////
#include "acl/core/impl/compiler_utils.h"
#include <rtm/types.h>
#include <cstdint>
ACL_IMPL_FILE_PRAGMA_PUSH
namespace acl
{
//////////////////////////////////////////////////////////////////////////
// We use a struct like this to allow an arbitrary format on the end user side.
// Since our decode function is templated on this type implemented by the user,
// the callbacks can trivially be inlined and customized.
// Only called functions need to be overridden and implemented.
//////////////////////////////////////////////////////////////////////////
	// Default no-op writer: every write_* member is an empty stub and every
	// skip_* query returns false (nothing skipped). User-defined writers model
	// this struct and override only the members they need.
	struct track_writer
	{
		//////////////////////////////////////////////////////////////////////////
		// Scalar track writing

		//////////////////////////////////////////////////////////////////////////
		// Called by the decoder to write out a value for a specified track index.
		void RTM_SIMD_CALL write_float1(uint32_t track_index, rtm::scalarf_arg0 value)
		{
			(void)track_index;
			(void)value;
		}

		//////////////////////////////////////////////////////////////////////////
		// Called by the decoder to write out a value for a specified track index.
		void RTM_SIMD_CALL write_float2(uint32_t track_index, rtm::vector4f_arg0 value)
		{
			(void)track_index;
			(void)value;
		}

		//////////////////////////////////////////////////////////////////////////
		// Called by the decoder to write out a value for a specified track index.
		void RTM_SIMD_CALL write_float3(uint32_t track_index, rtm::vector4f_arg0 value)
		{
			(void)track_index;
			(void)value;
		}

		//////////////////////////////////////////////////////////////////////////
		// Called by the decoder to write out a value for a specified track index.
		void RTM_SIMD_CALL write_float4(uint32_t track_index, rtm::vector4f_arg0 value)
		{
			(void)track_index;
			(void)value;
		}

		//////////////////////////////////////////////////////////////////////////
		// Called by the decoder to write out a value for a specified track index.
		void RTM_SIMD_CALL write_vector4(uint32_t track_index, rtm::vector4f_arg0 value)
		{
			(void)track_index;
			(void)value;
		}

		//////////////////////////////////////////////////////////////////////////
		// Transform track writing

		//////////////////////////////////////////////////////////////////////////
		// These allow the caller of decompress_pose to control which track types they are interested in.
		// This information allows the codecs to avoid unpacking values that are not needed.
		// Must be static constexpr!
		static constexpr bool skip_all_rotations() { return false; }
		static constexpr bool skip_all_translations() { return false; }
		static constexpr bool skip_all_scales() { return false; }

		//////////////////////////////////////////////////////////////////////////
		// These allow the caller of decompress_pose to control which tracks they are interested in.
		// This information allows the codecs to avoid unpacking values that are not needed.
		// Must be non-static member functions!
		constexpr bool skip_track_rotation(uint32_t /*track_index*/) const { return false; }
		constexpr bool skip_track_translation(uint32_t /*track_index*/) const { return false; }
		constexpr bool skip_track_scale(uint32_t /*track_index*/) const { return false; }

		//////////////////////////////////////////////////////////////////////////
		// Called by the decoder to write out a quaternion rotation value for a specified bone index.
		void RTM_SIMD_CALL write_rotation(uint32_t track_index, rtm::quatf_arg0 rotation)
		{
			(void)track_index;
			(void)rotation;
		}

		//////////////////////////////////////////////////////////////////////////
		// Called by the decoder to write out a translation value for a specified bone index.
		void RTM_SIMD_CALL write_translation(uint32_t track_index, rtm::vector4f_arg0 translation)
		{
			(void)track_index;
			(void)translation;
		}

		//////////////////////////////////////////////////////////////////////////
		// Called by the decoder to write out a scale value for a specified bone index.
		void RTM_SIMD_CALL write_scale(uint32_t track_index, rtm::vector4f_arg0 scale)
		{
			(void)track_index;
			(void)scale;
		}
	};
}
ACL_IMPL_FILE_PRAGMA_POP

Some files were not shown because too many files have changed in this diff Show More