summaryrefslogtreecommitdiffstats
path: root/vendor/github.com/Benau
diff options
context:
space:
mode:
authorBenau <Benau@users.noreply.github.com>2021-08-25 04:32:50 +0800
committerGitHub <noreply@github.com>2021-08-24 22:32:50 +0200
commit53cafa9f3d0c8be33821fc7338b1da97e91d9cc6 (patch)
tree964a225219099a1a1c282e27913767da588191b4 /vendor/github.com/Benau
parentd4195deb3a6305c49c50ff30e8af978c7f1bdd92 (diff)
downloadmatterbridge-msglm-53cafa9f3d0c8be33821fc7338b1da97e91d9cc6.tar.gz
matterbridge-msglm-53cafa9f3d0c8be33821fc7338b1da97e91d9cc6.tar.bz2
matterbridge-msglm-53cafa9f3d0c8be33821fc7338b1da97e91d9cc6.zip
Convert .tgs with go libraries (and cgo) (telegram) (#1569)
This commit adds support for go/cgo tgs conversion when building with the -tags `cgo` The default binaries are still "pure" go and uses the old way of converting. * Move lottie_convert.py conversion code to its own file * Add optional libtgsconverter * Update vendor * Apply suggestions from code review * Update bridge/helper/libtgsconverter.go Co-authored-by: Wim <wim@42.be>
Diffstat (limited to 'vendor/github.com/Benau')
-rw-r--r--vendor/github.com/Benau/go_rlottie/LICENSE24
-rw-r--r--vendor/github.com/Benau/go_rlottie/README.md1
-rw-r--r--vendor/github.com/Benau/go_rlottie/binding_c_lottieanimation_capi.cpp284
-rw-r--r--vendor/github.com/Benau/go_rlottie/config.h10
-rw-r--r--vendor/github.com/Benau/go_rlottie/generate_from_rlottie.py122
-rw-r--r--vendor/github.com/Benau/go_rlottie/go.mod1
-rw-r--r--vendor/github.com/Benau/go_rlottie/go_rlottie.go56
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_lottieanimation.cpp457
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_lottiefiltermodel.h435
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_lottieitem.cpp1491
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_lottieitem.h626
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_lottieitem_capi.cpp339
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_lottiekeypath.cpp86
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_lottiekeypath.h53
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_lottieloader.cpp169
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_lottiemodel.cpp390
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_lottiemodel.h1148
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_lottieparser.cpp2390
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_lottieproxymodel.cpp0
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_allocators.h284
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_cursorstreamwrapper.h78
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_document.h2732
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_encodedstream.h299
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_encodings.h716
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_error_en.h74
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_error_error.h161
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_filereadstream.h99
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_filewritestream.h104
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_fwd.h151
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_biginteger.h290
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_clzll.h71
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_diyfp.h257
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_dtoa.h245
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_ieee754.h78
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_itoa.h308
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_meta.h186
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_pow10.h55
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_regex.h739
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_stack.h232
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_strfunc.h69
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_strtod.h290
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_swap.h46
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_istreamwrapper.h128
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_memorybuffer.h70
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_memorystream.h71
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_msinttypes_inttypes.h316
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_msinttypes_stdint.h300
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_ostreamwrapper.h81
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_pointer.h1415
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_prettywriter.h277
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_rapidjson.h692
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_reader.h2244
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_schema.h2496
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_stream.h223
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_stringbuffer.h121
-rw-r--r--vendor/github.com/Benau/go_rlottie/lottie_rapidjson_writer.h710
-rw-r--r--vendor/github.com/Benau/go_rlottie/rlottie.h525
-rw-r--r--vendor/github.com/Benau/go_rlottie/rlottie_capi.h299
-rw-r--r--vendor/github.com/Benau/go_rlottie/rlottiecommon.h231
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_math.cpp461
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_math.h438
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_raster.cpp1423
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_raster.h607
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_stroker.cpp1936
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_stroker.h319
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_types.h160
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_pixman_pixman-arm-neon-asm.S500
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_pixman_pixman-arm-neon-asm.h1126
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_stb_stb_image.cpp59
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_stb_stb_image.h7509
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_varenaalloc.cpp166
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_varenaalloc.h232
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vbezier.cpp135
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vbezier.h139
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vbitmap.cpp219
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vbitmap.h94
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vbrush.cpp69
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vbrush.h92
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vcowptr.h126
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vdasher.cpp254
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vdasher.h65
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vdebug.cpp758
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vdebug.h187
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vdrawable.cpp130
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vdrawable.h93
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vdrawhelper.cpp767
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vdrawhelper.h270
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vdrawhelper_common.cpp190
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vdrawhelper_neon.cpp34
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vdrawhelper_sse2.cpp261
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_velapsedtimer.cpp50
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_velapsedtimer.h41
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vglobal.h302
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vimageloader.cpp220
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vimageloader.h26
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vinterpolator.cpp124
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vinterpolator.h88
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vline.h97
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vmatrix.cpp684
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vmatrix.h116
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vpainter.cpp172
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vpainter.h60
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vpath.cpp709
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vpath.h285
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vpathmesure.cpp67
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vpathmesure.h44
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vpoint.h210
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vraster.cpp563
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vraster.h50
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vrect.cpp68
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vrect.h172
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vrle.cpp748
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vrle.h121
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vsharedptr.h123
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vstackallocator.h156
-rw-r--r--vendor/github.com/Benau/go_rlottie/vector_vtaskqueue.h87
-rw-r--r--vendor/github.com/Benau/tgsconverter/LICENSE24
-rw-r--r--vendor/github.com/Benau/tgsconverter/libtgsconverter/apng.go51
-rw-r--r--vendor/github.com/Benau/tgsconverter/libtgsconverter/gif.go81
-rw-r--r--vendor/github.com/Benau/tgsconverter/libtgsconverter/imagewriter.go40
-rw-r--r--vendor/github.com/Benau/tgsconverter/libtgsconverter/lib.go160
-rw-r--r--vendor/github.com/Benau/tgsconverter/libtgsconverter/png.go30
-rw-r--r--vendor/github.com/Benau/tgsconverter/libtgsconverter/quantize_bucket.go119
-rw-r--r--vendor/github.com/Benau/tgsconverter/libtgsconverter/quantize_mediancut.go209
-rw-r--r--vendor/github.com/Benau/tgsconverter/libtgsconverter/webp.go39
125 files changed, 50800 insertions, 0 deletions
diff --git a/vendor/github.com/Benau/go_rlottie/LICENSE b/vendor/github.com/Benau/go_rlottie/LICENSE
new file mode 100644
index 00000000..86fd0417
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/LICENSE
@@ -0,0 +1,24 @@
+The MIT License
+
+Copyright (c) 2021, (see AUTHORS)
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/Benau/go_rlottie/README.md b/vendor/github.com/Benau/go_rlottie/README.md
new file mode 100644
index 00000000..68fbb245
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/README.md
@@ -0,0 +1 @@
+Go binding for https://github.com/Samsung/rlottie, example at https://github.com/Benau/tgsconverter
diff --git a/vendor/github.com/Benau/go_rlottie/binding_c_lottieanimation_capi.cpp b/vendor/github.com/Benau/go_rlottie/binding_c_lottieanimation_capi.cpp
new file mode 100644
index 00000000..9dc851e9
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/binding_c_lottieanimation_capi.cpp
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "rlottie.h"
+#include "rlottie_capi.h"
+#include "vector_vdebug.h"
+
+using namespace rlottie;
+
+extern "C" {
+#include <string.h>
+#include <stdarg.h>
+
+struct Lottie_Animation_S
+{
+ std::unique_ptr<Animation> mAnimation;
+ std::future<Surface> mRenderTask;
+ uint32_t *mBufferRef;
+ LOTMarkerList *mMarkerList;
+};
+
+RLOTTIE_API Lottie_Animation_S *lottie_animation_from_file(const char *path)
+{
+ if (auto animation = Animation::loadFromFile(path) ) {
+ Lottie_Animation_S *handle = new Lottie_Animation_S();
+ handle->mAnimation = std::move(animation);
+ return handle;
+ } else {
+ return nullptr;
+ }
+}
+
+RLOTTIE_API Lottie_Animation_S *lottie_animation_from_data(const char *data, const char *key, const char *resourcePath)
+{
+ if (auto animation = Animation::loadFromData(data, key, resourcePath) ) {
+ Lottie_Animation_S *handle = new Lottie_Animation_S();
+ handle->mAnimation = std::move(animation);
+ return handle;
+ } else {
+ return nullptr;
+ }
+}
+
+RLOTTIE_API void lottie_animation_destroy(Lottie_Animation_S *animation)
+{
+ if (animation) {
+ if (animation->mMarkerList) {
+ for(size_t i = 0; i < animation->mMarkerList->size; i++) {
+ if (animation->mMarkerList->ptr[i].name) free(animation->mMarkerList->ptr[i].name);
+ }
+ delete[] animation->mMarkerList->ptr;
+ delete animation->mMarkerList;
+ }
+
+ if (animation->mRenderTask.valid()) {
+ animation->mRenderTask.get();
+ }
+ animation->mAnimation = nullptr;
+ delete animation;
+ }
+}
+
+RLOTTIE_API void lottie_animation_get_size(const Lottie_Animation_S *animation, size_t *width, size_t *height)
+{
+ if (!animation) return;
+
+ animation->mAnimation->size(*width, *height);
+}
+
+RLOTTIE_API double lottie_animation_get_duration(const Lottie_Animation_S *animation)
+{
+ if (!animation) return 0;
+
+ return animation->mAnimation->duration();
+}
+
+RLOTTIE_API size_t lottie_animation_get_totalframe(const Lottie_Animation_S *animation)
+{
+ if (!animation) return 0;
+
+ return animation->mAnimation->totalFrame();
+}
+
+
+RLOTTIE_API double lottie_animation_get_framerate(const Lottie_Animation_S *animation)
+{
+ if (!animation) return 0;
+
+ return animation->mAnimation->frameRate();
+}
+
+RLOTTIE_API const LOTLayerNode * lottie_animation_render_tree(Lottie_Animation_S *animation, size_t frame_num, size_t width, size_t height)
+{
+ if (!animation) return nullptr;
+
+ return animation->mAnimation->renderTree(frame_num, width, height);
+}
+
+RLOTTIE_API size_t
+lottie_animation_get_frame_at_pos(const Lottie_Animation_S *animation, float pos)
+{
+ if (!animation) return 0;
+
+ return animation->mAnimation->frameAtPos(pos);
+}
+
+RLOTTIE_API void
+lottie_animation_render(Lottie_Animation_S *animation,
+ size_t frame_number,
+ uint32_t *buffer,
+ size_t width,
+ size_t height,
+ size_t bytes_per_line)
+{
+ if (!animation) return;
+
+ rlottie::Surface surface(buffer, width, height, bytes_per_line);
+ animation->mAnimation->renderSync(frame_number, surface);
+}
+
+RLOTTIE_API void
+lottie_animation_render_async(Lottie_Animation_S *animation,
+ size_t frame_number,
+ uint32_t *buffer,
+ size_t width,
+ size_t height,
+ size_t bytes_per_line)
+{
+ if (!animation) return;
+
+ rlottie::Surface surface(buffer, width, height, bytes_per_line);
+ animation->mRenderTask = animation->mAnimation->render(frame_number, surface);
+ animation->mBufferRef = buffer;
+}
+
+RLOTTIE_API uint32_t *
+lottie_animation_render_flush(Lottie_Animation_S *animation)
+{
+ if (!animation) return nullptr;
+
+ if (animation->mRenderTask.valid()) {
+ animation->mRenderTask.get();
+ }
+
+ return animation->mBufferRef;
+}
+
+RLOTTIE_API void
+lottie_animation_property_override(Lottie_Animation_S *animation,
+ const Lottie_Animation_Property type,
+ const char *keypath,
+ ...)
+{
+ va_list prop;
+ va_start(prop, keypath);
+ const int arg_count = [type](){
+ switch (type) {
+ case LOTTIE_ANIMATION_PROPERTY_FILLCOLOR:
+ case LOTTIE_ANIMATION_PROPERTY_STROKECOLOR:
+ return 3;
+ case LOTTIE_ANIMATION_PROPERTY_FILLOPACITY:
+ case LOTTIE_ANIMATION_PROPERTY_STROKEOPACITY:
+ case LOTTIE_ANIMATION_PROPERTY_STROKEWIDTH:
+ case LOTTIE_ANIMATION_PROPERTY_TR_ROTATION:
+ return 1;
+ case LOTTIE_ANIMATION_PROPERTY_TR_POSITION:
+ case LOTTIE_ANIMATION_PROPERTY_TR_SCALE:
+ return 2;
+ default:
+ return 0;
+ }
+ }();
+ double v[3] = {0};
+ for (int i = 0; i < arg_count ; i++) {
+ v[i] = va_arg(prop, double);
+ }
+ va_end(prop);
+
+ switch(type) {
+ case LOTTIE_ANIMATION_PROPERTY_FILLCOLOR: {
+ double r = v[0];
+ double g = v[1];
+ double b = v[2];
+ if (r > 1 || r < 0 || g > 1 || g < 0 || b > 1 || b < 0) break;
+ animation->mAnimation->setValue<rlottie::Property::FillColor>(keypath, rlottie::Color(r, g, b));
+ break;
+ }
+ case LOTTIE_ANIMATION_PROPERTY_FILLOPACITY: {
+ double opacity = v[0];
+ if (opacity > 100 || opacity < 0) break;
+ animation->mAnimation->setValue<rlottie::Property::FillOpacity>(keypath, (float)opacity);
+ break;
+ }
+ case LOTTIE_ANIMATION_PROPERTY_STROKECOLOR: {
+ double r = v[0];
+ double g = v[1];
+ double b = v[2];
+ if (r > 1 || r < 0 || g > 1 || g < 0 || b > 1 || b < 0) break;
+ animation->mAnimation->setValue<rlottie::Property::StrokeColor>(keypath, rlottie::Color(r, g, b));
+ break;
+ }
+ case LOTTIE_ANIMATION_PROPERTY_STROKEOPACITY: {
+ double opacity = v[0];
+ if (opacity > 100 || opacity < 0) break;
+ animation->mAnimation->setValue<rlottie::Property::StrokeOpacity>(keypath, (float)opacity);
+ break;
+ }
+ case LOTTIE_ANIMATION_PROPERTY_STROKEWIDTH: {
+ double width = v[0];
+ if (width < 0) break;
+ animation->mAnimation->setValue<rlottie::Property::StrokeWidth>(keypath, (float)width);
+ break;
+ }
+ case LOTTIE_ANIMATION_PROPERTY_TR_POSITION: {
+ double x = v[0];
+ double y = v[1];
+ animation->mAnimation->setValue<rlottie::Property::TrPosition>(keypath, rlottie::Point((float)x, (float)y));
+ break;
+ }
+ case LOTTIE_ANIMATION_PROPERTY_TR_SCALE: {
+ double w = v[0];
+ double h = v[1];
+ animation->mAnimation->setValue<rlottie::Property::TrScale>(keypath, rlottie::Size((float)w, (float)h));
+ break;
+ }
+ case LOTTIE_ANIMATION_PROPERTY_TR_ROTATION: {
+ double r = v[0];
+ animation->mAnimation->setValue<rlottie::Property::TrRotation>(keypath, (float)r);
+ break;
+ }
+ case LOTTIE_ANIMATION_PROPERTY_TR_ANCHOR:
+ case LOTTIE_ANIMATION_PROPERTY_TR_OPACITY:
+ //@TODO handle propery update.
+ break;
+ }
+}
+
+RLOTTIE_API const LOTMarkerList*
+lottie_animation_get_markerlist(Lottie_Animation_S *animation)
+{
+ if (!animation) return nullptr;
+
+ auto markers = animation->mAnimation->markers();
+ if (markers.size() == 0) return nullptr;
+ if (animation->mMarkerList) return (const LOTMarkerList*)animation->mMarkerList;
+
+ animation->mMarkerList = new LOTMarkerList();
+ animation->mMarkerList->size = markers.size();
+ animation->mMarkerList->ptr = new LOTMarker[markers.size()]();
+
+ for(size_t i = 0; i < markers.size(); i++) {
+ animation->mMarkerList->ptr[i].name = strdup(std::get<0>(markers[i]).c_str());
+ animation->mMarkerList->ptr[i].startframe= std::get<1>(markers[i]);
+ animation->mMarkerList->ptr[i].endframe= std::get<2>(markers[i]);
+ }
+ return (const LOTMarkerList*)animation->mMarkerList;
+}
+
+RLOTTIE_API void
+lottie_configure_model_cache_size(size_t cacheSize)
+{
+ rlottie::configureModelCacheSize(cacheSize);
+}
+
+}
diff --git a/vendor/github.com/Benau/go_rlottie/config.h b/vendor/github.com/Benau/go_rlottie/config.h
new file mode 100644
index 00000000..21b16ee4
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/config.h
@@ -0,0 +1,10 @@
+#ifndef GO_RLOTTIE_HPP
+#define GO_RLOTTIE_HPP
+#ifndef __APPLE__
+#ifdef __ARM_NEON__
+#define USE_ARM_NEON
+#endif
+#endif
+#define LOTTIE_THREAD_SUPPORT
+#define LOTTIE_CACHE_SUPPORT
+#endif
diff --git a/vendor/github.com/Benau/go_rlottie/generate_from_rlottie.py b/vendor/github.com/Benau/go_rlottie/generate_from_rlottie.py
new file mode 100644
index 00000000..6c756b17
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/generate_from_rlottie.py
@@ -0,0 +1,122 @@
+#!/usr/bin/python3
+# ./generate_from_rlottie.py /path/to/clean/rlottie/src/ /path/to/clean/rlottie/inc/
+import glob
+import os
+import re
+import sys
+
+FILE_KEYS = {}
+
+def get_closest_local_header(header):
+ for full_path, local in FILE_KEYS.items():
+ if os.path.basename(full_path) == header:
+ return local
+ return ''
+
+def fix_headers(code_text):
+ out = ''
+ has_neon = False
+ for line in code_text:
+ # Special fixes
+ if line == '#include <vpoint.h>':
+ line = '#include "vpoint.h"'
+ if line == '#include <vsharedptr.h>':
+ line = '#include "vsharedptr.h"'
+ if line == '#include <vglobal.h>':
+ line = '#include "vglobal.h"'
+ if line == '#include <vrect.h>':
+ line = '#include "vrect.h"'
+ # ARM on apple fixes
+ if '__ARM_NEON__' in line:
+ has_neon = True
+ line = line.replace('__ARM_NEON__', 'USE_ARM_NEON')
+ header_file = re.match('#include\s+["]([^"]+)["].*', line)
+ # regex to search for <, > too
+ #header_file = re.match('#include\s+[<"]([^>"]+)[>"].*', line)
+ if header_file:
+ header = header_file.groups()[0]
+ abs_header = os.path.abspath(header)
+ header_exists = os.path.exists(abs_header)
+ if header_exists and abs_header in FILE_KEYS:
+ out += '#include "' + FILE_KEYS[abs_header] + '"\n'
+ else:
+ local = get_closest_local_header(header)
+ if local != '':
+ out += '#include "' + local + '"\n'
+ else:
+ out += line + '\n'
+ else:
+ out += line + '\n'
+ if has_neon:
+ out = '#include "config.h"\n' + out
+ return out
+
+if len(sys.argv) < 2:
+ print('usage: ./generate_from_rlottie.py /path/to/clean/rlottie/src/ /path/to/clean/rlottie/inc/')
+ os._exit(1)
+
+code = ['.c', '.s', '.S', '.sx', 'cc', 'cpp', 'cpp' ]
+header = ['.h', '.hh', '.hpp', '.hxx' ]
+
+# Remove old files
+files = os.listdir('.')
+for file in files:
+ if file.endswith(tuple(code)) or file.endswith(tuple(header)):
+ os.remove(os.path.join('.', file))
+
+paths = []
+it = iter(sys.argv)
+next(it, None)
+for argv in it:
+ paths.append(argv)
+
+for path in paths:
+ for file in glob.iglob(path + '/**', recursive=True):
+ # Ignore msvc config.h and wasm file
+ if file.endswith('config.h') or 'wasm' in file:
+ continue
+ if file.endswith(tuple(code)) or file.endswith(tuple(header)):
+ key = os.path.abspath(file)
+ val = file.replace(path, '').replace('/', '_')
+ FILE_KEYS[key] = val
+
+header_check = []
+for full_path, local in FILE_KEYS.items():
+ header_file = os.path.basename(full_path)
+ if header_file.endswith(tuple(code)):
+ continue
+ if not header_file in header_check:
+ header_check.append(header_file)
+ else:
+ print('WARNING: ' + header_file + ' has multiple reference in subdirectories')
+
+cur_dir = os.path.abspath('.')
+for full_path, local in FILE_KEYS.items():
+ os.chdir(os.path.dirname(full_path))
+ with open(full_path) as code:
+ code_text = code.read().splitlines()
+ code.close()
+ fixed = fix_headers(code_text)
+ os.chdir(cur_dir)
+ local_file = open(local, "w")
+ local_file.write(fixed)
+ local_file.close()
+
+# Write config.h
+config = '#ifndef GO_RLOTTIE_HPP\n#define GO_RLOTTIE_HPP\n'
+# ARM on apple won't compile
+config += '#ifndef __APPLE__\n#ifdef __ARM_NEON__\n#define USE_ARM_NEON\n#endif\n#endif\n'
+config += '#define LOTTIE_THREAD_SUPPORT\n#define LOTTIE_CACHE_SUPPORT\n'
+config += '#endif\n'
+config_file = open('config.h', "w")
+config_file.write(config)
+config_file.close()
+
+# Fix vector_pixman_pixman-arm-neon-asm.S
+with open('vector_pixman_pixman-arm-neon-asm.S') as code:
+ assembly = code.read()
+code.close()
+assembly = '#include "config.h"\n#ifdef USE_ARM_NEON\n' + assembly + '#endif\n'
+fixed_assembly = open('vector_pixman_pixman-arm-neon-asm.S', "w")
+fixed_assembly.write(assembly)
+fixed_assembly.close()
diff --git a/vendor/github.com/Benau/go_rlottie/go.mod b/vendor/github.com/Benau/go_rlottie/go.mod
new file mode 100644
index 00000000..a2af3cf5
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/go.mod
@@ -0,0 +1 @@
+module github.com/Benau/go_rlottie
diff --git a/vendor/github.com/Benau/go_rlottie/go_rlottie.go b/vendor/github.com/Benau/go_rlottie/go_rlottie.go
new file mode 100644
index 00000000..c53392e3
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/go_rlottie.go
@@ -0,0 +1,56 @@
+package go_rlottie
+
+/*
+#cgo !windows LDFLAGS: -lm
+#cgo windows CFLAGS: -DRLOTTIE_BUILD=0
+#cgo windows CXXFLAGS: -DRLOTTIE_BUILD=0
+#cgo CXXFLAGS: -std=c++14 -fno-exceptions -fno-asynchronous-unwind-tables -fno-rtti -Wall -fvisibility=hidden -Wnon-virtual-dtor -Woverloaded-virtual -Wno-unused-parameter
+#include "rlottie_capi.h"
+void lottie_configure_model_cache_size(size_t cacheSize);
+*/
+import "C"
+import "unsafe"
+
+type Lottie_Animation *C.Lottie_Animation
+
+func LottieConfigureModelCacheSize(size uint) {
+ C.lottie_configure_model_cache_size(C.size_t(size))
+}
+
+func LottieAnimationFromData(data string, key string, resource_path string) Lottie_Animation {
+ var animation Lottie_Animation
+ animation = C.lottie_animation_from_data(C.CString(data), C.CString(key), C.CString(resource_path))
+ return animation
+}
+
+func LottieAnimationDestroy(animation Lottie_Animation) {
+ C.lottie_animation_destroy(animation)
+}
+
+func LottieAnimationGetSize(animation Lottie_Animation) (uint, uint) {
+ var width C.size_t
+ var height C.size_t
+ C.lottie_animation_get_size(animation, &width, &height)
+ return uint(width), uint(height)
+}
+
+func LottieAnimationGetTotalframe(animation Lottie_Animation) uint {
+ return uint(C.lottie_animation_get_totalframe(animation))
+}
+
+func LottieAnimationGetFramerate(animation Lottie_Animation) float64 {
+ return float64(C.lottie_animation_get_framerate(animation))
+}
+
+func LottieAnimationGetFrameAtPos(animation Lottie_Animation, pos float32) uint {
+ return uint(C.lottie_animation_get_frame_at_pos(animation, C.float(pos)))
+}
+
+func LottieAnimationGetDuration(animation Lottie_Animation) float64 {
+ return float64(C.lottie_animation_get_duration(animation))
+}
+
+func LottieAnimationRender(animation Lottie_Animation, frame_num uint, buffer []byte, width uint, height uint, bytes_per_line uint) {
+ var ptr *C.uint32_t = (*C.uint32_t)(unsafe.Pointer(&buffer[0]));
+ C.lottie_animation_render(animation, C.size_t(frame_num), ptr, C.size_t(width), C.size_t(height), C.size_t(bytes_per_line))
+}
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_lottieanimation.cpp b/vendor/github.com/Benau/go_rlottie/lottie_lottieanimation.cpp
new file mode 100644
index 00000000..341489b3
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_lottieanimation.cpp
@@ -0,0 +1,457 @@
+#include "config.h"
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "config.h"
+#include "lottie_lottieitem.h"
+#include "lottie_lottiemodel.h"
+#include "rlottie.h"
+
+#include <fstream>
+
+using namespace rlottie;
+using namespace rlottie::internal;
+
+RLOTTIE_API void rlottie::configureModelCacheSize(size_t cacheSize)
+{
+ internal::model::configureModelCacheSize(cacheSize);
+}
+
+struct RenderTask {
+ RenderTask() { receiver = sender.get_future(); }
+ std::promise<Surface> sender;
+ std::future<Surface> receiver;
+ AnimationImpl * playerImpl{nullptr};
+ size_t frameNo{0};
+ Surface surface;
+ bool keepAspectRatio{true};
+};
+using SharedRenderTask = std::shared_ptr<RenderTask>;
+
+class AnimationImpl {
+public:
+ void init(std::shared_ptr<model::Composition> composition);
+ bool update(size_t frameNo, const VSize &size, bool keepAspectRatio);
+ VSize size() const { return mModel->size(); }
+ double duration() const { return mModel->duration(); }
+ double frameRate() const { return mModel->frameRate(); }
+ size_t totalFrame() const { return mModel->totalFrame(); }
+ size_t frameAtPos(double pos) const { return mModel->frameAtPos(pos); }
+ Surface render(size_t frameNo, const Surface &surface,
+ bool keepAspectRatio);
+ std::future<Surface> renderAsync(size_t frameNo, Surface &&surface,
+ bool keepAspectRatio);
+ const LOTLayerNode * renderTree(size_t frameNo, const VSize &size);
+
+ const LayerInfoList &layerInfoList() const
+ {
+ if (mLayerList.empty()) {
+ mLayerList = mModel->layerInfoList();
+ }
+ return mLayerList;
+ }
+ const MarkerList &markers() const { return mModel->markers(); }
+ void setValue(const std::string &keypath, LOTVariant &&value);
+ void removeFilter(const std::string &keypath, Property prop);
+
+private:
+ mutable LayerInfoList mLayerList;
+ model::Composition * mModel;
+ SharedRenderTask mTask;
+ std::atomic<bool> mRenderInProgress;
+ std::unique_ptr<renderer::Composition> mRenderer{nullptr};
+};
+
+void AnimationImpl::setValue(const std::string &keypath, LOTVariant &&value)
+{
+ if (keypath.empty()) return;
+ mRenderer->setValue(keypath, value);
+}
+
+const LOTLayerNode *AnimationImpl::renderTree(size_t frameNo, const VSize &size)
+{
+ if (update(frameNo, size, true)) {
+ mRenderer->buildRenderTree();
+ }
+ return mRenderer->renderTree();
+}
+
+bool AnimationImpl::update(size_t frameNo, const VSize &size,
+ bool keepAspectRatio)
+{
+ frameNo += mModel->startFrame();
+
+ if (frameNo > mModel->endFrame()) frameNo = mModel->endFrame();
+
+ if (frameNo < mModel->startFrame()) frameNo = mModel->startFrame();
+
+ return mRenderer->update(int(frameNo), size, keepAspectRatio);
+}
+
+Surface AnimationImpl::render(size_t frameNo, const Surface &surface,
+ bool keepAspectRatio)
+{
+ bool renderInProgress = mRenderInProgress.load();
+ if (renderInProgress) {
+ vCritical << "Already Rendering Scheduled for this Animation";
+ return surface;
+ }
+
+ mRenderInProgress.store(true);
+ update(
+ frameNo,
+ VSize(int(surface.drawRegionWidth()), int(surface.drawRegionHeight())),
+ keepAspectRatio);
+ mRenderer->render(surface);
+ mRenderInProgress.store(false);
+
+ return surface;
+}
+
+void AnimationImpl::init(std::shared_ptr<model::Composition> composition)
+{
+ mModel = composition.get();
+ mRenderer = std::make_unique<renderer::Composition>(composition);
+ mRenderInProgress = false;
+}
+
+#ifdef LOTTIE_THREAD_SUPPORT
+
+#include <thread>
+#include "vector_vtaskqueue.h"
+
+/*
+ * Implement a task stealing schduler to perform render task
+ * As each player draws into its own buffer we can delegate this
+ * task to a slave thread. The scheduler creates a threadpool depending
+ * on the number of cores available in the system and does a simple fair
+ * scheduling by assigning the task in a round-robin fashion. Each thread
+ * in the threadpool has its own queue. once it finishes all the task on its
+ * own queue it goes through rest of the queue and looks for task if it founds
+ * one it steals the task from it and executes. if it couldn't find one then it
+ * just waits for new task on its own queue.
+ */
+class RenderTaskScheduler {
+ const unsigned _count{std::thread::hardware_concurrency()};
+ std::vector<std::thread> _threads;
+ std::vector<TaskQueue<SharedRenderTask>> _q{_count};
+ std::atomic<unsigned> _index{0};
+
+ void run(unsigned i)
+ {
+ while (true) {
+ bool success = false;
+ SharedRenderTask task;
+ for (unsigned n = 0; n != _count * 2; ++n) {
+ if (_q[(i + n) % _count].try_pop(task)) {
+ success = true;
+ break;
+ }
+ }
+ if (!success && !_q[i].pop(task)) break;
+
+ auto result = task->playerImpl->render(task->frameNo, task->surface,
+ task->keepAspectRatio);
+ task->sender.set_value(result);
+ }
+ }
+
+ RenderTaskScheduler()
+ {
+ for (unsigned n = 0; n != _count; ++n) {
+ _threads.emplace_back([&, n] { run(n); });
+ }
+ }
+
+public:
+ static RenderTaskScheduler &instance()
+ {
+ static RenderTaskScheduler singleton;
+ return singleton;
+ }
+
+ ~RenderTaskScheduler()
+ {
+ for (auto &e : _q) e.done();
+
+ for (auto &e : _threads) e.join();
+ }
+
+ std::future<Surface> process(SharedRenderTask task)
+ {
+ auto receiver = std::move(task->receiver);
+ auto i = _index++;
+
+ for (unsigned n = 0; n != _count; ++n) {
+ if (_q[(i + n) % _count].try_push(std::move(task))) return receiver;
+ }
+
+ if (_count > 0) {
+ _q[i % _count].push(std::move(task));
+ }
+
+ return receiver;
+ }
+};
+
+#else
+class RenderTaskScheduler {
+public:
+ static RenderTaskScheduler &instance()
+ {
+ static RenderTaskScheduler singleton;
+ return singleton;
+ }
+
+ std::future<Surface> process(SharedRenderTask task)
+ {
+ auto result = task->playerImpl->render(task->frameNo, task->surface,
+ task->keepAspectRatio);
+ task->sender.set_value(result);
+ return std::move(task->receiver);
+ }
+};
+#endif
+
+std::future<Surface> AnimationImpl::renderAsync(size_t frameNo,
+ Surface &&surface,
+ bool keepAspectRatio)
+{
+ if (!mTask) {
+ mTask = std::make_shared<RenderTask>();
+ } else {
+ mTask->sender = std::promise<Surface>();
+ mTask->receiver = mTask->sender.get_future();
+ }
+ mTask->playerImpl = this;
+ mTask->frameNo = frameNo;
+ mTask->surface = std::move(surface);
+ mTask->keepAspectRatio = keepAspectRatio;
+
+ return RenderTaskScheduler::instance().process(mTask);
+}
+
+/**
+ * \breif Brief abput the Api.
+ * Description about the setFilePath Api
+ * @param path add the details
+ */
+std::unique_ptr<Animation> Animation::loadFromData(
+ std::string jsonData, const std::string &key,
+ const std::string &resourcePath, bool cachePolicy)
+{
+ if (jsonData.empty()) {
+ vWarning << "jason data is empty";
+ return nullptr;
+ }
+
+ auto composition = model::loadFromData(std::move(jsonData), key,
+ resourcePath, cachePolicy);
+ if (composition) {
+ auto animation = std::unique_ptr<Animation>(new Animation);
+ animation->d->init(std::move(composition));
+ return animation;
+ }
+
+ return nullptr;
+}
+
+std::unique_ptr<Animation> Animation::loadFromData(std::string jsonData,
+ std::string resourcePath,
+ ColorFilter filter)
+{
+ if (jsonData.empty()) {
+ vWarning << "jason data is empty";
+ return nullptr;
+ }
+
+ auto composition = model::loadFromData(
+ std::move(jsonData), std::move(resourcePath), std::move(filter));
+ if (composition) {
+ auto animation = std::unique_ptr<Animation>(new Animation);
+ animation->d->init(std::move(composition));
+ return animation;
+ }
+ return nullptr;
+}
+
+std::unique_ptr<Animation> Animation::loadFromFile(const std::string &path,
+ bool cachePolicy)
+{
+ if (path.empty()) {
+ vWarning << "File path is empty";
+ return nullptr;
+ }
+
+ auto composition = model::loadFromFile(path, cachePolicy);
+ if (composition) {
+ auto animation = std::unique_ptr<Animation>(new Animation);
+ animation->d->init(std::move(composition));
+ return animation;
+ }
+ return nullptr;
+}
+
+void Animation::size(size_t &width, size_t &height) const
+{
+ VSize sz = d->size();
+
+ width = sz.width();
+ height = sz.height();
+}
+
+double Animation::duration() const
+{
+ return d->duration();
+}
+
+double Animation::frameRate() const
+{
+ return d->frameRate();
+}
+
+size_t Animation::totalFrame() const
+{
+ return d->totalFrame();
+}
+
+size_t Animation::frameAtPos(double pos)
+{
+ return d->frameAtPos(pos);
+}
+
+const LOTLayerNode *Animation::renderTree(size_t frameNo, size_t width,
+ size_t height) const
+{
+ return d->renderTree(frameNo, VSize(int(width), int(height)));
+}
+
+std::future<Surface> Animation::render(size_t frameNo, Surface surface,
+ bool keepAspectRatio)
+{
+ return d->renderAsync(frameNo, std::move(surface), keepAspectRatio);
+}
+
+void Animation::renderSync(size_t frameNo, Surface surface,
+ bool keepAspectRatio)
+{
+ d->render(frameNo, surface, keepAspectRatio);
+}
+
+const LayerInfoList &Animation::layers() const
+{
+ return d->layerInfoList();
+}
+
+const MarkerList &Animation::markers() const
+{
+ return d->markers();
+}
+
+void Animation::setValue(Color_Type, Property prop, const std::string &keypath,
+ Color value)
+{
+ d->setValue(keypath,
+ LOTVariant(prop, [value](const FrameInfo &) { return value; }));
+}
+
+void Animation::setValue(Float_Type, Property prop, const std::string &keypath,
+ float value)
+{
+ d->setValue(keypath,
+ LOTVariant(prop, [value](const FrameInfo &) { return value; }));
+}
+
+void Animation::setValue(Size_Type, Property prop, const std::string &keypath,
+ Size value)
+{
+ d->setValue(keypath,
+ LOTVariant(prop, [value](const FrameInfo &) { return value; }));
+}
+
+void Animation::setValue(Point_Type, Property prop, const std::string &keypath,
+ Point value)
+{
+ d->setValue(keypath,
+ LOTVariant(prop, [value](const FrameInfo &) { return value; }));
+}
+
+void Animation::setValue(Color_Type, Property prop, const std::string &keypath,
+ std::function<Color(const FrameInfo &)> &&value)
+{
+ d->setValue(keypath, LOTVariant(prop, value));
+}
+
+void Animation::setValue(Float_Type, Property prop, const std::string &keypath,
+ std::function<float(const FrameInfo &)> &&value)
+{
+ d->setValue(keypath, LOTVariant(prop, value));
+}
+
+void Animation::setValue(Size_Type, Property prop, const std::string &keypath,
+ std::function<Size(const FrameInfo &)> &&value)
+{
+ d->setValue(keypath, LOTVariant(prop, value));
+}
+
+void Animation::setValue(Point_Type, Property prop, const std::string &keypath,
+ std::function<Point(const FrameInfo &)> &&value)
+{
+ d->setValue(keypath, LOTVariant(prop, value));
+}
+
+Animation::~Animation() = default;
+Animation::Animation() : d(std::make_unique<AnimationImpl>()) {}
+
+Surface::Surface(uint32_t *buffer, size_t width, size_t height,
+ size_t bytesPerLine)
+ : mBuffer(buffer),
+ mWidth(width),
+ mHeight(height),
+ mBytesPerLine(bytesPerLine)
+{
+ mDrawArea.w = mWidth;
+ mDrawArea.h = mHeight;
+}
+
+void Surface::setDrawRegion(size_t x, size_t y, size_t width, size_t height)
+{
+ if ((x + width > mWidth) || (y + height > mHeight)) return;
+
+ mDrawArea.x = x;
+ mDrawArea.y = y;
+ mDrawArea.w = width;
+ mDrawArea.h = height;
+}
+
+#ifdef LOTTIE_LOGGING_SUPPORT
+void initLogging()
+{
+#if defined(USE_ARM_NEON)
+ set_log_level(LogLevel::OFF);
+#else
+ initialize(GuaranteedLogger(), "/tmp/", "rlottie", 1);
+ set_log_level(LogLevel::INFO);
+#endif
+}
+
+V_CONSTRUCTOR_FUNCTION(initLogging)
+#endif
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_lottiefiltermodel.h b/vendor/github.com/Benau/go_rlottie/lottie_lottiefiltermodel.h
new file mode 100644
index 00000000..130de590
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_lottiefiltermodel.h
@@ -0,0 +1,435 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef LOTTIEFILTERMODEL_H
+#define LOTTIEFILTERMODEL_H
+
+#include <algorithm>
+#include <bitset>
+#include <cassert>
+#include "lottie_lottiemodel.h"
+#include "rlottie.h"
+
+using namespace rlottie::internal;
+// Naive way to implement std::variant
+// refactor it when we move to c++17
+// users should make sure proper combination
+// of id and value are passed while creating the object.
+class LOTVariant {
+public:
+ using ValueFunc = std::function<float(const rlottie::FrameInfo&)>;
+ using ColorFunc = std::function<rlottie::Color(const rlottie::FrameInfo&)>;
+ using PointFunc = std::function<rlottie::Point(const rlottie::FrameInfo&)>;
+ using SizeFunc = std::function<rlottie::Size(const rlottie::FrameInfo&)>;
+
+ LOTVariant(rlottie::Property prop, const ValueFunc& v)
+ : mPropery(prop), mTag(Value)
+ {
+ construct(impl.valueFunc, v);
+ }
+
+ LOTVariant(rlottie::Property prop, ValueFunc&& v)
+ : mPropery(prop), mTag(Value)
+ {
+ moveConstruct(impl.valueFunc, std::move(v));
+ }
+
+ LOTVariant(rlottie::Property prop, const ColorFunc& v)
+ : mPropery(prop), mTag(Color)
+ {
+ construct(impl.colorFunc, v);
+ }
+
+ LOTVariant(rlottie::Property prop, ColorFunc&& v)
+ : mPropery(prop), mTag(Color)
+ {
+ moveConstruct(impl.colorFunc, std::move(v));
+ }
+
+ LOTVariant(rlottie::Property prop, const PointFunc& v)
+ : mPropery(prop), mTag(Point)
+ {
+ construct(impl.pointFunc, v);
+ }
+
+ LOTVariant(rlottie::Property prop, PointFunc&& v)
+ : mPropery(prop), mTag(Point)
+ {
+ moveConstruct(impl.pointFunc, std::move(v));
+ }
+
+ LOTVariant(rlottie::Property prop, const SizeFunc& v)
+ : mPropery(prop), mTag(Size)
+ {
+ construct(impl.sizeFunc, v);
+ }
+
+ LOTVariant(rlottie::Property prop, SizeFunc&& v)
+ : mPropery(prop), mTag(Size)
+ {
+ moveConstruct(impl.sizeFunc, std::move(v));
+ }
+
+ rlottie::Property property() const { return mPropery; }
+
+ const ColorFunc& color() const
+ {
+ assert(mTag == Color);
+ return impl.colorFunc;
+ }
+
+ const ValueFunc& value() const
+ {
+ assert(mTag == Value);
+ return impl.valueFunc;
+ }
+
+ const PointFunc& point() const
+ {
+ assert(mTag == Point);
+ return impl.pointFunc;
+ }
+
+ const SizeFunc& size() const
+ {
+ assert(mTag == Size);
+ return impl.sizeFunc;
+ }
+
+ LOTVariant() = default;
+ ~LOTVariant() noexcept { Destroy(); }
+ LOTVariant(const LOTVariant& other) { Copy(other); }
+ LOTVariant(LOTVariant&& other) noexcept { Move(std::move(other)); }
+ LOTVariant& operator=(LOTVariant&& other)
+ {
+ Destroy();
+ Move(std::move(other));
+ return *this;
+ }
+ LOTVariant& operator=(const LOTVariant& other)
+ {
+ Destroy();
+ Copy(other);
+ return *this;
+ }
+
+private:
+ template <typename T>
+ void construct(T& member, const T& val)
+ {
+ new (&member) T(val);
+ }
+
+ template <typename T>
+ void moveConstruct(T& member, T&& val)
+ {
+ new (&member) T(std::move(val));
+ }
+
+ void Move(LOTVariant&& other)
+ {
+ switch (other.mTag) {
+ case Type::Value:
+ moveConstruct(impl.valueFunc, std::move(other.impl.valueFunc));
+ break;
+ case Type::Color:
+ moveConstruct(impl.colorFunc, std::move(other.impl.colorFunc));
+ break;
+ case Type::Point:
+ moveConstruct(impl.pointFunc, std::move(other.impl.pointFunc));
+ break;
+ case Type::Size:
+ moveConstruct(impl.sizeFunc, std::move(other.impl.sizeFunc));
+ break;
+ default:
+ break;
+ }
+ mTag = other.mTag;
+ mPropery = other.mPropery;
+ other.mTag = MonoState;
+ }
+
+ void Copy(const LOTVariant& other)
+ {
+ switch (other.mTag) {
+ case Type::Value:
+ construct(impl.valueFunc, other.impl.valueFunc);
+ break;
+ case Type::Color:
+ construct(impl.colorFunc, other.impl.colorFunc);
+ break;
+ case Type::Point:
+ construct(impl.pointFunc, other.impl.pointFunc);
+ break;
+ case Type::Size:
+ construct(impl.sizeFunc, other.impl.sizeFunc);
+ break;
+ default:
+ break;
+ }
+ mTag = other.mTag;
+ mPropery = other.mPropery;
+ }
+
+ void Destroy()
+ {
+ switch (mTag) {
+ case MonoState: {
+ break;
+ }
+ case Value: {
+ impl.valueFunc.~ValueFunc();
+ break;
+ }
+ case Color: {
+ impl.colorFunc.~ColorFunc();
+ break;
+ }
+ case Point: {
+ impl.pointFunc.~PointFunc();
+ break;
+ }
+ case Size: {
+ impl.sizeFunc.~SizeFunc();
+ break;
+ }
+ }
+ }
+
+ enum Type { MonoState, Value, Color, Point, Size };
+ rlottie::Property mPropery;
+ Type mTag{MonoState};
+ union details {
+ ColorFunc colorFunc;
+ ValueFunc valueFunc;
+ PointFunc pointFunc;
+ SizeFunc sizeFunc;
+ details() {}
+ ~details() noexcept {}
+ } impl;
+};
+
+namespace rlottie {
+
+namespace internal {
+
+namespace model {
+
+class FilterData {
+public:
+ void addValue(LOTVariant& value)
+ {
+ uint index = static_cast<uint>(value.property());
+ if (mBitset.test(index)) {
+ std::replace_if(mFilters.begin(), mFilters.end(),
+ [&value](const LOTVariant& e) {
+ return e.property() == value.property();
+ },
+ value);
+ } else {
+ mBitset.set(index);
+ mFilters.push_back(value);
+ }
+ }
+
+ void removeValue(LOTVariant& value)
+ {
+ uint index = static_cast<uint>(value.property());
+ if (mBitset.test(index)) {
+ mBitset.reset(index);
+ mFilters.erase(std::remove_if(mFilters.begin(), mFilters.end(),
+ [&value](const LOTVariant& e) {
+ return e.property() ==
+ value.property();
+ }),
+ mFilters.end());
+ }
+ }
+ bool hasFilter(rlottie::Property prop) const
+ {
+ return mBitset.test(static_cast<uint>(prop));
+ }
+ model::Color color(rlottie::Property prop, int frame) const
+ {
+ rlottie::FrameInfo info(frame);
+ rlottie::Color col = data(prop).color()(info);
+ return model::Color(col.r(), col.g(), col.b());
+ }
+ VPointF point(rlottie::Property prop, int frame) const
+ {
+ rlottie::FrameInfo info(frame);
+ rlottie::Point pt = data(prop).point()(info);
+ return VPointF(pt.x(), pt.y());
+ }
+ VSize scale(rlottie::Property prop, int frame) const
+ {
+ rlottie::FrameInfo info(frame);
+ rlottie::Size sz = data(prop).size()(info);
+ return VSize(sz.w(), sz.h());
+ }
+ float opacity(rlottie::Property prop, int frame) const
+ {
+ rlottie::FrameInfo info(frame);
+ float val = data(prop).value()(info);
+ return val / 100;
+ }
+ float value(rlottie::Property prop, int frame) const
+ {
+ rlottie::FrameInfo info(frame);
+ return data(prop).value()(info);
+ }
+
+private:
+ const LOTVariant& data(rlottie::Property prop) const
+ {
+ auto result = std::find_if(
+ mFilters.begin(), mFilters.end(),
+ [prop](const LOTVariant& e) { return e.property() == prop; });
+ return *result;
+ }
+ std::bitset<32> mBitset{0};
+ std::vector<LOTVariant> mFilters;
+};
+
+template <typename T>
+struct FilterBase
+{
+ FilterBase(T *model): model_(model){}
+
+ const char* name() const { return model_->name(); }
+
+ FilterData* filter() {
+ if (!filterData_) filterData_ = std::make_unique<FilterData>();
+ return filterData_.get();
+ }
+
+ const FilterData * filter() const { return filterData_.get(); }
+ const T* model() const { return model_;}
+
+ bool hasFilter(rlottie::Property prop) const {
+ return filterData_ ? filterData_->hasFilter(prop)
+ : false;
+ }
+
+ T* model_{nullptr};
+ std::unique_ptr<FilterData> filterData_{nullptr};
+};
+
+
+template <typename T>
+class Filter : public FilterBase<T> {
+public:
+ Filter(T* model): FilterBase<T>(model){}
+ model::Color color(int frame) const
+ {
+ if (this->hasFilter(rlottie::Property::StrokeColor)) {
+ return this->filter()->color(rlottie::Property::StrokeColor, frame);
+ }
+ return this->model()->color(frame);
+ }
+ float opacity(int frame) const
+ {
+ if (this->hasFilter(rlottie::Property::StrokeOpacity)) {
+ return this->filter()->opacity(rlottie::Property::StrokeOpacity, frame);
+ }
+ return this->model()->opacity(frame);
+ }
+
+ float strokeWidth(int frame) const
+ {
+ if (this->hasFilter(rlottie::Property::StrokeWidth)) {
+ return this->filter()->value(rlottie::Property::StrokeWidth, frame);
+ }
+ return this->model()->strokeWidth(frame);
+ }
+
+ float miterLimit() const { return this->model()->miterLimit(); }
+ CapStyle capStyle() const { return this->model()->capStyle(); }
+ JoinStyle joinStyle() const { return this->model()->joinStyle(); }
+ bool hasDashInfo() const { return this->model()->hasDashInfo(); }
+ void getDashInfo(int frameNo, std::vector<float>& result) const
+ {
+ return this->model()->getDashInfo(frameNo, result);
+ }
+};
+
+
+template <>
+class Filter<model::Fill>: public FilterBase<model::Fill>
+{
+public:
+ Filter(model::Fill* model) : FilterBase<model::Fill>(model) {}
+
+ model::Color color(int frame) const
+ {
+ if (this->hasFilter(rlottie::Property::FillColor)) {
+ return this->filter()->color(rlottie::Property::FillColor, frame);
+ }
+ return this->model()->color(frame);
+ }
+
+ float opacity(int frame) const
+ {
+ if (this->hasFilter(rlottie::Property::FillOpacity)) {
+ return this->filter()->opacity(rlottie::Property::FillOpacity, frame);
+ }
+ return this->model()->opacity(frame);
+ }
+
+ FillRule fillRule() const { return this->model()->fillRule(); }
+};
+
+template <>
+class Filter<model::Group> : public FilterBase<model::Group>
+{
+public:
+ Filter(model::Group* model = nullptr) : FilterBase<model::Group>(model) {}
+
+ bool hasModel() const { return this->model() ? true : false; }
+
+ model::Transform* transform() const { return this->model() ? this->model()->mTransform : nullptr; }
+ VMatrix matrix(int frame) const
+ {
+ VMatrix mS, mR, mT;
+ if (this->hasFilter(rlottie::Property::TrScale)) {
+ VSize s = this->filter()->scale(rlottie::Property::TrScale, frame);
+ mS.scale(s.width() / 100.0, s.height() / 100.0);
+ }
+ if (this->hasFilter(rlottie::Property::TrRotation)) {
+ mR.rotate(this->filter()->value(rlottie::Property::TrRotation, frame));
+ }
+ if (this->hasFilter(rlottie::Property::TrPosition)) {
+ mT.translate(this->filter()->point(rlottie::Property::TrPosition, frame));
+ }
+
+ return this->model()->mTransform->matrix(frame) * mS * mR * mT;
+ }
+};
+
+
+} // namespace model
+
+} // namespace internal
+
+} // namespace rlottie
+
+#endif // LOTTIEFILTERMODEL_H
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_lottieitem.cpp b/vendor/github.com/Benau/go_rlottie/lottie_lottieitem.cpp
new file mode 100644
index 00000000..99f80044
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_lottieitem.cpp
@@ -0,0 +1,1491 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "lottie_lottieitem.h"
+#include <algorithm>
+#include <cmath>
+#include <iterator>
+#include "lottie_lottiekeypath.h"
+#include "vector_vbitmap.h"
+#include "vector_vpainter.h"
+#include "vector_vraster.h"
+
+/* Lottie Layer Rules
+ * 1. time stretch is pre calculated and applied to all the properties of the
+ * lottilayer model and all its children
+ * 2. The frame property could be reversed using,time-reverse layer property in
+ * AE. which means (start frame > endFrame) 3.
+ */
+
+static bool transformProp(rlottie::Property prop)
+{
+ switch (prop) {
+ case rlottie::Property::TrAnchor:
+ case rlottie::Property::TrScale:
+ case rlottie::Property::TrOpacity:
+ case rlottie::Property::TrPosition:
+ case rlottie::Property::TrRotation:
+ return true;
+ default:
+ return false;
+ }
+}
+static bool fillProp(rlottie::Property prop)
+{
+ switch (prop) {
+ case rlottie::Property::FillColor:
+ case rlottie::Property::FillOpacity:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool strokeProp(rlottie::Property prop)
+{
+ switch (prop) {
+ case rlottie::Property::StrokeColor:
+ case rlottie::Property::StrokeOpacity:
+ case rlottie::Property::StrokeWidth:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static renderer::Layer *createLayerItem(model::Layer *layerData,
+ VArenaAlloc * allocator)
+{
+ switch (layerData->mLayerType) {
+ case model::Layer::Type::Precomp: {
+ return allocator->make<renderer::CompLayer>(layerData, allocator);
+ }
+ case model::Layer::Type::Solid: {
+ return allocator->make<renderer::SolidLayer>(layerData);
+ }
+ case model::Layer::Type::Shape: {
+ return allocator->make<renderer::ShapeLayer>(layerData, allocator);
+ }
+ case model::Layer::Type::Null: {
+ return allocator->make<renderer::NullLayer>(layerData);
+ }
+ case model::Layer::Type::Image: {
+ return allocator->make<renderer::ImageLayer>(layerData);
+ }
+ default:
+ return nullptr;
+ break;
+ }
+}
+
+renderer::Composition::Composition(std::shared_ptr<model::Composition> model)
+ : mCurFrameNo(-1)
+{
+ mModel = std::move(model);
+ mRootLayer = createLayerItem(mModel->mRootLayer, &mAllocator);
+ mRootLayer->setComplexContent(false);
+ mViewSize = mModel->size();
+}
+
+void renderer::Composition::setValue(const std::string &keypath,
+ LOTVariant & value)
+{
+ LOTKeyPath key(keypath);
+ mRootLayer->resolveKeyPath(key, 0, value);
+}
+
+bool renderer::Composition::update(int frameNo, const VSize &size,
+ bool keepAspectRatio)
+{
+ // check if cached frame is same as requested frame.
+ if ((mViewSize == size) && (mCurFrameNo == frameNo) &&
+ (mKeepAspectRatio == keepAspectRatio))
+ return false;
+
+ mViewSize = size;
+ mCurFrameNo = frameNo;
+ mKeepAspectRatio = keepAspectRatio;
+
+ /*
+ * if viewbox dosen't scale exactly to the viewport
+ * we scale the viewbox keeping AspectRatioPreserved and then align the
+ * viewbox to the viewport using AlignCenter rule.
+ */
+ VMatrix m;
+ VSize viewPort = mViewSize;
+ VSize viewBox = mModel->size();
+ float sx = float(viewPort.width()) / viewBox.width();
+ float sy = float(viewPort.height()) / viewBox.height();
+ if (mKeepAspectRatio) {
+ float scale = std::min(sx, sy);
+ float tx = (viewPort.width() - viewBox.width() * scale) * 0.5f;
+ float ty = (viewPort.height() - viewBox.height() * scale) * 0.5f;
+ m.translate(tx, ty).scale(scale, scale);
+ } else {
+ m.scale(sx, sy);
+ }
+ mRootLayer->update(frameNo, m, 1.0);
+ return true;
+}
+
+bool renderer::Composition::render(const rlottie::Surface &surface)
+{
+ mSurface.reset(reinterpret_cast<uchar *>(surface.buffer()),
+ uint(surface.width()), uint(surface.height()),
+ uint(surface.bytesPerLine()),
+ VBitmap::Format::ARGB32_Premultiplied);
+
+ /* schedule all preprocess task for this frame at once.
+ */
+ VRect clip(0, 0, int(surface.drawRegionWidth()),
+ int(surface.drawRegionHeight()));
+ mRootLayer->preprocess(clip);
+
+ VPainter painter(&mSurface);
+ // set sub surface area for drawing.
+ painter.setDrawRegion(
+ VRect(int(surface.drawRegionPosX()), int(surface.drawRegionPosY()),
+ int(surface.drawRegionWidth()), int(surface.drawRegionHeight())));
+ mRootLayer->render(&painter, {}, {}, mSurfaceCache);
+ painter.end();
+ return true;
+}
+
+void renderer::Mask::update(int frameNo, const VMatrix &parentMatrix,
+ float /*parentAlpha*/, const DirtyFlag &flag)
+{
+ bool dirtyPath = false;
+
+ if (flag.testFlag(DirtyFlagBit::None) && mData->isStatic()) return;
+
+ if (mData->mShape.isStatic()) {
+ if (mLocalPath.empty()) {
+ dirtyPath = true;
+ mData->mShape.value(frameNo, mLocalPath);
+ }
+ } else {
+ dirtyPath = true;
+ mData->mShape.value(frameNo, mLocalPath);
+ }
+ /* mask item dosen't inherit opacity */
+ mCombinedAlpha = mData->opacity(frameNo);
+
+ if ( flag.testFlag(DirtyFlagBit::Matrix) || dirtyPath ) {
+ mFinalPath.clone(mLocalPath);
+ mFinalPath.transform(parentMatrix);
+ mRasterRequest = true;
+ }
+}
+
+// Returns this mask's coverage as an RLE span list, pre-multiplied by the
+// mask's combined alpha when it is not fully opaque.
+VRle renderer::Mask::rle()
+{
+    if (!vCompare(mCombinedAlpha, 1.0f)) {
+        VRle obj = mRasterizer.rle();
+        // Modulate coverage by alpha, mapped to 0..255.
+        obj *= uchar(mCombinedAlpha * 255);
+        return obj;
+    } else {
+        return mRasterizer.rle();
+    }
+}
+
+// Rasterizes the final mask path against @p clip when a re-raster was
+// requested by a preceding update().
+void renderer::Mask::preprocess(const VRect &clip)
+{
+    if (mRasterRequest)
+        mRasterizer.rasterize(mFinalPath, FillRule::Winding, clip);
+}
+
+// Draws this layer's drawables, combining the layer's own mask with the
+// inherited mask and honoring an optional matte coverage.
+void renderer::Layer::render(VPainter *painter, const VRle &inheritMask,
+                             const VRle &matteRle, SurfaceCache &)
+{
+    auto renderlist = renderList();
+
+    if (renderlist.empty()) return;
+
+    VRle mask;
+    if (mLayerMask) {
+        mask = mLayerMask->maskRle(painter->clipBoundingRect());
+        if (!inheritMask.empty()) mask = mask & inheritMask;
+        // if resulting mask is empty then return.
+        if (mask.empty()) return;
+    } else {
+        mask = inheritMask;
+    }
+
+    for (auto &i : renderlist) {
+        painter->setBrush(i->mBrush);
+        VRle rle = i->rle();
+        if (matteRle.empty()) {
+            if (mask.empty()) {
+                // no mask no matte
+                painter->drawRle(VPoint(), rle);
+            } else {
+                // only mask
+                painter->drawRle(rle, mask);
+            }
+
+        } else {
+            if (!mask.empty()) rle = rle & mask;
+
+            if (rle.empty()) continue;
+            if (matteType() == model::MatteType::AlphaInv) {
+                // inverse alpha matte: draw only outside the matte coverage.
+                rle = rle - matteRle;
+                painter->drawRle(VPoint(), rle);
+            } else {
+                // render with matteRle as clip.
+                painter->drawRle(rle, matteRle);
+            }
+        }
+    }
+}
+
+// Forwards preprocessing (rasterization) to every mask of this layer.
+void renderer::LayerMask::preprocess(const VRect &clip)
+{
+    for (auto &i : mMasks) {
+        i.preprocess(clip);
+    }
+}
+
+// Builds the mask list from the model; the whole group counts as static
+// only if every individual mask is static.
+renderer::LayerMask::LayerMask(model::Layer *layerData)
+{
+    if (!layerData->mExtra) return;
+
+    mMasks.reserve(layerData->mExtra->mMasks.size());
+
+    for (auto &i : layerData->mExtra->mMasks) {
+        mMasks.emplace_back(i);
+        mStatic &= i->isStatic();
+    }
+}
+
+// Updates every mask for @p frameNo; skipped entirely when nothing changed
+// upstream and the mask group is static.
+void renderer::LayerMask::update(int frameNo, const VMatrix &parentMatrix,
+                                 float parentAlpha, const DirtyFlag &flag)
+{
+    if (flag.testFlag(DirtyFlagBit::None) && isStatic()) return;
+
+    for (auto &i : mMasks) {
+        i.update(frameNo, parentMatrix, parentAlpha, flag);
+    }
+    mDirty = true;
+}
+
+// Combines all masks into one RLE according to each mask's mode
+// (add / subtract / intersect / difference); the result is cached in mRle
+// until the next update() marks it dirty again.
+VRle renderer::LayerMask::maskRle(const VRect &clipRect)
+{
+    if (!mDirty) return mRle;
+
+    VRle rle;
+    for (auto &e : mMasks) {
+        // An inverted mask contributes its complement within the clip.
+        const auto cur = [&]() {
+            if (e.inverted())
+                return clipRect - e.rle();
+            else
+                return e.rle();
+        }();
+
+        switch (e.maskMode()) {
+        case model::Mask::Mode::Add: {
+            rle = rle + cur;
+            break;
+        }
+        case model::Mask::Mode::Substarct: {
+            if (rle.empty() && !clipRect.empty())
+                rle = clipRect - cur;
+            else
+                rle = rle - cur;
+            break;
+        }
+        case model::Mask::Mode::Intersect: {
+            if (rle.empty() && !clipRect.empty())
+                rle = clipRect & cur;
+            else
+                rle = rle & cur;
+            break;
+        }
+        case model::Mask::Mode::Difference: {
+            rle = rle ^ cur;
+            break;
+        }
+        default:
+            break;
+        }
+    }
+
+    // Keep a deep copy only when the result still shares storage (RLE is
+    // copy-on-write), so the cache survives later modifications.
+    if (!rle.empty() && !rle.unique()) {
+        mRle.clone(rle);
+    } else {
+        mRle = rle;
+    }
+    mDirty = false;
+    return mRle;
+}
+
+// Owns the model data; creates the mask group only when the model layer
+// declares masks.
+renderer::Layer::Layer(model::Layer *layerData) : mLayerData(layerData)
+{
+    if (mLayerData->mHasMask)
+        mLayerMask = std::make_unique<renderer::LayerMask>(mLayerData);
+}
+
+// Matches this layer against @p keyPath at @p depth; returns true when the
+// path refers to this layer. Dynamic property update is not implemented.
+bool renderer::Layer::resolveKeyPath(LOTKeyPath &keyPath, uint depth,
+                                     LOTVariant &value)
+{
+    if (!keyPath.matches(name(), depth)) {
+        return false;
+    }
+
+    if (!keyPath.skip(name())) {
+        if (keyPath.fullyResolvesTo(name(), depth) &&
+            transformProp(value.property())) {
+            //@TODO handle property update.
+        }
+    }
+    return true;
+}
+
+// Resolves the key path on this shape layer, then propagates it to the
+// root content group when the path continues past the layer.
+bool renderer::ShapeLayer::resolveKeyPath(LOTKeyPath &keyPath, uint depth,
+                                          LOTVariant &value)
+{
+    if (renderer::Layer::resolveKeyPath(keyPath, depth, value)) {
+        if (keyPath.propagate(name(), depth)) {
+            uint newDepth = keyPath.nextDepth(name(), depth);
+            mRoot->resolveKeyPath(keyPath, newDepth, value);
+        }
+        return true;
+    }
+    return false;
+}
+
+// Resolves the key path on this composition layer, then propagates it to
+// every child layer when the path continues past the layer.
+bool renderer::CompLayer::resolveKeyPath(LOTKeyPath &keyPath, uint depth,
+                                         LOTVariant &value)
+{
+    if (renderer::Layer::resolveKeyPath(keyPath, depth, value)) {
+        if (keyPath.propagate(name(), depth)) {
+            uint newDepth = keyPath.nextDepth(name(), depth);
+            for (const auto &layer : mLayers) {
+                layer->resolveKeyPath(keyPath, newDepth, value);
+            }
+        }
+        return true;
+    }
+    return false;
+}
+
+// Per-frame update: recomputes the combined matrix/alpha, accumulates
+// dirty flags, updates masks, and refreshes layer content unless the
+// layer is static and nothing changed upstream.
+void renderer::Layer::update(int frameNumber, const VMatrix &parentMatrix,
+                             float parentAlpha)
+{
+    mFrameNo = frameNumber;
+    // 1. check if the layer is part of the current frame
+    if (!visible()) return;
+
+    float alpha = parentAlpha * opacity(frameNo());
+    if (vIsZero(alpha)) {
+        mCombinedAlpha = 0;
+        return;
+    }
+
+    // 2. calculate the parent matrix and alpha
+    VMatrix m = matrix(frameNo());
+    m *= parentMatrix;
+
+    // 3. update the dirty flag based on the change
+    if (mCombinedMatrix != m) {
+        mDirtyFlag |= DirtyFlagBit::Matrix;
+        mCombinedMatrix = m;
+    }
+
+    if (!vCompare(mCombinedAlpha, alpha)) {
+        mDirtyFlag |= DirtyFlagBit::Alpha;
+        mCombinedAlpha = alpha;
+    }
+
+    // 4. update the mask
+    if (mLayerMask) {
+        mLayerMask->update(frameNo(), mCombinedMatrix, mCombinedAlpha,
+                           mDirtyFlag);
+    }
+
+    // 5. if no parent property change and layer is static then nothing to do.
+    if (!mLayerData->precompLayer() && flag().testFlag(DirtyFlagBit::None) &&
+        isStatic())
+        return;
+
+    // 6. update the content of the layer
+    updateContent();
+
+    // 7. reset the dirty flag
+    mDirtyFlag = DirtyFlagBit::None;
+}
+
+// Layer transform for @p frameNo, composed with the parent layer's
+// transform chain when this layer is parented.
+VMatrix renderer::Layer::matrix(int frameNo) const
+{
+    return mParentLayer
+               ? (mLayerData->matrix(frameNo) * mParentLayer->matrix(frameNo))
+               : mLayerData->matrix(frameNo);
+}
+
+// A layer contributes to the frame only inside its [inFrame, outFrame) range.
+bool renderer::Layer::visible() const
+{
+    return (frameNo() >= mLayerData->inFrame() &&
+            frameNo() < mLayerData->outFrame());
+}
+
+// Pre-render hook: rasterizes the layer masks and subclass content for
+// the given clip rect.
+void renderer::Layer::preprocess(const VRect &clip)
+{
+    // layer doesn't contribute to the frame
+    if (skipRendering()) return;
+
+    // preprocess layer masks
+    if (mLayerMask) mLayerMask->preprocess(clip);
+
+    preprocessStage(clip);
+}
+
+// Builds a composition layer: creates child layers in back-to-front
+// order, wires up layer parenting, and installs a clipper for nested
+// compositions that declare their own size.
+renderer::CompLayer::CompLayer(model::Layer *layerModel, VArenaAlloc *allocator)
+    : renderer::Layer(layerModel)
+{
+    if (!mLayerData->mChildren.empty())
+        mLayers.reserve(mLayerData->mChildren.size());
+
+    // 1. keep the layer in back-to-front order.
+    // as lottie model keeps the data in front-to-back order.
+    // (crend() matches crbegin(); the previous code mixed const and
+    // non-const reverse iterators.)
+    for (auto it = mLayerData->mChildren.crbegin();
+         it != mLayerData->mChildren.crend(); ++it) {
+        auto model = static_cast<model::Layer *>(*it);
+        auto item = createLayerItem(model, allocator);
+        if (item) mLayers.push_back(item);
+    }
+
+    // 2. update parent layer
+    for (const auto &layer : mLayers) {
+        int id = layer->parentId();
+        if (id >= 0) {
+            auto search =
+                std::find_if(mLayers.begin(), mLayers.end(),
+                             [id](const auto &val) { return val->id() == id; });
+            if (search != mLayers.end()) layer->setParentLayer(*search);
+        }
+    }
+
+    // 3. check if it's a nested composition
+    if (!layerModel->layerSize().empty()) {
+        mClipper = std::make_unique<renderer::Clipper>(layerModel->layerSize());
+    }
+
+    if (mLayers.size() > 1) setComplexContent(true);
+}
+
+// Renders the composition. When the layer is translucent AND has complex
+// content (multiple children), it is first flattened into an offscreen
+// surface so alpha is applied once to the composed result.
+void renderer::CompLayer::render(VPainter *painter, const VRle &inheritMask,
+                                 const VRle &matteRle, SurfaceCache &cache)
+{
+    if (vIsZero(combinedAlpha())) return;
+
+    if (vCompare(combinedAlpha(), 1.0)) {
+        renderHelper(painter, inheritMask, matteRle, cache);
+    } else {
+        if (complexContent()) {
+            VSize size = painter->clipBoundingRect().size();
+            VPainter srcPainter;
+            VBitmap srcBitmap = cache.make_surface(size.width(), size.height());
+            srcPainter.begin(&srcBitmap);
+            renderHelper(&srcPainter, inheritMask, matteRle, cache);
+            srcPainter.end();
+            painter->drawBitmap(VPoint(), srcBitmap,
+                                uchar(combinedAlpha() * 255.0f));
+            cache.release_surface(srcBitmap);
+        } else {
+            // simple content: alpha was already folded into the children
+            // by updateContent(), so render directly.
+            renderHelper(painter, inheritMask, matteRle, cache);
+        }
+    }
+}
+
+// Shared render path: merges the layer mask with the inherited mask,
+// applies the composition clipper, then renders children, pairing each
+// matte source with the layer that follows it.
+void renderer::CompLayer::renderHelper(VPainter * painter,
+                                       const VRle & inheritMask,
+                                       const VRle & matteRle,
+                                       SurfaceCache &cache)
+{
+    VRle mask;
+    if (mLayerMask) {
+        mask = mLayerMask->maskRle(painter->clipBoundingRect());
+        if (!inheritMask.empty()) mask = mask & inheritMask;
+        // if resulting mask is empty then return.
+        if (mask.empty()) return;
+    } else {
+        mask = inheritMask;
+    }
+
+    if (mClipper) {
+        mask = mClipper->rle(mask);
+        if (mask.empty()) return;
+    }
+
+    renderer::Layer *matte = nullptr;
+    for (const auto &layer : mLayers) {
+        if (layer->hasMatte()) {
+            // remember the matte source; it is consumed by the next layer.
+            matte = layer;
+        } else {
+            if (layer->visible()) {
+                if (matte) {
+                    if (matte->visible())
+                        renderMatteLayer(painter, mask, matteRle, matte, layer,
+                                         cache);
+                } else {
+                    layer->render(painter, mask, matteRle, cache);
+                }
+            }
+            matte = nullptr;
+        }
+    }
+}
+
+// Composites @p src through the matte @p layer: both are rendered into
+// offscreen surfaces, blended with DestIn/DestOut depending on the matte
+// type, and the result is drawn back into @p painter.
+void renderer::CompLayer::renderMatteLayer(VPainter *painter, const VRle &mask,
+                                           const VRle & matteRle,
+                                           renderer::Layer *layer,
+                                           renderer::Layer *src,
+                                           SurfaceCache & cache)
+{
+    VSize size = painter->clipBoundingRect().size();
+    // Decide if we can use fast matte.
+    // 1. draw src layer to matte buffer
+    VPainter srcPainter;
+    VBitmap srcBitmap = cache.make_surface(size.width(), size.height());
+    srcPainter.begin(&srcBitmap);
+    src->render(&srcPainter, mask, matteRle, cache);
+    srcPainter.end();
+
+    // 2. draw layer to layer buffer
+    VPainter layerPainter;
+    VBitmap layerBitmap = cache.make_surface(size.width(), size.height());
+    layerPainter.begin(&layerBitmap);
+    layer->render(&layerPainter, mask, matteRle, cache);
+
+    // 2.1 update composition mode
+    switch (layer->matteType()) {
+    case model::MatteType::Alpha:
+    case model::MatteType::Luma: {
+        layerPainter.setBlendMode(BlendMode::DestIn);
+        break;
+    }
+    case model::MatteType::AlphaInv:
+    case model::MatteType::LumaInv: {
+        layerPainter.setBlendMode(BlendMode::DestOut);
+        break;
+    }
+    default:
+        break;
+    }
+
+    // 2.2 update srcBuffer if the matte is luma type
+    if (layer->matteType() == model::MatteType::Luma ||
+        layer->matteType() == model::MatteType::LumaInv) {
+        srcBitmap.updateLuma();
+    }
+
+    auto clip = layerPainter.clipBoundingRect();
+
+    // if the layer has only one renderer then use it as the clip rect
+    // when blending 2 buffer and copy back to final buffer to avoid
+    // unnecessary pixel processing.
+    if (layer->renderList().size() == 1)
+    {
+        clip = layer->renderList()[0]->rle().boundingRect();
+    }
+
+    // 2.3 draw src buffer as mask
+    layerPainter.drawBitmap(clip, srcBitmap, clip);
+    layerPainter.end();
+    // 3. draw the result buffer into painter
+    painter->drawBitmap(clip, layerBitmap, clip);
+
+    cache.release_surface(srcBitmap);
+    cache.release_surface(layerBitmap);
+}
+
+// Rebuilds the clip path as the composition rect transformed by @p matrix
+// and requests a re-raster.
+void renderer::Clipper::update(const VMatrix &matrix)
+{
+    mPath.reset();
+    mPath.addRect(VRectF(0, 0, mSize.width(), mSize.height()));
+    mPath.transform(matrix);
+    mRasterRequest = true;
+}
+
+// Rasterizes the clip path (once per update) against @p clip.
+void renderer::Clipper::preprocess(const VRect &clip)
+{
+    if (mRasterRequest) mRasterizer.rasterize(mPath, FillRule::Winding, clip);
+
+    mRasterRequest = false;
+}
+
+// Intersects @p mask with the clipper coverage; with an empty mask the
+// clipper coverage itself is returned.
+VRle renderer::Clipper::rle(const VRle &mask)
+{
+    if (mask.empty()) return mRasterizer.rle();
+
+    mMaskedRle.clone(mask);
+    mMaskedRle &= mRasterizer.rle();
+    return mMaskedRle;
+}
+
+// Refreshes composition content: updates the clipper on matrix change,
+// remaps the frame through time-remap, and updates children. For complex
+// content, alpha is deferred (set to 1 here) and applied in render().
+void renderer::CompLayer::updateContent()
+{
+    if (mClipper && flag().testFlag(DirtyFlagBit::Matrix)) {
+        mClipper->update(combinedMatrix());
+    }
+    int mappedFrame = mLayerData->timeRemap(frameNo());
+    float alpha = combinedAlpha();
+    if (complexContent()) alpha = 1;
+    for (const auto &layer : mLayers) {
+        layer->update(mappedFrame, combinedMatrix(), alpha);
+    }
+}
+
+// Preprocesses the clipper and visible children, skipping a matte source
+// unless its paired (following) layer is visible too.
+void renderer::CompLayer::preprocessStage(const VRect &clip)
+{
+    // if layer has clipper
+    if (mClipper) mClipper->preprocess(clip);
+
+    renderer::Layer *matte = nullptr;
+    for (const auto &layer : mLayers) {
+        if (layer->hasMatte()) {
+            matte = layer;
+        } else {
+            if (layer->visible()) {
+                if (matte) {
+                    if (matte->visible()) {
+                        layer->preprocess(clip);
+                        matte->preprocess(clip);
+                    }
+                } else {
+                    layer->preprocess(clip);
+                }
+            }
+            matte = nullptr;
+        }
+    }
+}
+
+// Solid layer: a single colored rectangle drawn via one render node.
+renderer::SolidLayer::SolidLayer(model::Layer *layerData)
+    : renderer::Layer(layerData)
+{
+    mDrawableList = &mRenderNode;
+}
+
+// Rebuilds the solid's rect path on matrix change and its brush color on
+// alpha change.
+void renderer::SolidLayer::updateContent()
+{
+    if (flag() & DirtyFlagBit::Matrix) {
+        mPath.reset();
+        mPath.addRect(VRectF(0, 0, mLayerData->layerSize().width(),
+                             mLayerData->layerSize().height()));
+        mPath.transform(combinedMatrix());
+        mRenderNode.mFlag |= VDrawable::DirtyState::Path;
+        mRenderNode.mPath = mPath;
+    }
+    if (flag() & DirtyFlagBit::Alpha) {
+        model::Color color = mLayerData->solidColor();
+        VBrush brush(color.toColor(combinedAlpha()));
+        mRenderNode.setBrush(brush);
+        mRenderNode.mFlag |= VDrawable::DirtyState::Brush;
+    }
+}
+
+// Rasterizes the solid's render node for the clip rect.
+void renderer::SolidLayer::preprocessStage(const VRect &clip)
+{
+    mRenderNode.preprocess(clip);
+}
+
+// Exposes the single solid drawable unless rendering is skipped.
+renderer::DrawableList renderer::SolidLayer::renderList()
+{
+    if (skipRendering()) return {};
+
+    return {&mDrawableList, 1};
+}
+
+// Image layer: draws the asset bitmap through a texture brush.
+renderer::ImageLayer::ImageLayer(model::Layer *layerData)
+    : renderer::Layer(layerData)
+{
+    mDrawableList = &mRenderNode;
+
+    if (!mLayerData->asset()) return;
+
+    mTexture.mBitmap = mLayerData->asset()->bitmap();
+    VBrush brush(&mTexture);
+    mRenderNode.setBrush(brush);
+}
+
+// Rebuilds the image rect and texture matrix on matrix change, and the
+// texture alpha on alpha change.
+void renderer::ImageLayer::updateContent()
+{
+    if (!mLayerData->asset()) return;
+
+    if (flag() & DirtyFlagBit::Matrix) {
+        mPath.reset();
+        mPath.addRect(VRectF(0, 0, mLayerData->asset()->mWidth,
+                             mLayerData->asset()->mHeight));
+        mPath.transform(combinedMatrix());
+        mRenderNode.mFlag |= VDrawable::DirtyState::Path;
+        mRenderNode.mPath = mPath;
+        mTexture.mMatrix = combinedMatrix();
+    }
+
+    if (flag() & DirtyFlagBit::Alpha) {
+        mTexture.mAlpha = int(combinedAlpha() * 255);
+    }
+}
+
+// Rasterizes the image's render node for the clip rect.
+void renderer::ImageLayer::preprocessStage(const VRect &clip)
+{
+    mRenderNode.preprocess(clip);
+}
+
+// Exposes the single image drawable unless rendering is skipped.
+renderer::DrawableList renderer::ImageLayer::renderList()
+{
+    if (skipRendering()) return {};
+
+    return {&mDrawableList, 1};
+}
+
+// Null layer: carries only transform/parenting, renders nothing.
+renderer::NullLayer::NullLayer(model::Layer *layerData)
+    : renderer::Layer(layerData)
+{
+}
+void renderer::NullLayer::updateContent() {}
+
+// Factory: maps a model content object to its renderer counterpart,
+// allocated from @p allocator. Returns nullptr for unknown types.
+static renderer::Object *createContentItem(model::Object *contentData,
+                                           VArenaAlloc *allocator)
+{
+    switch (contentData->type()) {
+    case model::Object::Type::Group: {
+        return allocator->make<renderer::Group>(
+            static_cast<model::Group *>(contentData), allocator);
+    }
+    case model::Object::Type::Rect: {
+        return allocator->make<renderer::Rect>(
+            static_cast<model::Rect *>(contentData));
+    }
+    case model::Object::Type::Ellipse: {
+        return allocator->make<renderer::Ellipse>(
+            static_cast<model::Ellipse *>(contentData));
+    }
+    case model::Object::Type::Path: {
+        return allocator->make<renderer::Path>(
+            static_cast<model::Path *>(contentData));
+    }
+    case model::Object::Type::Polystar: {
+        return allocator->make<renderer::Polystar>(
+            static_cast<model::Polystar *>(contentData));
+    }
+    case model::Object::Type::Fill: {
+        return allocator->make<renderer::Fill>(
+            static_cast<model::Fill *>(contentData));
+    }
+    case model::Object::Type::GFill: {
+        return allocator->make<renderer::GradientFill>(
+            static_cast<model::GradientFill *>(contentData));
+    }
+    case model::Object::Type::Stroke: {
+        return allocator->make<renderer::Stroke>(
+            static_cast<model::Stroke *>(contentData));
+    }
+    case model::Object::Type::GStroke: {
+        return allocator->make<renderer::GradientStroke>(
+            static_cast<model::GradientStroke *>(contentData));
+    }
+    case model::Object::Type::Repeater: {
+        return allocator->make<renderer::Repeater>(
+            static_cast<model::Repeater *>(contentData), allocator);
+    }
+    case model::Object::Type::Trim: {
+        return allocator->make<renderer::Trim>(
+            static_cast<model::Trim *>(contentData));
+    }
+    default:
+        // removed unreachable `break;` that followed the return.
+        return nullptr;
+    }
+}
+
+// Shape layer: builds the renderer content tree from the model and
+// collects paint participants (and trim participants, if any) up front.
+renderer::ShapeLayer::ShapeLayer(model::Layer *layerData,
+                                 VArenaAlloc * allocator)
+    : renderer::Layer(layerData),
+      mRoot(allocator->make<renderer::Group>(nullptr, allocator))
+{
+    mRoot->addChildren(layerData, allocator);
+
+    std::vector<renderer::Shape *> list;
+    mRoot->processPaintItems(list);
+
+    if (layerData->hasPathOperator()) {
+        list.clear();
+        mRoot->processTrimItems(list);
+    }
+}
+
+// Updates the content tree and re-applies trim path operators when the
+// layer declares them.
+void renderer::ShapeLayer::updateContent()
+{
+    mRoot->update(frameNo(), combinedMatrix(), combinedAlpha(), flag());
+
+    if (mLayerData->hasPathOperator()) {
+        mRoot->applyTrim();
+    }
+}
+
+// Collects the current drawables and rasterizes each for the clip rect.
+void renderer::ShapeLayer::preprocessStage(const VRect &clip)
+{
+    mDrawableList.clear();
+    mRoot->renderList(mDrawableList);
+
+    for (auto &drawable : mDrawableList) drawable->preprocess(clip);
+}
+
+// Rebuilds and returns the drawable list, or an empty span when the layer
+// is skipped or produces nothing.
+renderer::DrawableList renderer::ShapeLayer::renderList()
+{
+    if (skipRendering()) return {};
+
+    mDrawableList.clear();
+    mRoot->renderList(mDrawableList);
+
+    if (mDrawableList.empty()) return {};
+
+    return {mDrawableList.data(), mDrawableList.size()};
+}
+
+// Resolves the key path against the group (unless skipped by name), adds
+// a dynamic transform filter value on a full match, and propagates the
+// path to children.
+bool renderer::Group::resolveKeyPath(LOTKeyPath &keyPath, uint depth,
+                                     LOTVariant &value)
+{
+    if (!keyPath.skip(name())) {
+        if (!keyPath.matches(mModel.name(), depth)) {
+            return false;
+        }
+
+        if (!keyPath.skip(mModel.name())) {
+            if (keyPath.fullyResolvesTo(mModel.name(), depth) &&
+                transformProp(value.property())) {
+                mModel.filter()->addValue(value);
+            }
+        }
+    }
+
+    if (keyPath.propagate(name(), depth)) {
+        uint newDepth = keyPath.nextDepth(name(), depth);
+        for (auto &child : mContents) {
+            child->resolveKeyPath(keyPath, newDepth, value);
+        }
+    }
+    return true;
+}
+
+// Installs a dynamic fill property when the key path fully resolves to
+// this fill and the property is a fill property.
+bool renderer::Fill::resolveKeyPath(LOTKeyPath &keyPath, uint depth,
+                                    LOTVariant &value)
+{
+    if (!keyPath.matches(mModel.name(), depth)) {
+        return false;
+    }
+
+    if (keyPath.fullyResolvesTo(mModel.name(), depth) &&
+        fillProp(value.property())) {
+        mModel.filter()->addValue(value);
+        return true;
+    }
+    return false;
+}
+
+// Installs a dynamic stroke property when the key path fully resolves to
+// this stroke and the property is a stroke property.
+bool renderer::Stroke::resolveKeyPath(LOTKeyPath &keyPath, uint depth,
+                                      LOTVariant &value)
+{
+    if (!keyPath.matches(mModel.name(), depth)) {
+        return false;
+    }
+
+    if (keyPath.fullyResolvesTo(mModel.name(), depth) &&
+        strokeProp(value.property())) {
+        mModel.filter()->addValue(value);
+        return true;
+    }
+    return false;
+}
+
+// Group: wraps a model group and builds its children.
+renderer::Group::Group(model::Group *data, VArenaAlloc *allocator)
+    : mModel(data)
+{
+    addChildren(data, allocator);
+}
+
+// Appends a renderer object for every child of @p data, reversing the
+// model order so contents end up back-to-front for painting.
+void renderer::Group::addChildren(model::Group *data, VArenaAlloc *allocator)
+{
+    if (!data) return;
+
+    if (!data->mChildren.empty()) mContents.reserve(data->mChildren.size());
+
+    // keep the content in back-to-front order.
+    // as lottie model keeps it in front-to-back order.
+    // (crend() matches crbegin(); the previous code mixed const and
+    // non-const reverse iterators.)
+    for (auto it = data->mChildren.crbegin(); it != data->mChildren.crend();
+         ++it) {
+        auto content = createContentItem(*it, allocator);
+        if (content) {
+            mContents.push_back(content);
+        }
+    }
+}
+
+// Updates the group transform/alpha for @p frameNo, extends the dirty
+// flag when they changed, and forwards the update to every child.
+void renderer::Group::update(int frameNo, const VMatrix &parentMatrix,
+                             float parentAlpha, const DirtyFlag &flag)
+{
+    DirtyFlag newFlag = flag;
+    float alpha;
+
+    if (mModel.hasModel() && mModel.transform()) {
+        VMatrix m = mModel.matrix(frameNo);
+
+        m *= parentMatrix;
+        if (!(flag & DirtyFlagBit::Matrix) && !mModel.transform()->isStatic() &&
+            (m != mMatrix)) {
+            newFlag |= DirtyFlagBit::Matrix;
+        }
+
+        mMatrix = m;
+
+        alpha = parentAlpha * mModel.transform()->opacity(frameNo);
+        if (!vCompare(alpha, parentAlpha)) {
+            newFlag |= DirtyFlagBit::Alpha;
+        }
+    } else {
+        // group without its own transform inherits the parent state.
+        mMatrix = parentMatrix;
+        alpha = parentAlpha;
+    }
+
+    for (const auto &content : mContents) {
+        content->update(frameNo, matrix(), alpha, newFlag);
+    }
+}
+
+// Recursively re-runs trim path operators (front-to-back) on this group
+// and nested groups.
+void renderer::Group::applyTrim()
+{
+    for (auto i = mContents.rbegin(); i != mContents.rend(); ++i) {
+        auto content = (*i);
+        switch (content->type()) {
+        case renderer::Object::Type::Trim: {
+            static_cast<renderer::Trim *>(content)->update();
+            break;
+        }
+        case renderer::Object::Type::Group: {
+            static_cast<renderer::Group *>(content)->applyTrim();
+            break;
+        }
+        default:
+            break;
+        }
+    }
+}
+
+// Collects every drawable of this subtree into @p list.
+void renderer::Group::renderList(std::vector<VDrawable *> &list)
+{
+    for (const auto &content : mContents) {
+        content->renderList(list);
+    }
+}
+
+// Walks front-to-back, collecting shapes into @p list and letting each
+// paint object claim the shapes gathered after it (from startOffset).
+void renderer::Group::processPaintItems(std::vector<renderer::Shape *> &list)
+{
+    size_t curOpCount = list.size();
+    for (auto i = mContents.rbegin(); i != mContents.rend(); ++i) {
+        auto content = (*i);
+        switch (content->type()) {
+        case renderer::Object::Type::Shape: {
+            auto pathItem = static_cast<renderer::Shape *>(content);
+            pathItem->setParent(this);
+            list.push_back(pathItem);
+            break;
+        }
+        case renderer::Object::Type::Paint: {
+            static_cast<renderer::Paint *>(content)->addPathItems(list,
+                                                                  curOpCount);
+            break;
+        }
+        case renderer::Object::Type::Group: {
+            static_cast<renderer::Group *>(content)->processPaintItems(list);
+            break;
+        }
+        default:
+            break;
+        }
+    }
+}
+
+// Same walk as processPaintItems, but trim objects claim the shapes
+// gathered after them instead of paint objects.
+void renderer::Group::processTrimItems(std::vector<renderer::Shape *> &list)
+{
+    size_t curOpCount = list.size();
+    for (auto i = mContents.rbegin(); i != mContents.rend(); ++i) {
+        auto content = (*i);
+
+        switch (content->type()) {
+        case renderer::Object::Type::Shape: {
+            list.push_back(static_cast<renderer::Shape *>(content));
+            break;
+        }
+        case renderer::Object::Type::Trim: {
+            static_cast<renderer::Trim *>(content)->addPathItems(list,
+                                                                 curOpCount);
+            break;
+        }
+        case renderer::Object::Type::Group: {
+            static_cast<renderer::Group *>(content)->processTrimItems(list);
+            break;
+        }
+        default:
+            break;
+        }
+    }
+}
+
+/*
+ * renderer::Shape uses 2 path objects for path object reuse.
+ * mLocalPath - keeps track of the local path of the item before
+ * applying path operation and transformation.
+ * mTemp - keeps a reference to the mLocalPath and can be updated by the
+ * path operation objects(trim, merge path),
+ * We update the DirtyPath flag if the path needs to be updated again
+ * because of local path or matrix or some path operation has changed which
+ * affects the final path.
+ * The PaintObject queries the dirty flag to check if it needs to compute the
+ * final path again and calls finalPath() api to do the same.
+ * finalPath() api passes a result Object so that we keep only one copy of
+ * the path object in the paintItem (for memory efficiency).
+ * NOTE: As path objects are COW objects we have to be
+ * careful about the refcount so that we don't generate deep copy while
+ * modifying the path objects.
+ */
+void renderer::Shape::update(int frameNo, const VMatrix &, float,
+                             const DirtyFlag &flag)
+{
+    mDirtyPath = false;
+
+    // 1. update the local path if needed
+    if (hasChanged(frameNo)) {
+        // loose the reference to mLocalPath if any
+        // from the last frame update.
+        mTemp = VPath();
+
+        updatePath(mLocalPath, frameNo);
+        mDirtyPath = true;
+    }
+    // 2. keep a reference path in temp in case there is some
+    // path operation like trim which will update the path.
+    // we don't want to update the local path.
+    mTemp = mLocalPath;
+
+    // 3. mark the path dirty if matrix has changed.
+    if (flag & DirtyFlagBit::Matrix) {
+        mDirtyPath = true;
+    }
+}
+
+// Appends the (possibly trimmed) path, transformed by the parent group's
+// matrix, into @p result.
+void renderer::Shape::finalPath(VPath &result)
+{
+    result.addPath(mTemp, static_cast<renderer::Group *>(parent())->matrix());
+}
+
+renderer::Rect::Rect(model::Rect *data)
+    : renderer::Shape(data->isStatic()), mData(data)
+{
+}
+
+// Rebuilds the (optionally rounded) rectangle centered on the animated
+// position for @p frameNo.
+void renderer::Rect::updatePath(VPath &path, int frameNo)
+{
+    VPointF pos = mData->mPos.value(frameNo);
+    VPointF size = mData->mSize.value(frameNo);
+    float roundness = mData->roundness(frameNo);
+    VRectF r(pos.x() - size.x() / 2, pos.y() - size.y() / 2, size.x(),
+             size.y());
+
+    path.reset();
+    path.addRoundRect(r, roundness, mData->direction());
+}
+
+renderer::Ellipse::Ellipse(model::Ellipse *data)
+    : renderer::Shape(data->isStatic()), mData(data)
+{
+}
+
+// Rebuilds the ellipse centered on the animated position for @p frameNo.
+void renderer::Ellipse::updatePath(VPath &path, int frameNo)
+{
+    VPointF pos = mData->mPos.value(frameNo);
+    VPointF size = mData->mSize.value(frameNo);
+    VRectF r(pos.x() - size.x() / 2, pos.y() - size.y() / 2, size.x(),
+             size.y());
+
+    path.reset();
+    path.addOval(r, mData->direction());
+}
+
+renderer::Path::Path(model::Path *data)
+    : renderer::Shape(data->isStatic()), mData(data)
+{
+}
+
+// Copies the freeform shape keyframe value for @p frameNo into @p path.
+void renderer::Path::updatePath(VPath &path, int frameNo)
+{
+    mData->mShape.value(frameNo, path);
+}
+
+renderer::Polystar::Polystar(model::Polystar *data)
+    : renderer::Shape(data->isStatic()), mData(data)
+{
+}
+
+// Builds the polystar/polygon outline for @p frameNo at the origin, then
+// translates it to the animated position and rotates it once.
+void renderer::Polystar::updatePath(VPath &path, int frameNo)
+{
+    VPointF pos = mData->mPos.value(frameNo);
+    float points = mData->mPointCount.value(frameNo);
+    float innerRadius = mData->mInnerRadius.value(frameNo);
+    float outerRadius = mData->mOuterRadius.value(frameNo);
+    float innerRoundness = mData->mInnerRoundness.value(frameNo);
+    float outerRoundness = mData->mOuterRoundness.value(frameNo);
+    float rotation = mData->mRotation.value(frameNo);
+
+    path.reset();
+    VMatrix m;
+
+    if (mData->mPolyType == model::Polystar::PolyType::Star) {
+        path.addPolystar(points, innerRadius, outerRadius, innerRoundness,
+                         outerRoundness, 0.0, 0.0, 0.0, mData->direction());
+    } else {
+        path.addPolygon(points, outerRadius, outerRoundness, 0.0, 0.0, 0.0,
+                        mData->direction());
+    }
+
+    // BUGFIX: rotation was previously applied twice (once chained after
+    // translate() and once via a second m.rotate(rotation) call), which
+    // doubled the star's rotation. Apply it exactly once.
+    m.translate(pos.x(), pos.y()).rotate(rotation);
+    path.transform(m);
+}
+
+/*
+ * PaintData Node handling
+ *
+ */
+renderer::Paint::Paint(bool staticContent) : mStaticContent(staticContent) {}
+
+// Per-frame update: marks the render node for a lazy rebuild and records
+// whether there is anything worth rendering this frame.
+void renderer::Paint::update(int frameNo, const VMatrix &parentMatrix,
+                             float parentAlpha, const DirtyFlag & /*flag*/)
+{
+    mRenderNodeUpdate = true;
+    mContentToRender = updateContent(frameNo, parentMatrix, parentAlpha);
+}
+
+// Recomputes the final path from all attached shape items when any of
+// them is dirty; otherwise just refreshes the drawable's cached path.
+void renderer::Paint::updateRenderNode()
+{
+    bool dirty = false;
+    for (auto &i : mPathItems) {
+        if (i->dirty()) {
+            dirty = true;
+            break;
+        }
+    }
+
+    if (dirty) {
+        mPath.reset();
+        for (const auto &i : mPathItems) {
+            i->finalPath(mPath);
+        }
+        mDrawable.setPath(mPath);
+    } else {
+        if (mDrawable.mFlag & VDrawable::DirtyState::Path)
+            mDrawable.mPath = mPath;
+    }
+}
+
+// Refreshes the render node (at most once per frame) and publishes the
+// drawable when there is content to render.
+void renderer::Paint::renderList(std::vector<VDrawable *> &list)
+{
+    if (mRenderNodeUpdate) {
+        updateRenderNode();
+        mRenderNodeUpdate = false;
+    }
+
+    // Q: Why we even update the final path if we don't have content
+    // to render ?
+    // Ans: We update the render nodes because we will lose the
+    // dirty path information at end of this frame.
+    // so if we return early without updating the final path.
+    // in the subsequent frame when we have content to render but
+    // we may not able to update our final path properly as we
+    // don't know what paths got changed in between.
+    if (mContentToRender) list.push_back(&mDrawable);
+}
+
+// Claims the shape items collected after @p startOffset as inputs of this
+// paint node.
+void renderer::Paint::addPathItems(std::vector<renderer::Shape *> &list,
+                                   size_t startOffset)
+{
+    std::copy(list.begin() + startOffset, list.end(),
+              back_inserter(mPathItems));
+}
+
+// Solid fill paint node.
+renderer::Fill::Fill(model::Fill *data)
+    : renderer::Paint(data->isStatic()), mModel(data)
+{
+    mDrawable.setName(mModel.name());
+}
+
+// Updates brush color and fill rule; reports false (nothing to render)
+// when the resulting color is fully transparent.
+bool renderer::Fill::updateContent(int frameNo, const VMatrix &, float alpha)
+{
+    auto combinedAlpha = alpha * mModel.opacity(frameNo);
+    auto color = mModel.color(frameNo).toColor(combinedAlpha);
+
+    VBrush brush(color);
+    mDrawable.setBrush(brush);
+    mDrawable.setFillRule(mModel.fillRule());
+
+    return !color.isTransparent();
+}
+
+// Gradient fill paint node.
+renderer::GradientFill::GradientFill(model::GradientFill *data)
+    : renderer::Paint(data->isStatic()), mData(data)
+{
+    mDrawable.setName(mData->name());
+}
+
+// Updates the gradient stops/matrix/alpha and fill rule; reports false
+// when the combined alpha is (near) zero.
+bool renderer::GradientFill::updateContent(int frameNo, const VMatrix &matrix,
+                                           float alpha)
+{
+    float combinedAlpha = alpha * mData->opacity(frameNo);
+
+    mData->update(mGradient, frameNo);
+    mGradient->setAlpha(combinedAlpha);
+    mGradient->mMatrix = matrix;
+    mDrawable.setBrush(VBrush(mGradient.get()));
+    mDrawable.setFillRule(mData->fillRule());
+
+    return !vIsZero(combinedAlpha);
+}
+
+// Solid stroke paint node; picks the dashed drawable type when the model
+// carries dash info.
+renderer::Stroke::Stroke(model::Stroke *data)
+    : renderer::Paint(data->isStatic()), mModel(data)
+{
+    mDrawable.setName(mModel.name());
+    if (mModel.hasDashInfo()) {
+        mDrawable.setType(VDrawable::Type::StrokeWithDash);
+    } else {
+        mDrawable.setType(VDrawable::Type::Stroke);
+    }
+}
+
+// Per-thread scratch buffer reused by the stroke dash updates below to
+// avoid per-frame allocations.
+static vthread_local std::vector<float> Dash_Vector;
+
+// Updates stroke brush, width (scaled by the matrix) and dash pattern;
+// reports false when the resulting color is fully transparent.
+bool renderer::Stroke::updateContent(int frameNo, const VMatrix &matrix,
+                                     float alpha)
+{
+    auto combinedAlpha = alpha * mModel.opacity(frameNo);
+    auto color = mModel.color(frameNo).toColor(combinedAlpha);
+
+    VBrush brush(color);
+    mDrawable.setBrush(brush);
+    float scale = matrix.scale();
+    mDrawable.setStrokeInfo(mModel.capStyle(), mModel.joinStyle(),
+                            mModel.miterLimit(),
+                            mModel.strokeWidth(frameNo) * scale);
+
+    if (mModel.hasDashInfo()) {
+        Dash_Vector.clear();
+        mModel.getDashInfo(frameNo, Dash_Vector);
+        if (!Dash_Vector.empty()) {
+            // dash lengths live in layer space; scale with the matrix.
+            for (auto &elm : Dash_Vector) elm *= scale;
+            mDrawable.setDashInfo(Dash_Vector);
+        }
+    }
+
+    return !color.isTransparent();
+}
+
+// Gradient stroke paint node; picks the dashed drawable type when the
+// model carries dash info.
+renderer::GradientStroke::GradientStroke(model::GradientStroke *data)
+    : renderer::Paint(data->isStatic()), mData(data)
+{
+    mDrawable.setName(mData->name());
+    if (mData->hasDashInfo()) {
+        mDrawable.setType(VDrawable::Type::StrokeWithDash);
+    } else {
+        mDrawable.setType(VDrawable::Type::Stroke);
+    }
+}
+
+// Updates gradient, stroke width (scaled by the matrix) and dash pattern;
+// reports false when the combined alpha is (near) zero.
+bool renderer::GradientStroke::updateContent(int frameNo, const VMatrix &matrix,
+                                             float alpha)
+{
+    float combinedAlpha = alpha * mData->opacity(frameNo);
+
+    mData->update(mGradient, frameNo);
+    mGradient->setAlpha(combinedAlpha);
+    mGradient->mMatrix = matrix;
+    auto scale = mGradient->mMatrix.scale();
+    mDrawable.setBrush(VBrush(mGradient.get()));
+    mDrawable.setStrokeInfo(mData->capStyle(), mData->joinStyle(),
+                            mData->miterLimit(), mData->width(frameNo) * scale);
+
+    if (mData->hasDashInfo()) {
+        Dash_Vector.clear();
+        mData->getDashInfo(frameNo, Dash_Vector);
+        if (!Dash_Vector.empty()) {
+            for (auto &elm : Dash_Vector) elm *= scale;
+            mDrawable.setDashInfo(Dash_Vector);
+        }
+    }
+
+    return !vIsZero(combinedAlpha);
+}
+
+// Per-frame update: caches the trim segment for @p frameNo and marks the
+// trim dirty only when the segment actually changed.
+void renderer::Trim::update(int frameNo, const VMatrix & /*parentMatrix*/,
+                            float /*parentAlpha*/, const DirtyFlag & /*flag*/)
+{
+    mDirty = false;
+
+    if (mCache.mFrameNo == frameNo) return;
+
+    model::Trim::Segment segment = mData->segment(frameNo);
+
+    if (!(vCompare(mCache.mSegment.start, segment.start) &&
+          vCompare(mCache.mSegment.end, segment.end))) {
+        mDirty = true;
+        mCache.mSegment = segment;
+    }
+    mCache.mFrameNo = frameNo;
+}
+
+// Applies the cached trim segment to every attached shape: empty segment
+// clears paths, full segment copies them, otherwise the paths are trimmed
+// either per-shape (Simultaneously) or across the summed length of all
+// shapes (Individually).
+void renderer::Trim::update()
+{
+    // when both path and trim are not dirty
+    if (!(mDirty || pathDirty())) return;
+
+    if (vCompare(mCache.mSegment.start, mCache.mSegment.end)) {
+        // zero-length segment: nothing remains of any path.
+        for (auto &i : mPathItems) {
+            i->updatePath(VPath());
+        }
+        return;
+    }
+
+    if (vCompare(std::fabs(mCache.mSegment.start - mCache.mSegment.end), 1)) {
+        // full segment: keep every path untrimmed.
+        for (auto &i : mPathItems) {
+            i->updatePath(i->localPath());
+        }
+        return;
+    }
+
+    if (mData->type() == model::Trim::TrimType::Simultaneously) {
+        for (auto &i : mPathItems) {
+            mPathMesure.setRange(mCache.mSegment.start, mCache.mSegment.end);
+            i->updatePath(mPathMesure.trim(i->localPath()));
+        }
+    } else { // model::Trim::TrimType::Individually
+        float totalLength = 0.0;
+        for (auto &i : mPathItems) {
+            totalLength += i->localPath().length();
+        }
+        // segment expressed in absolute length along the concatenation
+        // of all paths.
+        float start = totalLength * mCache.mSegment.start;
+        float end = totalLength * mCache.mSegment.end;
+
+        if (start < end) {
+            float curLen = 0.0;
+            for (auto &i : mPathItems) {
+                if (curLen > end) {
+                    // update with empty path.
+                    i->updatePath(VPath());
+                    continue;
+                }
+                float len = i->localPath().length();
+
+                if (curLen < start && curLen + len < start) {
+                    curLen += len;
+                    // update with empty path.
+                    i->updatePath(VPath());
+                    continue;
+                } else if (start <= curLen && end >= curLen + len) {
+                    // inside segment
+                    curLen += len;
+                    continue;
+                } else {
+                    // partial overlap: trim in this path's local [0,1] range.
+                    float local_start = start > curLen ? start - curLen : 0;
+                    local_start /= len;
+                    float local_end = curLen + len < end ? len : end - curLen;
+                    local_end /= len;
+                    mPathMesure.setRange(local_start, local_end);
+                    i->updatePath(mPathMesure.trim(i->localPath()));
+                    curLen += len;
+                }
+            }
+        }
+    }
+}
+
+// Claims the shape items collected after @p startOffset as inputs of this
+// trim operator.
+void renderer::Trim::addPathItems(std::vector<renderer::Shape *> &list,
+                                  size_t startOffset)
+{
+    std::copy(list.begin() + startOffset, list.end(),
+              back_inserter(mPathItems));
+}
+
+// Repeater: pre-builds one content group per possible copy (maxCopies),
+// so per-frame updates only adjust transforms and alpha.
+renderer::Repeater::Repeater(model::Repeater *data, VArenaAlloc *allocator)
+    : mRepeaterData(data)
+{
+    assert(mRepeaterData->content());
+
+    mCopies = mRepeaterData->maxCopies();
+
+    for (int i = 0; i < mCopies; i++) {
+        auto content = allocator->make<renderer::Group>(
+            mRepeaterData->content(), allocator);
+        // content->setParent(this);
+        mContents.push_back(content);
+    }
+}
+
+// Updates every copy with its own interpolated transform and opacity;
+// copies beyond the animated copy count are hidden via zero alpha.
+void renderer::Repeater::update(int frameNo, const VMatrix &parentMatrix,
+                                float parentAlpha, const DirtyFlag &flag)
+{
+    DirtyFlag newFlag = flag;
+
+    float copies = mRepeaterData->copies(frameNo);
+    int visibleCopies = int(copies);
+
+    if (visibleCopies == 0) {
+        mHidden = true;
+        return;
+    }
+
+    mHidden = false;
+
+    if (!mRepeaterData->isStatic()) newFlag |= DirtyFlagBit::Matrix;
+
+    float offset = mRepeaterData->offset(frameNo);
+    float startOpacity = mRepeaterData->mTransform.startOpacity(frameNo);
+    float endOpacity = mRepeaterData->mTransform.endOpacity(frameNo);
+
+    newFlag |= DirtyFlagBit::Alpha;
+
+    for (int i = 0; i < mCopies; ++i) {
+        // opacity is interpolated linearly from start to end across copies.
+        float newAlpha =
+            parentAlpha * lerp(startOpacity, endOpacity, i / copies);
+
+        // hide rest of the copies , @TODO find a better solution.
+        if (i >= visibleCopies) newAlpha = 0;
+
+        VMatrix result = mRepeaterData->mTransform.matrix(frameNo, i + offset) *
+                         parentMatrix;
+        mContents[i]->update(frameNo, result, newAlpha, newFlag);
+    }
+}
+
+// Publishes the copies' drawables unless the repeater is hidden.
+void renderer::Repeater::renderList(std::vector<VDrawable *> &list)
+{
+    if (mHidden) return;
+    return renderer::Group::renderList(list);
+}
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_lottieitem.h b/vendor/github.com/Benau/go_rlottie/lottie_lottieitem.h
new file mode 100644
index 00000000..e1276113
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_lottieitem.h
@@ -0,0 +1,626 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef LOTTIEITEM_H
+#define LOTTIEITEM_H
+
+#include <memory>
+#include <sstream>
+
+#include "lottie_lottiekeypath.h"
+#include "lottie_lottiefiltermodel.h"
+#include "rlottie.h"
+#include "rlottiecommon.h"
+#include "vector_varenaalloc.h"
+#include "vector_vdrawable.h"
+#include "vector_vmatrix.h"
+#include "vector_vpainter.h"
+#include "vector_vpath.h"
+#include "vector_vpathmesure.h"
+#include "vector_vpoint.h"
+
+V_USE_NAMESPACE
+
+namespace rlottie {
+
+namespace internal {
+
+template <class T>
+class VSpan {
+public:
+ using reference = T &;
+ using pointer = T *;
+ using const_pointer = T const *;
+ using const_reference = T const &;
+ using index_type = size_t;
+
+ using iterator = pointer;
+ using const_iterator = const_pointer;
+
+ VSpan() = default;
+ VSpan(pointer data, index_type size) : _data(data), _size(size) {}
+
+ constexpr pointer data() const noexcept { return _data; }
+ constexpr index_type size() const noexcept { return _size; }
+ constexpr bool empty() const noexcept { return size() == 0; }
+ constexpr iterator begin() const noexcept { return data(); }
+ constexpr iterator end() const noexcept { return data() + size(); }
+ constexpr const_iterator cbegin() const noexcept { return data(); }
+ constexpr const_iterator cend() const noexcept { return data() + size(); }
+ constexpr reference operator[](index_type idx) const
+ {
+ return *(data() + idx);
+ }
+
+private:
+ pointer _data{nullptr};
+ index_type _size{0};
+};
+
+namespace renderer {
+
+using DrawableList = VSpan<VDrawable *>;
+
+enum class DirtyFlagBit : uchar {
+ None = 0x00,
+ Matrix = 0x01,
+ Alpha = 0x02,
+ All = (Matrix | Alpha)
+};
+typedef vFlag<DirtyFlagBit> DirtyFlag;
+
+class SurfaceCache {
+public:
+ SurfaceCache() { mCache.reserve(10); }
+
+ VBitmap make_surface(
+ size_t width, size_t height,
+ VBitmap::Format format = VBitmap::Format::ARGB32_Premultiplied)
+ {
+ if (mCache.empty()) return {width, height, format};
+
+ auto surface = mCache.back();
+ surface.reset(width, height, format);
+
+ mCache.pop_back();
+ return surface;
+ }
+
+ void release_surface(VBitmap &surface) { mCache.push_back(surface); }
+
+private:
+ std::vector<VBitmap> mCache;
+};
+
+class Drawable final : public VDrawable {
+public:
+ void sync();
+
+public:
+ std::unique_ptr<LOTNode> mCNode{nullptr};
+
+ ~Drawable() noexcept
+ {
+ if (mCNode && mCNode->mGradient.stopPtr)
+ free(mCNode->mGradient.stopPtr);
+ }
+};
+
+struct CApiData {
+ CApiData();
+ LOTLayerNode mLayer;
+ std::vector<LOTMask> mMasks;
+ std::vector<LOTLayerNode *> mLayers;
+ std::vector<LOTNode *> mCNodeList;
+};
+
+class Clipper {
+public:
+ explicit Clipper(VSize size) : mSize(size) {}
+ void update(const VMatrix &matrix);
+ void preprocess(const VRect &clip);
+ VRle rle(const VRle &mask);
+
+public:
+ VSize mSize;
+ VPath mPath;
+ VRle mMaskedRle;
+ VRasterizer mRasterizer;
+ bool mRasterRequest{false};
+};
+
+class Mask {
+public:
+ explicit Mask(model::Mask *data) : mData(data) {}
+ void update(int frameNo, const VMatrix &parentMatrix, float parentAlpha,
+ const DirtyFlag &flag);
+ model::Mask::Mode maskMode() const { return mData->mMode; }
+ VRle rle();
+ void preprocess(const VRect &clip);
+ bool inverted() const { return mData->mInv; }
+public:
+ model::Mask *mData{nullptr};
+ VPath mLocalPath;
+ VPath mFinalPath;
+ VRasterizer mRasterizer;
+ float mCombinedAlpha{0};
+ bool mRasterRequest{false};
+};
+
+/*
+ * Handels mask property of a layer item
+ */
+class LayerMask {
+public:
+ explicit LayerMask(model::Layer *layerData);
+ void update(int frameNo, const VMatrix &parentMatrix, float parentAlpha,
+ const DirtyFlag &flag);
+ bool isStatic() const { return mStatic; }
+ VRle maskRle(const VRect &clipRect);
+ void preprocess(const VRect &clip);
+
+public:
+ std::vector<Mask> mMasks;
+ VRle mRle;
+ bool mStatic{true};
+ bool mDirty{true};
+};
+
+class Layer;
+
+class Composition {
+public:
+ explicit Composition(std::shared_ptr<model::Composition> composition);
+ bool update(int frameNo, const VSize &size, bool keepAspectRatio);
+ VSize size() const { return mViewSize; }
+ void buildRenderTree();
+ const LOTLayerNode *renderTree() const;
+ bool render(const rlottie::Surface &surface);
+ void setValue(const std::string &keypath, LOTVariant &value);
+
+private:
+ SurfaceCache mSurfaceCache;
+ VBitmap mSurface;
+ VMatrix mScaleMatrix;
+ VSize mViewSize;
+ std::shared_ptr<model::Composition> mModel;
+ Layer * mRootLayer{nullptr};
+ VArenaAlloc mAllocator{2048};
+ int mCurFrameNo;
+ bool mKeepAspectRatio{true};
+};
+
+class Layer {
+public:
+ virtual ~Layer() = default;
+ Layer &operator=(Layer &&) noexcept = delete;
+ Layer(model::Layer *layerData);
+ int id() const { return mLayerData->id(); }
+ int parentId() const { return mLayerData->parentId(); }
+ void setParentLayer(Layer *parent) { mParentLayer = parent; }
+ void setComplexContent(bool value) { mComplexContent = value; }
+ bool complexContent() const { return mComplexContent; }
+ virtual void update(int frameNo, const VMatrix &parentMatrix,
+ float parentAlpha);
+ VMatrix matrix(int frameNo) const;
+ void preprocess(const VRect &clip);
+ virtual DrawableList renderList() { return {}; }
+ virtual void render(VPainter *painter, const VRle &mask,
+ const VRle &matteRle, SurfaceCache &cache);
+ bool hasMatte()
+ {
+ if (mLayerData->mMatteType == model::MatteType::None) return false;
+ return true;
+ }
+ model::MatteType matteType() const { return mLayerData->mMatteType; }
+ bool visible() const;
+ virtual void buildLayerNode();
+ LOTLayerNode & clayer() { return mCApiData->mLayer; }
+ std::vector<LOTLayerNode *> &clayers() { return mCApiData->mLayers; }
+ std::vector<LOTMask> & cmasks() { return mCApiData->mMasks; }
+ std::vector<LOTNode *> & cnodes() { return mCApiData->mCNodeList; }
+ const char * name() const { return mLayerData->name(); }
+ virtual bool resolveKeyPath(LOTKeyPath &keyPath, uint depth,
+ LOTVariant &value);
+
+protected:
+ virtual void preprocessStage(const VRect &clip) = 0;
+ virtual void updateContent() = 0;
+ inline VMatrix combinedMatrix() const { return mCombinedMatrix; }
+ inline int frameNo() const { return mFrameNo; }
+ inline float combinedAlpha() const { return mCombinedAlpha; }
+ inline bool isStatic() const { return mLayerData->isStatic(); }
+ float opacity(int frameNo) const { return mLayerData->opacity(frameNo); }
+ inline DirtyFlag flag() const { return mDirtyFlag; }
+ bool skipRendering() const
+ {
+ return (!visible() || vIsZero(combinedAlpha()));
+ }
+
+protected:
+ std::unique_ptr<LayerMask> mLayerMask;
+ model::Layer * mLayerData{nullptr};
+ Layer * mParentLayer{nullptr};
+ VMatrix mCombinedMatrix;
+ float mCombinedAlpha{0.0};
+ int mFrameNo{-1};
+ DirtyFlag mDirtyFlag{DirtyFlagBit::All};
+ bool mComplexContent{false};
+ std::unique_ptr<CApiData> mCApiData;
+};
+
+class CompLayer final : public Layer {
+public:
+ explicit CompLayer(model::Layer *layerData, VArenaAlloc *allocator);
+
+ void render(VPainter *painter, const VRle &mask, const VRle &matteRle,
+ SurfaceCache &cache) final;
+ void buildLayerNode() final;
+ bool resolveKeyPath(LOTKeyPath &keyPath, uint depth,
+ LOTVariant &value) override;
+
+protected:
+ void preprocessStage(const VRect &clip) final;
+ void updateContent() final;
+
+private:
+ void renderHelper(VPainter *painter, const VRle &mask, const VRle &matteRle,
+ SurfaceCache &cache);
+ void renderMatteLayer(VPainter *painter, const VRle &inheritMask,
+ const VRle &matteRle, Layer *layer, Layer *src,
+ SurfaceCache &cache);
+
+private:
+ std::vector<Layer *> mLayers;
+ std::unique_ptr<Clipper> mClipper;
+};
+
+class SolidLayer final : public Layer {
+public:
+ explicit SolidLayer(model::Layer *layerData);
+ void buildLayerNode() final;
+ DrawableList renderList() final;
+
+protected:
+ void preprocessStage(const VRect &clip) final;
+ void updateContent() final;
+
+private:
+ Drawable mRenderNode;
+ VPath mPath;
+ VDrawable *mDrawableList{nullptr}; // to work with the Span api
+};
+
+class Group;
+
+class ShapeLayer final : public Layer {
+public:
+ explicit ShapeLayer(model::Layer *layerData, VArenaAlloc *allocator);
+ DrawableList renderList() final;
+ void buildLayerNode() final;
+ bool resolveKeyPath(LOTKeyPath &keyPath, uint depth,
+ LOTVariant &value) override;
+
+protected:
+ void preprocessStage(const VRect &clip) final;
+ void updateContent() final;
+ std::vector<VDrawable *> mDrawableList;
+ Group * mRoot{nullptr};
+};
+
+class NullLayer final : public Layer {
+public:
+ explicit NullLayer(model::Layer *layerData);
+
+protected:
+ void preprocessStage(const VRect &) final {}
+ void updateContent() final;
+};
+
+class ImageLayer final : public Layer {
+public:
+ explicit ImageLayer(model::Layer *layerData);
+ void buildLayerNode() final;
+ DrawableList renderList() final;
+
+protected:
+ void preprocessStage(const VRect &clip) final;
+ void updateContent() final;
+
+private:
+ Drawable mRenderNode;
+ VTexture mTexture;
+ VPath mPath;
+ VDrawable *mDrawableList{nullptr}; // to work with the Span api
+};
+
+class Object {
+public:
+ enum class Type : uchar { Unknown, Group, Shape, Paint, Trim };
+ virtual ~Object() = default;
+ Object & operator=(Object &&) noexcept = delete;
+ virtual void update(int frameNo, const VMatrix &parentMatrix,
+ float parentAlpha, const DirtyFlag &flag) = 0;
+ virtual void renderList(std::vector<VDrawable *> &) {}
+ virtual bool resolveKeyPath(LOTKeyPath &, uint, LOTVariant &)
+ {
+ return false;
+ }
+ virtual Object::Type type() const { return Object::Type::Unknown; }
+};
+
+class Shape;
+class Group : public Object {
+public:
+ Group() = default;
+ explicit Group(model::Group *data, VArenaAlloc *allocator);
+ void addChildren(model::Group *data, VArenaAlloc *allocator);
+ void update(int frameNo, const VMatrix &parentMatrix, float parentAlpha,
+ const DirtyFlag &flag) override;
+ void applyTrim();
+ void processTrimItems(std::vector<Shape *> &list);
+ void processPaintItems(std::vector<Shape *> &list);
+ void renderList(std::vector<VDrawable *> &list) override;
+ Object::Type type() const final { return Object::Type::Group; }
+ const VMatrix &matrix() const { return mMatrix; }
+ const char * name() const
+ {
+ static const char *TAG = "__";
+ return mModel.hasModel() ? mModel.name() : TAG;
+ }
+ bool resolveKeyPath(LOTKeyPath &keyPath, uint depth,
+ LOTVariant &value) override;
+
+protected:
+ std::vector<Object *> mContents;
+ VMatrix mMatrix;
+
+private:
+ model::Filter<model::Group> mModel;
+};
+
+class Shape : public Object {
+public:
+ Shape(bool staticPath) : mStaticPath(staticPath) {}
+ void update(int frameNo, const VMatrix &parentMatrix, float parentAlpha,
+ const DirtyFlag &flag) final;
+ Object::Type type() const final { return Object::Type::Shape; }
+ bool dirty() const { return mDirtyPath; }
+ const VPath &localPath() const { return mTemp; }
+ void finalPath(VPath &result);
+ void updatePath(const VPath &path)
+ {
+ mTemp = path;
+ mDirtyPath = true;
+ }
+ bool staticPath() const { return mStaticPath; }
+ void setParent(Group *parent) { mParent = parent; }
+ Group *parent() const { return mParent; }
+
+protected:
+ virtual void updatePath(VPath &path, int frameNo) = 0;
+ virtual bool hasChanged(int prevFrame, int curFrame) = 0;
+
+private:
+ bool hasChanged(int frameNo)
+ {
+ int prevFrame = mFrameNo;
+ mFrameNo = frameNo;
+ if (prevFrame == -1) return true;
+ if (mStaticPath || (prevFrame == frameNo)) return false;
+ return hasChanged(prevFrame, frameNo);
+ }
+ Group *mParent{nullptr};
+ VPath mLocalPath;
+ VPath mTemp;
+ int mFrameNo{-1};
+ bool mDirtyPath{true};
+ bool mStaticPath;
+};
+
+class Rect final : public Shape {
+public:
+ explicit Rect(model::Rect *data);
+
+protected:
+ void updatePath(VPath &path, int frameNo) final;
+ model::Rect *mData{nullptr};
+
+ bool hasChanged(int prevFrame, int curFrame) final
+ {
+ return (mData->mPos.changed(prevFrame, curFrame) ||
+ mData->mSize.changed(prevFrame, curFrame) ||
+ mData->roundnessChanged(prevFrame, curFrame));
+ }
+};
+
+class Ellipse final : public Shape {
+public:
+ explicit Ellipse(model::Ellipse *data);
+
+private:
+ void updatePath(VPath &path, int frameNo) final;
+ model::Ellipse *mData{nullptr};
+ bool hasChanged(int prevFrame, int curFrame) final
+ {
+ return (mData->mPos.changed(prevFrame, curFrame) ||
+ mData->mSize.changed(prevFrame, curFrame));
+ }
+};
+
+class Path final : public Shape {
+public:
+ explicit Path(model::Path *data);
+
+private:
+ void updatePath(VPath &path, int frameNo) final;
+ model::Path *mData{nullptr};
+ bool hasChanged(int prevFrame, int curFrame) final
+ {
+ return mData->mShape.changed(prevFrame, curFrame);
+ }
+};
+
+class Polystar final : public Shape {
+public:
+ explicit Polystar(model::Polystar *data);
+
+private:
+ void updatePath(VPath &path, int frameNo) final;
+ model::Polystar *mData{nullptr};
+
+ bool hasChanged(int prevFrame, int curFrame) final
+ {
+ return (mData->mPos.changed(prevFrame, curFrame) ||
+ mData->mPointCount.changed(prevFrame, curFrame) ||
+ mData->mInnerRadius.changed(prevFrame, curFrame) ||
+ mData->mOuterRadius.changed(prevFrame, curFrame) ||
+ mData->mInnerRoundness.changed(prevFrame, curFrame) ||
+ mData->mOuterRoundness.changed(prevFrame, curFrame) ||
+ mData->mRotation.changed(prevFrame, curFrame));
+ }
+};
+
+class Paint : public Object {
+public:
+ Paint(bool staticContent);
+ void addPathItems(std::vector<Shape *> &list, size_t startOffset);
+ void update(int frameNo, const VMatrix &parentMatrix, float parentAlpha,
+ const DirtyFlag &flag) override;
+ void renderList(std::vector<VDrawable *> &list) final;
+ Object::Type type() const final { return Object::Type::Paint; }
+
+protected:
+ virtual bool updateContent(int frameNo, const VMatrix &matrix,
+ float alpha) = 0;
+
+private:
+ void updateRenderNode();
+
+protected:
+ std::vector<Shape *> mPathItems;
+ Drawable mDrawable;
+ VPath mPath;
+ DirtyFlag mFlag;
+ bool mStaticContent;
+ bool mRenderNodeUpdate{true};
+ bool mContentToRender{true};
+};
+
+class Fill final : public Paint {
+public:
+ explicit Fill(model::Fill *data);
+
+protected:
+ bool updateContent(int frameNo, const VMatrix &matrix, float alpha) final;
+ bool resolveKeyPath(LOTKeyPath &keyPath, uint depth,
+ LOTVariant &value) final;
+
+private:
+ model::Filter<model::Fill> mModel;
+};
+
+class GradientFill final : public Paint {
+public:
+ explicit GradientFill(model::GradientFill *data);
+
+protected:
+ bool updateContent(int frameNo, const VMatrix &matrix, float alpha) final;
+
+private:
+ model::GradientFill * mData{nullptr};
+ std::unique_ptr<VGradient> mGradient;
+};
+
+class Stroke : public Paint {
+public:
+ explicit Stroke(model::Stroke *data);
+
+protected:
+ bool updateContent(int frameNo, const VMatrix &matrix, float alpha) final;
+ bool resolveKeyPath(LOTKeyPath &keyPath, uint depth,
+ LOTVariant &value) final;
+
+private:
+ model::Filter<model::Stroke> mModel;
+};
+
+class GradientStroke final : public Paint {
+public:
+ explicit GradientStroke(model::GradientStroke *data);
+
+protected:
+ bool updateContent(int frameNo, const VMatrix &matrix, float alpha) final;
+
+private:
+ model::GradientStroke * mData{nullptr};
+ std::unique_ptr<VGradient> mGradient;
+};
+
+class Trim final : public Object {
+public:
+ explicit Trim(model::Trim *data) : mData(data) {}
+ void update(int frameNo, const VMatrix &parentMatrix, float parentAlpha,
+ const DirtyFlag &flag) final;
+ Object::Type type() const final { return Object::Type::Trim; }
+ void update();
+ void addPathItems(std::vector<Shape *> &list, size_t startOffset);
+
+private:
+ bool pathDirty() const
+ {
+ for (auto &i : mPathItems) {
+ if (i->dirty()) return true;
+ }
+ return false;
+ }
+ struct Cache {
+ int mFrameNo{-1};
+ model::Trim::Segment mSegment{};
+ };
+ Cache mCache;
+ std::vector<Shape *> mPathItems;
+ model::Trim * mData{nullptr};
+ VPathMesure mPathMesure;
+ bool mDirty{true};
+};
+
+class Repeater final : public Group {
+public:
+ explicit Repeater(model::Repeater *data, VArenaAlloc *allocator);
+ void update(int frameNo, const VMatrix &parentMatrix, float parentAlpha,
+ const DirtyFlag &flag) final;
+ void renderList(std::vector<VDrawable *> &list) final;
+
+private:
+ model::Repeater *mRepeaterData{nullptr};
+ bool mHidden{false};
+ int mCopies{0};
+};
+
+} // namespace renderer
+
+} // namespace internal
+
+} // namespace rlottie
+
+#endif // LOTTIEITEM_H
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_lottieitem_capi.cpp b/vendor/github.com/Benau/go_rlottie/lottie_lottieitem_capi.cpp
new file mode 100644
index 00000000..8ab8f842
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_lottieitem_capi.cpp
@@ -0,0 +1,339 @@
+/*
+ * Implements LottieItem functions needed
+ * to support renderTree() api.
+ * Moving all those implementation to its own
+ * file make clear separation as well easy of
+ * maintenance.
+ */
+
+#include "lottie_lottieitem.h"
+#include "vector_vdasher.h"
+
+using namespace rlottie::internal;
+
+renderer::CApiData::CApiData()
+{
+ mLayer.mMaskList.ptr = nullptr;
+ mLayer.mMaskList.size = 0;
+ mLayer.mLayerList.ptr = nullptr;
+ mLayer.mLayerList.size = 0;
+ mLayer.mNodeList.ptr = nullptr;
+ mLayer.mNodeList.size = 0;
+ mLayer.mMatte = MatteNone;
+ mLayer.mVisible = 0;
+ mLayer.mAlpha = 255;
+ mLayer.mClipPath.ptPtr = nullptr;
+ mLayer.mClipPath.elmPtr = nullptr;
+ mLayer.mClipPath.ptCount = 0;
+ mLayer.mClipPath.elmCount = 0;
+ mLayer.keypath = nullptr;
+}
+
+void renderer::Composition::buildRenderTree()
+{
+ mRootLayer->buildLayerNode();
+}
+
+const LOTLayerNode *renderer::Composition::renderTree() const
+{
+ return &mRootLayer->clayer();
+}
+
+void renderer::CompLayer::buildLayerNode()
+{
+ renderer::Layer::buildLayerNode();
+ if (mClipper) {
+ const auto &elm = mClipper->mPath.elements();
+ const auto &pts = mClipper->mPath.points();
+ auto ptPtr = reinterpret_cast<const float *>(pts.data());
+ auto elmPtr = reinterpret_cast<const char *>(elm.data());
+ clayer().mClipPath.ptPtr = ptPtr;
+ clayer().mClipPath.elmPtr = elmPtr;
+ clayer().mClipPath.ptCount = 2 * pts.size();
+ clayer().mClipPath.elmCount = elm.size();
+ }
+ if (mLayers.size() != clayers().size()) {
+ for (const auto &layer : mLayers) {
+ layer->buildLayerNode();
+ clayers().push_back(&layer->clayer());
+ }
+ clayer().mLayerList.ptr = clayers().data();
+ clayer().mLayerList.size = clayers().size();
+ } else {
+ for (const auto &layer : mLayers) {
+ layer->buildLayerNode();
+ }
+ }
+}
+
+void renderer::ShapeLayer::buildLayerNode()
+{
+ renderer::Layer::buildLayerNode();
+
+ auto renderlist = renderList();
+
+ cnodes().clear();
+ for (auto &i : renderlist) {
+ auto lotDrawable = static_cast<renderer::Drawable *>(i);
+ lotDrawable->sync();
+ cnodes().push_back(lotDrawable->mCNode.get());
+ }
+ clayer().mNodeList.ptr = cnodes().data();
+ clayer().mNodeList.size = cnodes().size();
+}
+
+void renderer::Layer::buildLayerNode()
+{
+ if (!mCApiData) {
+ mCApiData = std::make_unique<renderer::CApiData>();
+ clayer().keypath = name();
+ }
+ if (complexContent()) clayer().mAlpha = uchar(combinedAlpha() * 255.f);
+ clayer().mVisible = visible();
+ // update matte
+ if (hasMatte()) {
+ switch (mLayerData->mMatteType) {
+ case model::MatteType::Alpha:
+ clayer().mMatte = MatteAlpha;
+ break;
+ case model::MatteType::AlphaInv:
+ clayer().mMatte = MatteAlphaInv;
+ break;
+ case model::MatteType::Luma:
+ clayer().mMatte = MatteLuma;
+ break;
+ case model::MatteType::LumaInv:
+ clayer().mMatte = MatteLumaInv;
+ break;
+ default:
+ clayer().mMatte = MatteNone;
+ break;
+ }
+ }
+ if (mLayerMask) {
+ cmasks().clear();
+ cmasks().resize(mLayerMask->mMasks.size());
+ size_t i = 0;
+ for (const auto &mask : mLayerMask->mMasks) {
+ auto & cNode = cmasks()[i++];
+ const auto &elm = mask.mFinalPath.elements();
+ const auto &pts = mask.mFinalPath.points();
+ auto ptPtr = reinterpret_cast<const float *>(pts.data());
+ auto elmPtr = reinterpret_cast<const char *>(elm.data());
+ cNode.mPath.ptPtr = ptPtr;
+ cNode.mPath.ptCount = 2 * pts.size();
+ cNode.mPath.elmPtr = elmPtr;
+ cNode.mPath.elmCount = elm.size();
+ cNode.mAlpha = uchar(mask.mCombinedAlpha * 255.0f);
+ switch (mask.maskMode()) {
+ case model::Mask::Mode::Add:
+ cNode.mMode = MaskAdd;
+ break;
+ case model::Mask::Mode::Substarct:
+ cNode.mMode = MaskSubstract;
+ break;
+ case model::Mask::Mode::Intersect:
+ cNode.mMode = MaskIntersect;
+ break;
+ case model::Mask::Mode::Difference:
+ cNode.mMode = MaskDifference;
+ break;
+ default:
+ cNode.mMode = MaskAdd;
+ break;
+ }
+ }
+ clayer().mMaskList.ptr = cmasks().data();
+ clayer().mMaskList.size = cmasks().size();
+ }
+}
+
+void renderer::SolidLayer::buildLayerNode()
+{
+ renderer::Layer::buildLayerNode();
+
+ auto renderlist = renderList();
+
+ cnodes().clear();
+ for (auto &i : renderlist) {
+ auto lotDrawable = static_cast<renderer::Drawable *>(i);
+ lotDrawable->sync();
+ cnodes().push_back(lotDrawable->mCNode.get());
+ }
+ clayer().mNodeList.ptr = cnodes().data();
+ clayer().mNodeList.size = cnodes().size();
+}
+
+void renderer::ImageLayer::buildLayerNode()
+{
+ renderer::Layer::buildLayerNode();
+
+ auto renderlist = renderList();
+
+ cnodes().clear();
+ for (auto &i : renderlist) {
+ auto lotDrawable = static_cast<renderer::Drawable *>(i);
+ lotDrawable->sync();
+
+ lotDrawable->mCNode->mImageInfo.data =
+ lotDrawable->mBrush.mTexture->mBitmap.data();
+ lotDrawable->mCNode->mImageInfo.width =
+ int(lotDrawable->mBrush.mTexture->mBitmap.width());
+ lotDrawable->mCNode->mImageInfo.height =
+ int(lotDrawable->mBrush.mTexture->mBitmap.height());
+
+ lotDrawable->mCNode->mImageInfo.mMatrix.m11 = combinedMatrix().m_11();
+ lotDrawable->mCNode->mImageInfo.mMatrix.m12 = combinedMatrix().m_12();
+ lotDrawable->mCNode->mImageInfo.mMatrix.m13 = combinedMatrix().m_13();
+
+ lotDrawable->mCNode->mImageInfo.mMatrix.m21 = combinedMatrix().m_21();
+ lotDrawable->mCNode->mImageInfo.mMatrix.m22 = combinedMatrix().m_22();
+ lotDrawable->mCNode->mImageInfo.mMatrix.m23 = combinedMatrix().m_23();
+
+ lotDrawable->mCNode->mImageInfo.mMatrix.m31 = combinedMatrix().m_tx();
+ lotDrawable->mCNode->mImageInfo.mMatrix.m32 = combinedMatrix().m_ty();
+ lotDrawable->mCNode->mImageInfo.mMatrix.m33 = combinedMatrix().m_33();
+
+ // Alpha calculation already combined.
+ lotDrawable->mCNode->mImageInfo.mAlpha =
+ uchar(lotDrawable->mBrush.mTexture->mAlpha);
+
+ cnodes().push_back(lotDrawable->mCNode.get());
+ }
+ clayer().mNodeList.ptr = cnodes().data();
+ clayer().mNodeList.size = cnodes().size();
+}
+
+static void updateGStops(LOTNode *n, const VGradient *grad)
+{
+ if (grad->mStops.size() != n->mGradient.stopCount) {
+ if (n->mGradient.stopCount) free(n->mGradient.stopPtr);
+ n->mGradient.stopCount = grad->mStops.size();
+ n->mGradient.stopPtr = (LOTGradientStop *)malloc(
+ n->mGradient.stopCount * sizeof(LOTGradientStop));
+ }
+
+ LOTGradientStop *ptr = n->mGradient.stopPtr;
+ for (const auto &i : grad->mStops) {
+ ptr->pos = i.first;
+ ptr->a = uchar(i.second.alpha() * grad->alpha());
+ ptr->r = i.second.red();
+ ptr->g = i.second.green();
+ ptr->b = i.second.blue();
+ ptr++;
+ }
+}
+
+void renderer::Drawable::sync()
+{
+ if (!mCNode) {
+ mCNode = std::make_unique<LOTNode>();
+ mCNode->mGradient.stopPtr = nullptr;
+ mCNode->mGradient.stopCount = 0;
+ }
+
+ mCNode->mFlag = ChangeFlagNone;
+ if (mFlag & DirtyState::None) return;
+
+ if (mFlag & DirtyState::Path) {
+ applyDashOp();
+ const std::vector<VPath::Element> &elm = mPath.elements();
+ const std::vector<VPointF> & pts = mPath.points();
+ const float *ptPtr = reinterpret_cast<const float *>(pts.data());
+ const char * elmPtr = reinterpret_cast<const char *>(elm.data());
+ mCNode->mPath.elmPtr = elmPtr;
+ mCNode->mPath.elmCount = elm.size();
+ mCNode->mPath.ptPtr = ptPtr;
+ mCNode->mPath.ptCount = 2 * pts.size();
+ mCNode->mFlag |= ChangeFlagPath;
+ mCNode->keypath = name();
+ }
+
+ if (mStrokeInfo) {
+ mCNode->mStroke.width = mStrokeInfo->width;
+ mCNode->mStroke.miterLimit = mStrokeInfo->miterLimit;
+ mCNode->mStroke.enable = 1;
+
+ switch (mStrokeInfo->cap) {
+ case CapStyle::Flat:
+ mCNode->mStroke.cap = LOTCapStyle::CapFlat;
+ break;
+ case CapStyle::Square:
+ mCNode->mStroke.cap = LOTCapStyle::CapSquare;
+ break;
+ case CapStyle::Round:
+ mCNode->mStroke.cap = LOTCapStyle::CapRound;
+ break;
+ }
+
+ switch (mStrokeInfo->join) {
+ case JoinStyle::Miter:
+ mCNode->mStroke.join = LOTJoinStyle::JoinMiter;
+ break;
+ case JoinStyle::Bevel:
+ mCNode->mStroke.join = LOTJoinStyle::JoinBevel;
+ break;
+ case JoinStyle::Round:
+ mCNode->mStroke.join = LOTJoinStyle::JoinRound;
+ break;
+ default:
+ mCNode->mStroke.join = LOTJoinStyle::JoinMiter;
+ break;
+ }
+ } else {
+ mCNode->mStroke.enable = 0;
+ }
+
+ switch (mFillRule) {
+ case FillRule::EvenOdd:
+ mCNode->mFillRule = LOTFillRule::FillEvenOdd;
+ break;
+ default:
+ mCNode->mFillRule = LOTFillRule::FillWinding;
+ break;
+ }
+
+ switch (mBrush.type()) {
+ case VBrush::Type::Solid:
+ mCNode->mBrushType = LOTBrushType::BrushSolid;
+ mCNode->mColor.r = mBrush.mColor.r;
+ mCNode->mColor.g = mBrush.mColor.g;
+ mCNode->mColor.b = mBrush.mColor.b;
+ mCNode->mColor.a = mBrush.mColor.a;
+ break;
+ case VBrush::Type::LinearGradient: {
+ mCNode->mBrushType = LOTBrushType::BrushGradient;
+ mCNode->mGradient.type = LOTGradientType::GradientLinear;
+ VPointF s = mBrush.mGradient->mMatrix.map(
+ {mBrush.mGradient->linear.x1, mBrush.mGradient->linear.y1});
+ VPointF e = mBrush.mGradient->mMatrix.map(
+ {mBrush.mGradient->linear.x2, mBrush.mGradient->linear.y2});
+ mCNode->mGradient.start.x = s.x();
+ mCNode->mGradient.start.y = s.y();
+ mCNode->mGradient.end.x = e.x();
+ mCNode->mGradient.end.y = e.y();
+ updateGStops(mCNode.get(), mBrush.mGradient);
+ break;
+ }
+ case VBrush::Type::RadialGradient: {
+ mCNode->mBrushType = LOTBrushType::BrushGradient;
+ mCNode->mGradient.type = LOTGradientType::GradientRadial;
+ VPointF c = mBrush.mGradient->mMatrix.map(
+ {mBrush.mGradient->radial.cx, mBrush.mGradient->radial.cy});
+ VPointF f = mBrush.mGradient->mMatrix.map(
+ {mBrush.mGradient->radial.fx, mBrush.mGradient->radial.fy});
+ mCNode->mGradient.center.x = c.x();
+ mCNode->mGradient.center.y = c.y();
+ mCNode->mGradient.focal.x = f.x();
+ mCNode->mGradient.focal.y = f.y();
+
+ float scale = mBrush.mGradient->mMatrix.scale();
+ mCNode->mGradient.cradius = mBrush.mGradient->radial.cradius * scale;
+ mCNode->mGradient.fradius = mBrush.mGradient->radial.fradius * scale;
+ updateGStops(mCNode.get(), mBrush.mGradient);
+ break;
+ }
+ default:
+ break;
+ }
+}
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_lottiekeypath.cpp b/vendor/github.com/Benau/go_rlottie/lottie_lottiekeypath.cpp
new file mode 100644
index 00000000..4fd090e9
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_lottiekeypath.cpp
@@ -0,0 +1,86 @@
+#include "lottie_lottiekeypath.h"
+
+#include <sstream>
+
+LOTKeyPath::LOTKeyPath(const std::string &keyPath)
+{
+ std::stringstream ss(keyPath);
+ std::string item;
+
+ while (getline(ss, item, '.')) {
+ mKeys.push_back(item);
+ }
+}
+
+bool LOTKeyPath::matches(const std::string &key, uint depth)
+{
+ if (skip(key)) {
+ // This is an object we programatically create.
+ return true;
+ }
+ if (depth > size()) {
+ return false;
+ }
+ if ((mKeys[depth] == key) || (mKeys[depth] == "*") ||
+ (mKeys[depth] == "**")) {
+ return true;
+ }
+ return false;
+}
+
+uint LOTKeyPath::nextDepth(const std::string key, uint depth)
+{
+ if (skip(key)) {
+ // If it's a container then we added programatically and it isn't a part
+ // of the keypath.
+ return depth;
+ }
+ if (mKeys[depth] != "**") {
+ // If it's not a globstar then it is part of the keypath.
+ return depth + 1;
+ }
+ if (depth == size()) {
+ // The last key is a globstar.
+ return depth;
+ }
+ if (mKeys[depth + 1] == key) {
+ // We are a globstar and the next key is our current key so consume
+ // both.
+ return depth + 2;
+ }
+ return depth;
+}
+
+bool LOTKeyPath::fullyResolvesTo(const std::string key, uint depth)
+{
+ if (depth > mKeys.size()) {
+ return false;
+ }
+
+ bool isLastDepth = (depth == size());
+
+ if (!isGlobstar(depth)) {
+ bool matches = (mKeys[depth] == key) || isGlob(depth);
+ return (isLastDepth || (depth == size() - 1 && endsWithGlobstar())) &&
+ matches;
+ }
+
+ bool isGlobstarButNextKeyMatches = !isLastDepth && mKeys[depth + 1] == key;
+ if (isGlobstarButNextKeyMatches) {
+ return depth == size() - 1 ||
+ (depth == size() - 2 && endsWithGlobstar());
+ }
+
+ if (isLastDepth) {
+ return true;
+ }
+
+ if (depth + 1 < size()) {
+ // We are a globstar but there is more than 1 key after the globstar we
+ // we can't fully match.
+ return false;
+ }
+ // Return whether the next key (which we now know is the last one) is the
+ // same as the current key.
+ return mKeys[depth + 1] == key;
+}
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_lottiekeypath.h b/vendor/github.com/Benau/go_rlottie/lottie_lottiekeypath.h
new file mode 100644
index 00000000..c2fd8511
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_lottiekeypath.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef LOTTIEKEYPATH_H
+#define LOTTIEKEYPATH_H
+
+#include <string>
+#include <vector>
+#include "vector_vglobal.h"
+
+class LOTKeyPath {
+public:
+ LOTKeyPath(const std::string &keyPath);
+ bool matches(const std::string &key, uint depth);
+ uint nextDepth(const std::string key, uint depth);
+ bool fullyResolvesTo(const std::string key, uint depth);
+
+ bool propagate(const std::string key, uint depth)
+ {
+ return skip(key) ? true : (depth < size()) || (mKeys[depth] == "**");
+ }
+ bool skip(const std::string &key) const { return key == "__"; }
+
+private:
+ bool isGlobstar(uint depth) const { return mKeys[depth] == "**"; }
+ bool isGlob(uint depth) const { return mKeys[depth] == "*"; }
+ bool endsWithGlobstar() const { return mKeys.back() == "**"; }
+ size_t size() const { return mKeys.size() - 1; }
+
+private:
+ std::vector<std::string> mKeys;
+};
+
+#endif // LOTTIEKEYPATH_H
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_lottieloader.cpp b/vendor/github.com/Benau/go_rlottie/lottie_lottieloader.cpp
new file mode 100644
index 00000000..cfcd8f84
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_lottieloader.cpp
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstring>
+#include <fstream>
+#include <sstream>
+
+#include "lottie_lottiemodel.h"
+
+using namespace rlottie::internal;
+
+#ifdef LOTTIE_CACHE_SUPPORT
+
+#include <mutex>
+#include <unordered_map>
+
+class ModelCache {
+public:
+ static ModelCache &instance()
+ {
+ static ModelCache singleton;
+ return singleton;
+ }
+ std::shared_ptr<model::Composition> find(const std::string &key)
+ {
+ std::lock_guard<std::mutex> guard(mMutex);
+
+ if (!mcacheSize) return nullptr;
+
+ auto search = mHash.find(key);
+
+ return (search != mHash.end()) ? search->second : nullptr;
+ }
+ void add(const std::string &key, std::shared_ptr<model::Composition> value)
+ {
+ std::lock_guard<std::mutex> guard(mMutex);
+
+ if (!mcacheSize) return;
+
+ //@TODO just remove the 1st element
+ // not the best of LRU logic
+ if (mcacheSize == mHash.size()) mHash.erase(mHash.cbegin());
+
+ mHash[key] = std::move(value);
+ }
+
+ void configureCacheSize(size_t cacheSize)
+ {
+ std::lock_guard<std::mutex> guard(mMutex);
+ mcacheSize = cacheSize;
+
+ if (!mcacheSize) mHash.clear();
+ }
+
+private:
+ ModelCache() = default;
+
+ std::unordered_map<std::string, std::shared_ptr<model::Composition>> mHash;
+ std::mutex mMutex;
+ size_t mcacheSize{10};
+};
+
+#else
+
+class ModelCache {
+public:
+ static ModelCache &instance()
+ {
+ static ModelCache singleton;
+ return singleton;
+ }
+ std::shared_ptr<model::Composition> find(const std::string &)
+ {
+ return nullptr;
+ }
+ void add(const std::string &, std::shared_ptr<model::Composition>) {}
+ void configureCacheSize(size_t) {}
+};
+
+#endif
+
+static std::string dirname(const std::string &path)
+{
+ const char *ptr = strrchr(path.c_str(), '/');
+#ifdef _WIN32
+ if (ptr) ptr = strrchr(ptr + 1, '\\');
+#endif
+ int len = int(ptr + 1 - path.c_str()); // +1 to include '/'
+ return std::string(path, 0, len);
+}
+
+void model::configureModelCacheSize(size_t cacheSize)
+{
+ ModelCache::instance().configureCacheSize(cacheSize);
+}
+
+std::shared_ptr<model::Composition> model::loadFromFile(const std::string &path,
+ bool cachePolicy)
+{
+ if (cachePolicy) {
+ auto obj = ModelCache::instance().find(path);
+ if (obj) return obj;
+ }
+
+ std::ifstream f;
+ f.open(path);
+
+ if (!f.is_open()) {
+ vCritical << "failed to open file = " << path.c_str();
+ return {};
+ } else {
+ std::string content;
+
+ std::getline(f, content, '\0');
+ f.close();
+
+ if (content.empty()) return {};
+
+ auto obj = internal::model::parse(const_cast<char *>(content.c_str()),
+ dirname(path));
+
+ if (obj && cachePolicy) ModelCache::instance().add(path, obj);
+
+ return obj;
+ }
+}
+
+std::shared_ptr<model::Composition> model::loadFromData(
+ std::string jsonData, const std::string &key, std::string resourcePath,
+ bool cachePolicy)
+{
+ if (cachePolicy) {
+ auto obj = ModelCache::instance().find(key);
+ if (obj) return obj;
+ }
+
+ auto obj = internal::model::parse(const_cast<char *>(jsonData.c_str()),
+ std::move(resourcePath));
+
+ if (obj && cachePolicy) ModelCache::instance().add(key, obj);
+
+ return obj;
+}
+
+std::shared_ptr<model::Composition> model::loadFromData(
+ std::string jsonData, std::string resourcePath, model::ColorFilter filter)
+{
+ return internal::model::parse(const_cast<char *>(jsonData.c_str()),
+ std::move(resourcePath), std::move(filter));
+}
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_lottiemodel.cpp b/vendor/github.com/Benau/go_rlottie/lottie_lottiemodel.cpp
new file mode 100644
index 00000000..0389e8aa
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_lottiemodel.cpp
@@ -0,0 +1,390 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "lottie_lottiemodel.h"
+#include <cassert>
+#include <iterator>
+#include <stack>
+#include "vector_vimageloader.h"
+#include "vector_vline.h"
+
+using namespace rlottie::internal;
+
+/*
+ * We process the iterator objects in the children list
+ * by iterating from back to front. When we find a repeater object
+ * we remove the objects from start till the repeater object and then place
+ * them under a new shape group object which we add as a child to the repeater
+ * object.
+ * Then we visit the children of the newly created shape group object to
+ * process the remaining repeater objects (when the children list contains
+ * more than one repeater).
+ *
+ */
+class LottieRepeaterProcesser {
+public:
+ void visitChildren(model::Group *obj)
+ {
+ for (auto i = obj->mChildren.rbegin(); i != obj->mChildren.rend();
+ ++i) {
+ auto child = (*i);
+ if (child->type() == model::Object::Type::Repeater) {
+ model::Repeater *repeater =
+ static_cast<model::Repeater *>(child);
+ // check if this repeater is already processed
+ // can happen if the layer is an asset and referenced by
+ // multiple layer.
+ if (repeater->processed()) continue;
+
+ repeater->markProcessed();
+
+ auto content = repeater->content();
+ // 1. increment the reverse iterator to point to the
+ // object before the repeater
+ ++i;
+            // 2. move all the children till repeater to the group
+ std::move(obj->mChildren.begin(), i.base(),
+ back_inserter(content->mChildren));
+ // 3. erase the objects from the original children list
+ obj->mChildren.erase(obj->mChildren.begin(), i.base());
+
+ // 5. visit newly created group to process remaining repeater
+ // object.
+ visitChildren(content);
+ // 6. exit the loop as the current iterators are invalid
+ break;
+ }
+ visit(child);
+ }
+ }
+
+ void visit(model::Object *obj)
+ {
+ switch (obj->type()) {
+ case model::Object::Type::Group:
+ case model::Object::Type::Layer: {
+ visitChildren(static_cast<model::Group *>(obj));
+ break;
+ }
+ default:
+ break;
+ }
+ }
+};
+
+class LottieUpdateStatVisitor {
+ model::Composition::Stats *stat;
+
+public:
+ explicit LottieUpdateStatVisitor(model::Composition::Stats *s) : stat(s) {}
+ void visitChildren(model::Group *obj)
+ {
+ for (const auto &child : obj->mChildren) {
+ if (child) visit(child);
+ }
+ }
+ void visitLayer(model::Layer *layer)
+ {
+ switch (layer->mLayerType) {
+ case model::Layer::Type::Precomp:
+ stat->precompLayerCount++;
+ break;
+ case model::Layer::Type::Null:
+ stat->nullLayerCount++;
+ break;
+ case model::Layer::Type::Shape:
+ stat->shapeLayerCount++;
+ break;
+ case model::Layer::Type::Solid:
+ stat->solidLayerCount++;
+ break;
+ case model::Layer::Type::Image:
+ stat->imageLayerCount++;
+ break;
+ default:
+ break;
+ }
+ visitChildren(layer);
+ }
+ void visit(model::Object *obj)
+ {
+ switch (obj->type()) {
+ case model::Object::Type::Layer: {
+ visitLayer(static_cast<model::Layer *>(obj));
+ break;
+ }
+ case model::Object::Type::Repeater: {
+ visitChildren(static_cast<model::Repeater *>(obj)->content());
+ break;
+ }
+ case model::Object::Type::Group: {
+ visitChildren(static_cast<model::Group *>(obj));
+ break;
+ }
+ default:
+ break;
+ }
+ }
+};
+
+void model::Composition::processRepeaterObjects()
+{
+ LottieRepeaterProcesser visitor;
+ visitor.visit(mRootLayer);
+}
+
+void model::Composition::updateStats()
+{
+ LottieUpdateStatVisitor visitor(&mStats);
+ visitor.visit(mRootLayer);
+}
+
+VMatrix model::Repeater::Transform::matrix(int frameNo, float multiplier) const
+{
+ VPointF scale = mScale.value(frameNo) / 100.f;
+ scale.setX(std::pow(scale.x(), multiplier));
+ scale.setY(std::pow(scale.y(), multiplier));
+ VMatrix m;
+ m.translate(mPosition.value(frameNo) * multiplier)
+ .translate(mAnchor.value(frameNo))
+ .scale(scale)
+ .rotate(mRotation.value(frameNo) * multiplier)
+ .translate(-mAnchor.value(frameNo));
+
+ return m;
+}
+
+VMatrix model::Transform::Data::matrix(int frameNo, bool autoOrient) const
+{
+ VMatrix m;
+ VPointF position;
+ if (mExtra && mExtra->mSeparate) {
+ position.setX(mExtra->mSeparateX.value(frameNo));
+ position.setY(mExtra->mSeparateY.value(frameNo));
+ } else {
+ position = mPosition.value(frameNo);
+ }
+
+ float angle = autoOrient ? mPosition.angle(frameNo) : 0;
+ if (mExtra && mExtra->m3DData) {
+ m.translate(position)
+ .rotate(mExtra->m3DRz.value(frameNo) + angle)
+ .rotate(mExtra->m3DRy.value(frameNo), VMatrix::Axis::Y)
+ .rotate(mExtra->m3DRx.value(frameNo), VMatrix::Axis::X)
+ .scale(mScale.value(frameNo) / 100.f)
+ .translate(-mAnchor.value(frameNo));
+ } else {
+ m.translate(position)
+ .rotate(mRotation.value(frameNo) + angle)
+ .scale(mScale.value(frameNo) / 100.f)
+ .translate(-mAnchor.value(frameNo));
+ }
+ return m;
+}
+
+void model::Dash::getDashInfo(int frameNo, std::vector<float> &result) const
+{
+ result.clear();
+
+ if (mData.size() <= 1) return;
+
+ if (result.capacity() < mData.size()) result.reserve(mData.size() + 1);
+
+ for (const auto &elm : mData) result.push_back(elm.value(frameNo));
+
+    // If the size is even then we are missing the last
+    // gap information, which is the same as the last dash value;
+    // copy it from the last dash value.
+    // NOTE: the last value is the offset and last-1 is the last dash value.
+ auto size = result.size();
+ if ((size % 2) == 0) {
+ // copy offset value to end.
+ result.push_back(result.back());
+ // copy dash value to gap.
+ result[size - 1] = result[size - 2];
+ }
+}
+
+/**
+ * Both the color stops and opacity stops are in the same array.
+ * There are {@link #colorPoints} colors sequentially as:
+ * [
+ * ...,
+ * position,
+ * red,
+ * green,
+ * blue,
+ * ...
+ * ]
+ *
+ * The remainder of the array is the opacity stops sequentially as:
+ * [
+ * ...,
+ * position,
+ * opacity,
+ * ...
+ * ]
+ */
+void model::Gradient::populate(VGradientStops &stops, int frameNo)
+{
+ model::Gradient::Data gradData = mGradient.value(frameNo);
+ auto size = gradData.mGradient.size();
+ float * ptr = gradData.mGradient.data();
+ int colorPoints = mColorPoints;
+ if (colorPoints == -1) { // for legacy bodymovin (ref: lottie-android)
+ colorPoints = int(size / 4);
+ }
+ auto opacityArraySize = size - colorPoints * 4;
+ float *opacityPtr = ptr + (colorPoints * 4);
+ stops.clear();
+ size_t j = 0;
+ for (int i = 0; i < colorPoints; i++) {
+ float colorStop = ptr[0];
+ model::Color color = model::Color(ptr[1], ptr[2], ptr[3]);
+ if (opacityArraySize) {
+ if (j == opacityArraySize) {
+ // already reached the end
+ float stop1 = opacityPtr[j - 4];
+ float op1 = opacityPtr[j - 3];
+ float stop2 = opacityPtr[j - 2];
+ float op2 = opacityPtr[j - 1];
+ if (colorStop > stop2) {
+ stops.push_back(
+ std::make_pair(colorStop, color.toColor(op2)));
+ } else {
+ float progress = (colorStop - stop1) / (stop2 - stop1);
+ float opacity = op1 + progress * (op2 - op1);
+ stops.push_back(
+ std::make_pair(colorStop, color.toColor(opacity)));
+ }
+ continue;
+ }
+ for (; j < opacityArraySize; j += 2) {
+ float opacityStop = opacityPtr[j];
+ if (opacityStop < colorStop) {
+ // add a color using opacity stop
+ stops.push_back(std::make_pair(
+ opacityStop, color.toColor(opacityPtr[j + 1])));
+ continue;
+ }
+ // add a color using color stop
+ if (j == 0) {
+ stops.push_back(std::make_pair(
+ colorStop, color.toColor(opacityPtr[j + 1])));
+ } else {
+ float progress = (colorStop - opacityPtr[j - 2]) /
+ (opacityPtr[j] - opacityPtr[j - 2]);
+ float opacity =
+ opacityPtr[j - 1] +
+ progress * (opacityPtr[j + 1] - opacityPtr[j - 1]);
+ stops.push_back(
+ std::make_pair(colorStop, color.toColor(opacity)));
+ }
+ j += 2;
+ break;
+ }
+ } else {
+ stops.push_back(std::make_pair(colorStop, color.toColor()));
+ }
+ ptr += 4;
+ }
+}
+
+void model::Gradient::update(std::unique_ptr<VGradient> &grad, int frameNo)
+{
+ bool init = false;
+ if (!grad) {
+ if (mGradientType == 1)
+ grad = std::make_unique<VGradient>(VGradient::Type::Linear);
+ else
+ grad = std::make_unique<VGradient>(VGradient::Type::Radial);
+ grad->mSpread = VGradient::Spread::Pad;
+ init = true;
+ }
+
+ if (!mGradient.isStatic() || init) {
+ populate(grad->mStops, frameNo);
+ }
+
+ if (mGradientType == 1) { // linear gradient
+ VPointF start = mStartPoint.value(frameNo);
+ VPointF end = mEndPoint.value(frameNo);
+ grad->linear.x1 = start.x();
+ grad->linear.y1 = start.y();
+ grad->linear.x2 = end.x();
+ grad->linear.y2 = end.y();
+ } else { // radial gradient
+ VPointF start = mStartPoint.value(frameNo);
+ VPointF end = mEndPoint.value(frameNo);
+ grad->radial.cx = start.x();
+ grad->radial.cy = start.y();
+ grad->radial.cradius =
+ VLine::length(start.x(), start.y(), end.x(), end.y());
+ /*
+         * The focal point lies at highlight-length distance from the center
+         * along the line (start, end), rotated by the highlight angle.
+         * The calculation below first finds the quadrant (angle) on which the
+         * point lies by applying the inverse slope formula, then adds the
+         * rotation angle to find the final angle. The point is then retrieved
+         * using the circle equation of center, angle and distance.
+ */
+ float progress = mHighlightLength.value(frameNo) / 100.0f;
+ if (vCompare(progress, 1.0f)) progress = 0.99f;
+ float startAngle = VLine(start, end).angle();
+ float highlightAngle = mHighlightAngle.value(frameNo);
+ static constexpr float K_PI = 3.1415926f;
+ float angle = (startAngle + highlightAngle) * (K_PI / 180.0f);
+ grad->radial.fx =
+ grad->radial.cx + std::cos(angle) * progress * grad->radial.cradius;
+ grad->radial.fy =
+ grad->radial.cy + std::sin(angle) * progress * grad->radial.cradius;
+        // Lottie doesn't have any focal radius concept.
+ grad->radial.fradius = 0;
+ }
+}
+
+void model::Asset::loadImageData(std::string data)
+{
+ if (!data.empty())
+ mBitmap = VImageLoader::instance().load(data.c_str(), data.length());
+}
+
+void model::Asset::loadImagePath(std::string path)
+{
+ if (!path.empty()) mBitmap = VImageLoader::instance().load(path.c_str());
+}
+
+std::vector<LayerInfo> model::Composition::layerInfoList() const
+{
+ if (!mRootLayer || mRootLayer->mChildren.empty()) return {};
+
+ std::vector<LayerInfo> result;
+
+ result.reserve(mRootLayer->mChildren.size());
+
+ for (auto it : mRootLayer->mChildren) {
+ auto layer = static_cast<model::Layer *>(it);
+ result.emplace_back(layer->name(), layer->mInFrame, layer->mOutFrame);
+ }
+
+ return result;
+}
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_lottiemodel.h b/vendor/github.com/Benau/go_rlottie/lottie_lottiemodel.h
new file mode 100644
index 00000000..defec500
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_lottiemodel.h
@@ -0,0 +1,1148 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef LOTModel_H
+#define LOTModel_H
+
+#include <algorithm>
+#include <cmath>
+#include <cstring>
+#include <functional>
+#include <memory>
+#include <unordered_map>
+#include <vector>
+#include "vector_varenaalloc.h"
+#include "vector_vbezier.h"
+#include "vector_vbrush.h"
+#include "vector_vinterpolator.h"
+#include "vector_vmatrix.h"
+#include "vector_vpath.h"
+#include "vector_vpoint.h"
+#include "vector_vrect.h"
+
+V_USE_NAMESPACE
+
+namespace rlottie {
+
+namespace internal {
+
+using Marker = std::tuple<std::string, int, int>;
+
+using LayerInfo = Marker;
+
+template <typename T>
+inline T lerp(const T &start, const T &end, float t)
+{
+ return start + t * (end - start);
+}
+
+namespace model {
+
+enum class MatteType : uchar { None = 0, Alpha = 1, AlphaInv, Luma, LumaInv };
+
+enum class BlendMode : uchar {
+ Normal = 0,
+ Multiply = 1,
+ Screen = 2,
+ OverLay = 3
+};
+
+class Color {
+public:
+ Color() = default;
+ Color(float red, float green, float blue) : r(red), g(green), b(blue) {}
+ VColor toColor(float a = 1)
+ {
+ return VColor(uchar(255 * r), uchar(255 * g), uchar(255 * b),
+ uchar(255 * a));
+ }
+ friend inline Color operator+(const Color &c1, const Color &c2);
+ friend inline Color operator-(const Color &c1, const Color &c2);
+
+public:
+ float r{1};
+ float g{1};
+ float b{1};
+};
+
+inline Color operator-(const Color &c1, const Color &c2)
+{
+ return Color(c1.r - c2.r, c1.g - c2.g, c1.b - c2.b);
+}
+inline Color operator+(const Color &c1, const Color &c2)
+{
+ return Color(c1.r + c2.r, c1.g + c2.g, c1.b + c2.b);
+}
+
+inline const Color operator*(const Color &c, float m)
+{
+ return Color(c.r * m, c.g * m, c.b * m);
+}
+
+inline const Color operator*(float m, const Color &c)
+{
+ return Color(c.r * m, c.g * m, c.b * m);
+}
+
+struct PathData {
+ std::vector<VPointF> mPoints;
+ bool mClosed = false; /* "c" */
+ void reserve(size_t size) { mPoints.reserve(mPoints.size() + size); }
+ static void lerp(const PathData &start, const PathData &end, float t,
+ VPath &result)
+ {
+ result.reset();
+ // test for empty animation data.
+ if (start.mPoints.empty() || end.mPoints.empty())
+ {
+ return;
+ }
+ auto size = std::min(start.mPoints.size(), end.mPoints.size());
+ /* reserve exact memory requirement at once
+ * ptSize = size + 1(size + close)
+ * elmSize = size/3 cubic + 1 move + 1 close
+ */
+ result.reserve(size + 1, size / 3 + 2);
+ result.moveTo(start.mPoints[0] +
+ t * (end.mPoints[0] - start.mPoints[0]));
+ for (size_t i = 1; i < size; i += 3) {
+ result.cubicTo(
+ start.mPoints[i] + t * (end.mPoints[i] - start.mPoints[i]),
+ start.mPoints[i + 1] +
+ t * (end.mPoints[i + 1] - start.mPoints[i + 1]),
+ start.mPoints[i + 2] +
+ t * (end.mPoints[i + 2] - start.mPoints[i + 2]));
+ }
+ if (start.mClosed) result.close();
+ }
+ void toPath(VPath &path) const
+ {
+ path.reset();
+
+ if (mPoints.empty()) return;
+
+ auto size = mPoints.size();
+ auto points = mPoints.data();
+ /* reserve exact memory requirement at once
+ * ptSize = size + 1(size + close)
+ * elmSize = size/3 cubic + 1 move + 1 close
+ */
+ path.reserve(size + 1, size / 3 + 2);
+ path.moveTo(points[0]);
+ for (size_t i = 1; i < size; i += 3) {
+ path.cubicTo(points[i], points[i + 1], points[i + 2]);
+ }
+ if (mClosed) path.close();
+ }
+};
+
+template <typename T, typename Tag = void>
+struct Value {
+ T start_;
+ T end_;
+ T at(float t) const { return lerp(start_, end_, t); }
+ float angle(float) const { return 0; }
+ void cache() {}
+};
+
+struct Position;
+
+template <typename T>
+struct Value<T, Position> {
+ T start_;
+ T end_;
+ T inTangent_;
+ T outTangent_;
+ float length_{0};
+ bool hasTangent_{false};
+
+ void cache()
+ {
+ if (hasTangent_) {
+ inTangent_ = end_ + inTangent_;
+ outTangent_ = start_ + outTangent_;
+ length_ = VBezier::fromPoints(start_, outTangent_, inTangent_, end_)
+ .length();
+ if (vIsZero(length_)) {
+                // this segment has zero length,
+                // so disable expensive path computation.
+ hasTangent_ = false;
+ }
+ }
+ }
+
+ T at(float t) const
+ {
+ if (hasTangent_) {
+ /*
+             * position along the path calculated
+ * using bezier at progress length (t * bezlen)
+ */
+ VBezier b =
+ VBezier::fromPoints(start_, outTangent_, inTangent_, end_);
+ return b.pointAt(b.tAtLength(t * length_, length_));
+ }
+ return lerp(start_, end_, t);
+ }
+
+ float angle(float t) const
+ {
+ if (hasTangent_) {
+ VBezier b =
+ VBezier::fromPoints(start_, outTangent_, inTangent_, end_);
+ return b.angleAt(b.tAtLength(t * length_, length_));
+ }
+ return 0;
+ }
+};
+
+template <typename T, typename Tag>
+class KeyFrames {
+public:
+ struct Frame {
+ float progress(int frameNo) const
+ {
+ return interpolator_ ? interpolator_->value((frameNo - start_) /
+ (end_ - start_))
+ : 0;
+ }
+ T value(int frameNo) const { return value_.at(progress(frameNo)); }
+ float angle(int frameNo) const
+ {
+ return value_.angle(progress(frameNo));
+ }
+
+ float start_{0};
+ float end_{0};
+ VInterpolator *interpolator_{nullptr};
+ Value<T, Tag> value_;
+ };
+
+ T value(int frameNo) const
+ {
+ if (frames_.front().start_ >= frameNo)
+ return frames_.front().value_.start_;
+ if (frames_.back().end_ <= frameNo) return frames_.back().value_.end_;
+
+ for (const auto &keyFrame : frames_) {
+ if (frameNo >= keyFrame.start_ && frameNo < keyFrame.end_)
+ return keyFrame.value(frameNo);
+ }
+ return {};
+ }
+
+ float angle(int frameNo) const
+ {
+ if ((frames_.front().start_ >= frameNo) ||
+ (frames_.back().end_ <= frameNo))
+ return 0;
+
+ for (const auto &frame : frames_) {
+ if (frameNo >= frame.start_ && frameNo < frame.end_)
+ return frame.angle(frameNo);
+ }
+ return 0;
+ }
+
+ bool changed(int prevFrame, int curFrame) const
+ {
+ auto first = frames_.front().start_;
+ auto last = frames_.back().end_;
+
+ return !((first > prevFrame && first > curFrame) ||
+ (last < prevFrame && last < curFrame));
+ }
+ void cache()
+ {
+ for (auto &e : frames_) e.value_.cache();
+ }
+
+public:
+ std::vector<Frame> frames_;
+};
+
+template <typename T, typename Tag = void>
+class Property {
+public:
+ using Animation = KeyFrames<T, Tag>;
+
+ Property() { construct(impl_.value_, {}); }
+ explicit Property(T value) { construct(impl_.value_, std::move(value)); }
+
+ const Animation &animation() const { return *(impl_.animation_.get()); }
+ const T & value() const { return impl_.value_; }
+
+ Animation &animation()
+ {
+ if (isValue_) {
+ destroy();
+ construct(impl_.animation_, std::make_unique<Animation>());
+ isValue_ = false;
+ }
+ return *(impl_.animation_.get());
+ }
+
+ T &value()
+ {
+ assert(isValue_);
+ return impl_.value_;
+ }
+
+ Property(Property &&other) noexcept
+ {
+ if (!other.isValue_) {
+ construct(impl_.animation_, std::move(other.impl_.animation_));
+ isValue_ = false;
+ } else {
+ construct(impl_.value_, std::move(other.impl_.value_));
+ isValue_ = true;
+ }
+ }
+ // delete special member functions
+ Property(const Property &) = delete;
+ Property &operator=(const Property &) = delete;
+ Property &operator=(Property &&) = delete;
+
+ ~Property() { destroy(); }
+
+ bool isStatic() const { return isValue_; }
+
+ T value(int frameNo) const
+ {
+ return isStatic() ? value() : animation().value(frameNo);
+ }
+
+ // special function only for type T=PathData
+ template <typename forT = PathData>
+ auto value(int frameNo, VPath &path) const ->
+ typename std::enable_if_t<std::is_same<T, forT>::value, void>
+ {
+ if (isStatic()) {
+ value().toPath(path);
+ } else {
+ const auto &vec = animation().frames_;
+ if (vec.front().start_ >= frameNo)
+ return vec.front().value_.start_.toPath(path);
+ if (vec.back().end_ <= frameNo)
+ return vec.back().value_.end_.toPath(path);
+
+ for (const auto &keyFrame : vec) {
+ if (frameNo >= keyFrame.start_ && frameNo < keyFrame.end_) {
+ T::lerp(keyFrame.value_.start_, keyFrame.value_.end_,
+ keyFrame.progress(frameNo), path);
+ }
+ }
+ }
+ }
+
+ float angle(int frameNo) const
+ {
+ return isStatic() ? 0 : animation().angle(frameNo);
+ }
+
+ bool changed(int prevFrame, int curFrame) const
+ {
+ return isStatic() ? false : animation().changed(prevFrame, curFrame);
+ }
+ void cache()
+ {
+ if (!isStatic()) animation().cache();
+ }
+
+private:
+ template <typename Tp>
+ void construct(Tp &member, Tp &&val)
+ {
+ new (&member) Tp(std::move(val));
+ }
+
+ void destroy()
+ {
+ if (isValue_) {
+ impl_.value_.~T();
+ } else {
+ using std::unique_ptr;
+ impl_.animation_.~unique_ptr<Animation>();
+ }
+ }
+ union details {
+ std::unique_ptr<Animation> animation_;
+ T value_;
+ details(){};
+ details(const details &) = delete;
+ details(details &&) = delete;
+ details &operator=(details &&) = delete;
+ details &operator=(const details &) = delete;
+ ~details() noexcept {};
+ } impl_;
+ bool isValue_{true};
+};
+
+class Path;
+struct PathData;
+struct Dash {
+ std::vector<Property<float>> mData;
+ bool empty() const { return mData.empty(); }
+ size_t size() const { return mData.size(); }
+ bool isStatic() const
+ {
+ for (const auto &elm : mData)
+ if (!elm.isStatic()) return false;
+ return true;
+ }
+ void getDashInfo(int frameNo, std::vector<float> &result) const;
+};
+
+class Mask {
+public:
+ enum class Mode { None, Add, Substarct, Intersect, Difference };
+ float opacity(int frameNo) const
+ {
+ return mOpacity.value(frameNo) / 100.0f;
+ }
+ bool isStatic() const { return mIsStatic; }
+
+public:
+ Property<PathData> mShape;
+ Property<float> mOpacity{100};
+ bool mInv{false};
+ bool mIsStatic{true};
+ Mask::Mode mMode;
+};
+
+class Object {
+public:
+ enum class Type : unsigned char {
+ Composition = 1,
+ Layer,
+ Group,
+ Transform,
+ Fill,
+ Stroke,
+ GFill,
+ GStroke,
+ Rect,
+ Ellipse,
+ Path,
+ Polystar,
+ Trim,
+ Repeater,
+ RoundedCorner
+ };
+
+ explicit Object(Object::Type type) : mPtr(nullptr)
+ {
+ mData._type = type;
+ mData._static = true;
+ mData._shortString = true;
+ mData._hidden = false;
+ }
+ ~Object() noexcept
+ {
+ if (!shortString() && mPtr) free(mPtr);
+ }
+ Object(const Object &) = delete;
+ Object &operator=(const Object &) = delete;
+
+ void setStatic(bool value) { mData._static = value; }
+ bool isStatic() const { return mData._static; }
+ bool hidden() const { return mData._hidden; }
+ void setHidden(bool value) { mData._hidden = value; }
+ void setType(Object::Type type) { mData._type = type; }
+ Object::Type type() const { return mData._type; }
+ void setName(const char *name)
+ {
+ if (name) {
+ auto len = strlen(name);
+ if (len < maxShortStringLength) {
+ setShortString(true);
+ strncpy(mData._buffer, name, len + 1);
+ } else {
+ setShortString(false);
+ mPtr = strdup(name);
+ }
+ }
+ }
+ const char *name() const { return shortString() ? mData._buffer : mPtr; }
+
+private:
+ static constexpr unsigned char maxShortStringLength = 14;
+ void setShortString(bool value) { mData._shortString = value; }
+ bool shortString() const { return mData._shortString; }
+ struct Data {
+ char _buffer[maxShortStringLength];
+ Object::Type _type;
+ bool _static : 1;
+ bool _hidden : 1;
+ bool _shortString : 1;
+ };
+ union {
+ Data mData;
+ char *mPtr{nullptr};
+ };
+};
+
+struct Asset {
+ enum class Type : unsigned char { Precomp, Image, Char };
+ bool isStatic() const { return mStatic; }
+ void setStatic(bool value) { mStatic = value; }
+ VBitmap bitmap() const { return mBitmap; }
+ void loadImageData(std::string data);
+ void loadImagePath(std::string Path);
+ Type mAssetType{Type::Precomp};
+ bool mStatic{true};
+ std::string mRefId; // ref id
+ std::vector<Object *> mLayers;
+ // image asset data
+ int mWidth{0};
+ int mHeight{0};
+ VBitmap mBitmap;
+};
+
+class Layer;
+
+class Composition : public Object {
+public:
+ Composition() : Object(Object::Type::Composition) {}
+ std::vector<LayerInfo> layerInfoList() const;
+ const std::vector<Marker> &markers() const { return mMarkers; }
+ double duration() const
+ {
+ return frameDuration() / frameRate(); // in second
+ }
+ size_t frameAtPos(double pos) const
+ {
+ if (pos < 0) pos = 0;
+ if (pos > 1) pos = 1;
+ return size_t(round(pos * frameDuration()));
+ }
+ long frameAtTime(double timeInSec) const
+ {
+ return long(frameAtPos(timeInSec / duration()));
+ }
+ size_t totalFrame() const { return mEndFrame - mStartFrame; }
+ long frameDuration() const { return mEndFrame - mStartFrame - 1; }
+ float frameRate() const { return mFrameRate; }
+ size_t startFrame() const { return mStartFrame; }
+ size_t endFrame() const { return mEndFrame; }
+ VSize size() const { return mSize; }
+ void processRepeaterObjects();
+ void updateStats();
+
+public:
+ struct Stats {
+ uint16_t precompLayerCount{0};
+ uint16_t solidLayerCount{0};
+ uint16_t shapeLayerCount{0};
+ uint16_t imageLayerCount{0};
+ uint16_t nullLayerCount{0};
+ };
+
+public:
+ std::string mVersion;
+ VSize mSize;
+ long mStartFrame{0};
+ long mEndFrame{0};
+ float mFrameRate{60};
+ BlendMode mBlendMode{BlendMode::Normal};
+ Layer * mRootLayer{nullptr};
+ std::unordered_map<std::string, Asset *> mAssets;
+
+ std::vector<Marker> mMarkers;
+ VArenaAlloc mArenaAlloc{2048};
+ Stats mStats;
+};
+
+// Layer/shape transform. To save memory, a transform that is static over the
+// whole animation collapses to a precomputed (matrix, opacity) pair stored
+// in-place; an animated transform keeps a pointer to the arena-allocated
+// keyframe data. The two representations share storage via a tagged union
+// (the tag being Object's static flag), hence the manual placement-new and
+// explicit destructor call below.
+class Transform : public Object {
+public:
+    struct Data {
+        struct Extra {
+            Property<float> m3DRx{0};
+            Property<float> m3DRy{0};
+            Property<float> m3DRz{0};
+            Property<float> mSeparateX{0};
+            Property<float> mSeparateY{0};
+            bool            mSeparate{false};
+            bool            m3DData{false};
+        };
+        VMatrix matrix(int frameNo, bool autoOrient = false) const;
+        float   opacity(int frameNo) const
+        {
+            // Opacity is authored in percent (0-100); normalize to 0-1.
+            return mOpacity.value(frameNo) / 100.0f;
+        }
+        void createExtraData()
+        {
+            if (!mExtra) mExtra = std::make_unique<Extra>();
+        }
+        Property<float>             mRotation{0};  /* "r" */
+        Property<VPointF>           mScale{{100, 100}};     /* "s" */
+        Property<VPointF, Position> mPosition;     /* "p" */
+        Property<VPointF>           mAnchor;       /* "a" */
+        Property<float>             mOpacity{100}; /* "o" */
+        std::unique_ptr<Extra>      mExtra;
+    };
+
+    Transform() : Object(Object::Type::Transform) {}
+    void set(Transform::Data *data, bool staticFlag)
+    {
+        setStatic(staticFlag);
+        if (isStatic()) {
+            // Static: bake the frame-0 matrix/opacity into the union slot.
+            new (&impl.mStaticData)
+                StaticData(data->matrix(0), data->opacity(0));
+        } else {
+            impl.mData = data;
+        }
+    }
+    VMatrix matrix(int frameNo, bool autoOrient = false) const
+    {
+        if (isStatic()) return impl.mStaticData.mMatrix;
+        return impl.mData->matrix(frameNo, autoOrient);
+    }
+    float opacity(int frameNo) const
+    {
+        if (isStatic()) return impl.mStaticData.mOpacity;
+        return impl.mData->opacity(frameNo);
+    }
+    Transform(const Transform &) = delete;
+    Transform(Transform &&) = delete;
+    Transform &operator=(Transform &) = delete;
+    Transform &operator=(Transform &&) = delete;
+    ~Transform() noexcept { destroy(); }
+
+private:
+    void destroy()
+    {
+        // Only the static variant owns its union member; mData is a
+        // non-owning pointer into the composition arena.
+        if (isStatic()) {
+            impl.mStaticData.~StaticData();
+        }
+    }
+    struct StaticData {
+        StaticData(VMatrix &&m, float opacity)
+            : mOpacity(opacity), mMatrix(std::move(m))
+        {
+        }
+        float   mOpacity;
+        VMatrix mMatrix;
+    };
+    union details {
+        Data *     mData{nullptr};
+        StaticData mStaticData;
+        details(){};
+        details(const details &) = delete;
+        details(details &&) = delete;
+        details &operator=(details &&) = delete;
+        details &operator=(const details &) = delete;
+        ~details() noexcept {};
+    } impl;
+};
+
+// A container node: holds child objects plus an optional transform.
+// Base class for Layer; mTransform is arena-owned, not owned here.
+class Group : public Object {
+public:
+    Group() : Object(Object::Type::Group) {}
+    explicit Group(Object::Type type) : Object(type) {}
+
+public:
+    std::vector<Object *> mChildren;
+    Transform *           mTransform{nullptr};
+};
+
+// One bodymovin layer. Rarely-used members (solid color, precomp ref,
+// time remap, masks) live in a lazily created Extra struct to keep the
+// common case small.
+class Layer : public Group {
+public:
+    enum class Type : uchar {
+        Precomp = 0,
+        Solid = 1,
+        Image = 2,
+        Null = 3,
+        Shape = 4,
+        Text = 5
+    };
+    Layer() : Group(Object::Type::Layer) {}
+    bool hasRoundedCorner() const noexcept { return mHasRoundedCorner; }
+    bool hasPathOperator() const noexcept { return mHasPathOperator; }
+    bool hasGradient() const noexcept { return mHasGradient; }
+    bool hasMask() const noexcept { return mHasMask; }
+    bool hasRepeater() const noexcept { return mHasRepeater; }
+    int  id() const noexcept { return mId; }
+    int  parentId() const noexcept { return mParentId; }
+    bool hasParent() const noexcept { return mParentId != -1; }
+    int  inFrame() const noexcept { return mInFrame; }
+    int  outFrame() const noexcept { return mOutFrame; }
+    int  startFrame() const noexcept { return mStartFrame; }
+    Color solidColor() const noexcept
+    {
+        return mExtra ? mExtra->mSolidColor : Color();
+    }
+    bool autoOrient() const noexcept { return mAutoOrient; }
+    int  timeRemap(int frameNo) const;
+    VSize layerSize() const { return mLayerSize; }
+    bool  precompLayer() const { return mLayerType == Type::Precomp; }
+    VMatrix matrix(int frameNo) const
+    {
+        return mTransform ? mTransform->matrix(frameNo, autoOrient())
+                          : VMatrix{};
+    }
+    float opacity(int frameNo) const
+    {
+        return mTransform ? mTransform->opacity(frameNo) : 1.0f;
+    }
+    Asset *asset() const { return mExtra ? mExtra->mAsset : nullptr; }
+    struct Extra {
+        Color               mSolidColor;
+        std::string         mPreCompRefId;
+        Property<float>     mTimeRemap; /* "tm" */
+        Composition *       mCompRef{nullptr};
+        Asset *             mAsset{nullptr};
+        std::vector<Mask *> mMasks;
+    };
+
+    // Lazily allocates the Extra block on first use.
+    Layer::Extra *extra()
+    {
+        if (!mExtra) mExtra = std::make_unique<Layer::Extra>();
+        return mExtra.get();
+    }
+
+public:
+    MatteType mMatteType{MatteType::None};
+    Type      mLayerType{Layer::Type::Null};
+    BlendMode mBlendMode{BlendMode::Normal};
+    bool      mHasRoundedCorner{false};
+    bool      mHasPathOperator{false};
+    bool      mHasMask{false};
+    bool      mHasRepeater{false};
+    bool      mHasGradient{false};
+    bool      mAutoOrient{false};
+    VSize     mLayerSize;
+    int       mParentId{-1};  // Lottie the id of the parent in the composition
+    int       mId{-1};        // Lottie the group id  used for parenting.
+    float     mTimeStreatch{1.0f};
+    int       mInFrame{0};
+    int       mOutFrame{0};
+    int       mStartFrame{0};
+    std::unique_ptr<Extra> mExtra{nullptr};
+};
+
+/**
+ * TimeRemap has the value in the time domain (in sec).
+ * To get the proper mapping, first we get the mapped time at the current
+ * frame number, then we convert the mapped time to a frame number using the
+ * composition timeline. Ex: at frame 10 the mapped time is 0.5 (500 ms),
+ * which converts to frame number 30 if the frame rate is 60, or to
+ * frame number 15 if the frame rate is 30.
+ */
+inline int Layer::timeRemap(int frameNo) const
+{
+    /*
+     * only consider startFrame() when there is no timeRemap.
+     * when a layer has timeremap bodymovin updates the startFrame()
+     * of all child layer so we don't have to take care of it.
+     */
+    if (!mExtra || mExtra->mTimeRemap.isStatic())
+        frameNo = frameNo - startFrame();
+    else
+        frameNo =
+            mExtra->mCompRef->frameAtTime(mExtra->mTimeRemap.value(frameNo));
+    /* Apply time stretch if it has any.
+     * Time stretch is just a factor by which the animation will speed up or
+     * slow down with respect to the overall animation. The time stretch
+     * factor is already applied to the layer's inFrame and outFrame.
+     * @TODO need to find out if time stretch also affects the in and out
+     * frame of the child layers or not. */
+    return int(frameNo / mTimeStreatch);
+}
+
+// Stroke style ("st" shape item): animated color/opacity/width plus static
+// cap/join/miter settings and optional dash info.
+class Stroke : public Object {
+public:
+    Stroke() : Object(Object::Type::Stroke) {}
+    Color color(int frameNo) const { return mColor.value(frameNo); }
+    float opacity(int frameNo) const
+    {
+        // Authored in percent; normalize to 0-1.
+        return mOpacity.value(frameNo) / 100.0f;
+    }
+    float     strokeWidth(int frameNo) const { return mWidth.value(frameNo); }
+    CapStyle  capStyle() const { return mCapStyle; }
+    JoinStyle joinStyle() const { return mJoinStyle; }
+    float     miterLimit() const { return mMiterLimit; }
+    bool      hasDashInfo() const { return !mDash.empty(); }
+    void      getDashInfo(int frameNo, std::vector<float> &result) const
+    {
+        return mDash.getDashInfo(frameNo, result);
+    }
+
+public:
+    Property<Color> mColor;             /* "c" */
+    Property<float> mOpacity{100};      /* "o" */
+    Property<float> mWidth{0};          /* "w" */
+    CapStyle        mCapStyle{CapStyle::Flat};   /* "lc" */
+    JoinStyle       mJoinStyle{JoinStyle::Miter}; /* "lj" */
+    float           mMiterLimit{0};     /* "ml" */
+    Dash            mDash;
+    bool            mEnabled{true};     /* "fillEnabled" */
+};
+
+// Common base for gradient fill/stroke. Data holds the raw stop array as
+// parsed from the "g" property; the friend operators below make it
+// interpolatable by the keyframe machinery.
+class Gradient : public Object {
+public:
+    class Data {
+    public:
+        friend inline Gradient::Data operator+(const Gradient::Data &g1,
+                                               const Gradient::Data &g2);
+        friend inline Gradient::Data operator-(const Gradient::Data &g1,
+                                               const Gradient::Data &g2);
+        friend inline Gradient::Data operator*(float                 m,
+                                               const Gradient::Data &g);
+
+    public:
+        std::vector<float> mGradient;
+    };
+    explicit Gradient(Object::Type type) : Object(type) {}
+    inline float opacity(int frameNo) const
+    {
+        // Authored in percent; normalize to 0-1.
+        return mOpacity.value(frameNo) / 100.0f;
+    }
+    void update(std::unique_ptr<VGradient> &grad, int frameNo);
+
+private:
+    void populate(VGradientStops &stops, int frameNo);
+
+public:
+    int                      mGradientType{1}; /* "t" Linear=1 , Radial = 2*/
+    Property<VPointF>        mStartPoint;      /* "s" */
+    Property<VPointF>        mEndPoint;        /* "e" */
+    Property<float>          mHighlightLength{0}; /* "h" */
+    Property<float>          mHighlightAngle{0};  /* "a" */
+    Property<float>          mOpacity{100};    /* "o" */
+    Property<Gradient::Data> mGradient;        /* "g" */
+    int                      mColorPoints{-1};
+    bool                     mEnabled{true};   /* "fillEnabled" */
+};
+
+// Gradient stroke ("gs"): gradient plus stroke geometry (width, caps,
+// joins, miter, dash).
+class GradientStroke : public Gradient {
+public:
+    GradientStroke() : Gradient(Object::Type::GStroke) {}
+    float     width(int frameNo) const { return mWidth.value(frameNo); }
+    CapStyle  capStyle() const { return mCapStyle; }
+    JoinStyle joinStyle() const { return mJoinStyle; }
+    float     miterLimit() const { return mMiterLimit; }
+    bool      hasDashInfo() const { return !mDash.empty(); }
+    void      getDashInfo(int frameNo, std::vector<float> &result) const
+    {
+        return mDash.getDashInfo(frameNo, result);
+    }
+
+public:
+    Property<float> mWidth;  /* "w" */
+    CapStyle        mCapStyle{CapStyle::Flat};    /* "lc" */
+    JoinStyle       mJoinStyle{JoinStyle::Miter}; /* "lj" */
+    float           mMiterLimit{0};               /* "ml" */
+    Dash            mDash;
+};
+
+// Gradient fill ("gf"): gradient plus a fill rule.
+class GradientFill : public Gradient {
+public:
+    GradientFill() : Gradient(Object::Type::GFill) {}
+    FillRule fillRule() const { return mFillRule; }
+
+public:
+    FillRule mFillRule{FillRule::Winding}; /* "r" */
+};
+
+// Solid fill ("fl"): animated color and opacity with a fill rule.
+class Fill : public Object {
+public:
+    Fill() : Object(Object::Type::Fill) {}
+    Color color(int frameNo) const { return mColor.value(frameNo); }
+    float opacity(int frameNo) const
+    {
+        // Authored in percent; normalize to 0-1.
+        return mOpacity.value(frameNo) / 100.0f;
+    }
+    FillRule fillRule() const { return mFillRule; }
+
+public:
+    FillRule        mFillRule{FillRule::Winding}; /* "r" */
+    bool            mEnabled{true};               /* "fillEnabled" */
+    Property<Color> mColor;                       /* "c" */
+    Property<float> mOpacity{100};                /* "o" */
+};
+
+// Base for drawable shape primitives. mDirection ("d" attribute) selects
+// path winding; value 3 means counter-clockwise, anything else clockwise.
+class Shape : public Object {
+public:
+    explicit Shape(Object::Type type) : Object(type) {}
+    VPath::Direction direction()
+    {
+        return (mDirection == 3) ? VPath::Direction::CCW : VPath::Direction::CW;
+    }
+
+public:
+    int mDirection{1};
+};
+
+// Free-form bezier path shape ("sh"): keyframed path data.
+class Path : public Shape {
+public:
+    Path() : Shape(Object::Type::Path) {}
+
+public:
+    Property<PathData> mShape;
+};
+
+// Rounded-corner modifier ("rd"): animated corner radius applied to
+// sibling shapes in the same group.
+class RoundedCorner : public Object {
+public:
+    RoundedCorner() : Object(Object::Type::RoundedCorner) {}
+    float radius(int frameNo) const { return mRadius.value(frameNo);}
+public:
+    Property<float> mRadius{0};
+};
+
+// Rectangle shape ("rc"). Roundness may come either from the rect's own
+// "r" property or from a RoundedCorner modifier attached by the parser;
+// the modifier, when present, takes precedence.
+class Rect : public Shape {
+public:
+    Rect() : Shape(Object::Type::Rect) {}
+    float roundness(int frameNo)
+    {
+        return mRoundedCorner ? mRoundedCorner->radius(frameNo) :
+                                mRound.value(frameNo);
+    }
+
+    bool roundnessChanged(int prevFrame, int curFrame)
+    {
+        return mRoundedCorner ? mRoundedCorner->mRadius.changed(prevFrame, curFrame) :
+                                mRound.changed(prevFrame, curFrame);
+    }
+public:
+    RoundedCorner*    mRoundedCorner{nullptr};
+    Property<VPointF> mPos;
+    Property<VPointF> mSize;
+    Property<float>   mRound{0};
+};
+
+// Ellipse shape ("el"): animated center position and size.
+class Ellipse : public Shape {
+public:
+    Ellipse() : Shape(Object::Type::Ellipse) {}
+
+public:
+    Property<VPointF> mPos;
+    Property<VPointF> mSize;
+};
+
+// Star/polygon shape ("sr"). Star uses both inner and outer radius /
+// roundness; Polygon only the outer pair.
+class Polystar : public Shape {
+public:
+    enum class PolyType { Star = 1, Polygon = 2 };
+    Polystar() : Shape(Object::Type::Polystar) {}
+
+public:
+    Polystar::PolyType mPolyType{PolyType::Polygon};
+    Property<VPointF>  mPos;
+    Property<float>    mPointCount{0};
+    Property<float>    mInnerRadius{0};
+    Property<float>    mOuterRadius{0};
+    Property<float>    mInnerRoundness{0};
+    Property<float>    mOuterRoundness{0};
+    Property<float>    mRotation{0};
+};
+
+// Repeater modifier ("rp"): duplicates the content group `copies` times,
+// applying an incremental transform and interpolated opacity per copy.
+// mMaxCopies caches the maximum copy count over the whole animation so
+// render items can be pre-allocated; mProcessed guards against applying
+// the repeater twice during model post-processing.
+class Repeater : public Object {
+public:
+    struct Transform {
+        VMatrix matrix(int frameNo, float multiplier) const;
+        float   startOpacity(int frameNo) const
+        {
+            return mStartOpacity.value(frameNo) / 100;
+        }
+        float endOpacity(int frameNo) const
+        {
+            return mEndOpacity.value(frameNo) / 100;
+        }
+        bool isStatic() const
+        {
+            return mRotation.isStatic() && mScale.isStatic() &&
+                   mPosition.isStatic() && mAnchor.isStatic() &&
+                   mStartOpacity.isStatic() && mEndOpacity.isStatic();
+        }
+        Property<float>   mRotation{0};      /* "r" */
+        Property<VPointF> mScale{{100, 100}}; /* "s" */
+        Property<VPointF> mPosition;         /* "p" */
+        Property<VPointF> mAnchor;           /* "a" */
+        Property<float>   mStartOpacity{100}; /* "so" */
+        Property<float>   mEndOpacity{100};   /* "eo" */
+    };
+    Repeater() : Object(Object::Type::Repeater) {}
+    Group *content() const { return mContent ? mContent : nullptr; }
+    void   setContent(Group *content) { mContent = content; }
+    int    maxCopies() const { return int(mMaxCopies); }
+    float  copies(int frameNo) const { return mCopies.value(frameNo); }
+    float  offset(int frameNo) const { return mOffset.value(frameNo); }
+    bool   processed() const { return mProcessed; }
+    void   markProcessed() { mProcessed = true; }
+
+public:
+    Group *         mContent{nullptr};
+    Transform       mTransform;
+    Property<float> mCopies{0};
+    Property<float> mOffset{0};
+    float           mMaxCopies{0.0};
+    bool            mProcessed{false};
+};
+
+// Trim-paths modifier ("tm"): clips sibling paths to a [start, end]
+// fraction of their length, optionally rotated by an offset angle.
+// segment() normalizes the percent/degree inputs and folds the offset so
+// the result is either a single non-wrapping segment (start <= end) or a
+// wrapping two-piece "loop" segment (encoded with start > end).
+class Trim : public Object {
+public:
+    struct Segment {
+        float start{0};
+        float end{0};
+        Segment() = default;
+        explicit Segment(float s, float e) : start(s), end(e) {}
+    };
+    enum class TrimType { Simultaneously, Individually };
+    Trim() : Object(Object::Type::Trim) {}
+    /*
+     * if start > end vector trims the path as a loop ( 2 segment)
+     * if start < end vector trims the path without loop ( 1 segment).
+     * if no offset then there is no loop.
+     */
+    Segment segment(int frameNo) const
+    {
+        float start = mStart.value(frameNo) / 100.0f;
+        float end = mEnd.value(frameNo) / 100.0f;
+        // Offset is authored in degrees; fold into [0,1) turns.
+        float offset = std::fmod(mOffset.value(frameNo), 360.0f) / 360.0f;
+
+        float diff = std::abs(start - end);
+        if (vCompare(diff, 0.0f)) return Segment(0, 0);
+        if (vCompare(diff, 1.0f)) return Segment(0, 1);
+
+        if (offset > 0) {
+            start += offset;
+            end += offset;
+            if (start <= 1 && end <= 1) {
+                return noloop(start, end);
+            } else if (start > 1 && end > 1) {
+                // Both wrapped past 1: shift back into range, no loop.
+                return noloop(start - 1, end - 1);
+            } else {
+                // Only one endpoint wrapped: segment crosses the seam.
+                return (start > 1) ? loop(start - 1, end)
+                                   : loop(start, end - 1);
+            }
+        } else {
+            start += offset;
+            end += offset;
+            if (start >= 0 && end >= 0) {
+                return noloop(start, end);
+            } else if (start < 0 && end < 0) {
+                return noloop(1 + start, 1 + end);
+            } else {
+                return (start < 0) ? loop(1 + start, end)
+                                   : loop(start, 1 + end);
+            }
+        }
+    }
+    Trim::TrimType type() const { return mTrimType; }
+
+private:
+    // Non-wrapping segment: normalized so start <= end.
+    Segment noloop(float start, float end) const
+    {
+        assert(start >= 0);
+        assert(end >= 0);
+        Segment s;
+        s.start = std::min(start, end);
+        s.end = std::max(start, end);
+        return s;
+    }
+    // Wrapping segment: encoded with start > end to signal the loop case.
+    Segment loop(float start, float end) const
+    {
+        assert(start >= 0);
+        assert(end >= 0);
+        Segment s;
+        s.start = std::max(start, end);
+        s.end = std::min(start, end);
+        return s;
+    }
+
+public:
+    Property<float> mStart{0};
+    Property<float> mEnd{0};
+    Property<float> mOffset{0};
+    Trim::TrimType  mTrimType{TrimType::Simultaneously};
+};
+
+// Element-wise sum of two gradient stop arrays (used by keyframe
+// interpolation). Mismatched sizes fall back to returning g1 unchanged.
+inline Gradient::Data operator+(const Gradient::Data &g1,
+                                const Gradient::Data &g2)
+{
+    if (g1.mGradient.size() != g2.mGradient.size()) return g1;
+
+    Gradient::Data newG;
+    newG.mGradient = g1.mGradient;
+
+    auto g2It = g2.mGradient.begin();
+    for (auto &i : newG.mGradient) {
+        i = i + *g2It;
+        g2It++;
+    }
+
+    return newG;
+}
+
+// Element-wise difference of two gradient stop arrays; mismatched sizes
+// fall back to returning g1 unchanged.
+inline Gradient::Data operator-(const Gradient::Data &g1,
+                                const Gradient::Data &g2)
+{
+    if (g1.mGradient.size() != g2.mGradient.size()) return g1;
+    Gradient::Data newG;
+    newG.mGradient = g1.mGradient;
+
+    auto g2It = g2.mGradient.begin();
+    for (auto &i : newG.mGradient) {
+        i = i - *g2It;
+        g2It++;
+    }
+
+    return newG;
+}
+
+// Scalar scaling of a gradient stop array (interpolation weight).
+inline Gradient::Data operator*(float m, const Gradient::Data &g)
+{
+    Gradient::Data newG;
+    newG.mGradient = g.mGradient;
+
+    for (auto &i : newG.mGradient) {
+        i = i * m;
+    }
+    return newG;
+}
+
+using ColorFilter = std::function<void(float &, float &, float &)>;
+
+void configureModelCacheSize(size_t cacheSize);
+
+std::shared_ptr<model::Composition> loadFromFile(const std::string &filePath,
+ bool cachePolicy);
+
+std::shared_ptr<model::Composition> loadFromData(std::string jsonData,
+ const std::string &key,
+ std::string resourcePath,
+ bool cachePolicy);
+
+std::shared_ptr<model::Composition> loadFromData(std::string jsonData,
+ std::string resourcePath,
+ ColorFilter filter);
+
+std::shared_ptr<model::Composition> parse(char *str, std::string dir_path,
+ ColorFilter filter = {});
+
+} // namespace model
+
+} // namespace internal
+
+} // namespace rlottie
+
+#endif // LOTModel_H
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_lottieparser.cpp b/vendor/github.com/Benau/go_rlottie/lottie_lottieparser.cpp
new file mode 100644
index 00000000..91839d41
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_lottieparser.cpp
@@ -0,0 +1,2390 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+//#define DEBUG_PARSER
+
+// This parser implements JSON token-by-token parsing with an API that is
+// more direct; we don't have to create handler object and
+// callbacks. Instead, we retrieve values from the JSON stream by calling
+// GetInt(), GetDouble(), GetString() and GetBool(), traverse into structures
+// by calling EnterObject() and EnterArray(), and skip over unwanted data by
+// calling SkipValue(). As we know the lottie file structure this way will be
+// the efficient way of parsing the file.
+//
+// If you aren't sure of what's next in the JSON data, you can use PeekType()
+// and PeekValue() to look ahead to the next object before reading it.
+//
+// If you call the wrong retrieval method--e.g. GetInt when the next JSON token
+// is not an int, EnterObject or EnterArray when there isn't actually an object
+// or array to read--the stream parsing will end immediately and no more data
+// will be delivered.
+//
+// After calling EnterObject, you retrieve keys via NextObjectKey() and values
+// via the normal getters. When NextObjectKey() returns null, you have exited
+// the object, or you can call SkipObject() to skip to the end of the object
+// immediately. If you fetch the entire object (i.e. NextObjectKey() returned
+// null), you should not call SkipObject().
+//
+// After calling EnterArray(), you must alternate between calling
+// NextArrayValue() to see if the array has more data, and then retrieving
+// values via the normal getters. You can call SkipArray() to skip to the end of
+// the array immediately. If you fetch the entire array (i.e. NextArrayValue()
+// returned null), you should not call SkipArray().
+//
+// This parser uses in-situ strings, so the JSON buffer will be altered during
+// the parse.
+
+#include <array>
+
+#include "lottie_lottiemodel.h"
+#include "lottie_rapidjson_document.h"
+
+RAPIDJSON_DIAG_PUSH
+#ifdef __GNUC__
+RAPIDJSON_DIAG_OFF(effc++)
+#endif
+
+using namespace rapidjson;
+
+using namespace rlottie::internal;
+
+// SAX handler backing the lookahead parser: each rapidjson event stores
+// the current token's value in v_ and records what kind of token it was
+// in st_, so the pull-style API above it can peek one token ahead.
+//
+// Fix: Uint64 previously took int64_t; rapidjson's Handler concept
+// specifies `bool Uint64(uint64_t)`, and the narrowing signature forced an
+// implementation-defined signed conversion for values above INT64_MAX.
+class LookaheadParserHandler {
+public:
+    bool Null()
+    {
+        st_ = kHasNull;
+        v_.SetNull();
+        return true;
+    }
+    bool Bool(bool b)
+    {
+        st_ = kHasBool;
+        v_.SetBool(b);
+        return true;
+    }
+    bool Int(int i)
+    {
+        st_ = kHasNumber;
+        v_.SetInt(i);
+        return true;
+    }
+    bool Uint(unsigned u)
+    {
+        st_ = kHasNumber;
+        v_.SetUint(u);
+        return true;
+    }
+    bool Int64(int64_t i)
+    {
+        st_ = kHasNumber;
+        v_.SetInt64(i);
+        return true;
+    }
+    bool Uint64(uint64_t u)
+    {
+        st_ = kHasNumber;
+        v_.SetUint64(u);
+        return true;
+    }
+    bool Double(double d)
+    {
+        st_ = kHasNumber;
+        v_.SetDouble(d);
+        return true;
+    }
+    // Raw number passthrough is unsupported; returning false aborts parsing.
+    bool RawNumber(const char *, SizeType, bool) { return false; }
+    bool String(const char *str, SizeType length, bool)
+    {
+        st_ = kHasString;
+        v_.SetString(str, length);
+        return true;
+    }
+    bool StartObject()
+    {
+        st_ = kEnteringObject;
+        return true;
+    }
+    bool Key(const char *str, SizeType length, bool)
+    {
+        st_ = kHasKey;
+        v_.SetString(str, length);
+        return true;
+    }
+    bool EndObject(SizeType)
+    {
+        st_ = kExitingObject;
+        return true;
+    }
+    bool StartArray()
+    {
+        st_ = kEnteringArray;
+        return true;
+    }
+    bool EndArray(SizeType)
+    {
+        st_ = kExitingArray;
+        return true;
+    }
+
+    // Puts the handler in the sticky error state; all getters then bail out.
+    void Error()
+    {
+        st_ = kError;
+    }
+protected:
+    explicit LookaheadParserHandler(char *str);
+
+protected:
+    enum LookaheadParsingState {
+        kInit,
+        kError,
+        kHasNull,
+        kHasBool,
+        kHasNumber,
+        kHasString,
+        kHasKey,
+        kEnteringObject,
+        kExitingObject,
+        kEnteringArray,
+        kExitingArray
+    };
+
+    Value               v_;   // value of the current (lookahead) token
+    LookaheadParsingState st_; // kind of the current token / error flag
+    Reader              r_;
+    InsituStringStream  ss_;  // in-situ: parsing mutates the input buffer
+
+    static const int parseFlags = kParseDefaultFlags | kParseInsituFlag;
+};
+
+// Pull-style lottie JSON parser built on top of LookaheadParserHandler.
+// Provides typed getters (GetInt/GetDouble/...), structural navigation
+// (EnterObject/EnterArray/Next*), and one parseXxx method per bodymovin
+// node type. All model objects are allocated from the composition's arena.
+class LottieParserImpl : public LookaheadParserHandler {
+public:
+    LottieParserImpl(char *str, std::string dir_path, model::ColorFilter filter)
+        : LookaheadParserHandler(str),
+          mColorFilter(std::move(filter)),
+          mDirPath(std::move(dir_path))
+    {
+    }
+    bool VerifyType();
+    bool ParseNext();
+
+public:
+    VArenaAlloc &allocator() { return compRef->mArenaAlloc; }
+    bool         EnterObject();
+    bool         EnterArray();
+    const char * NextObjectKey();
+    bool         NextArrayValue();
+    int          GetInt();
+    double       GetDouble();
+    const char * GetString();
+    std::string  GetStringObject();
+    bool         GetBool();
+    void         GetNull();
+
+    void   SkipObject();
+    void   SkipArray();
+    void   SkipValue();
+    Value *PeekValue();
+    int    PeekType() const;
+    bool   IsValid() { return st_ != kError; }
+
+    void                  Skip(const char *key);
+    model::BlendMode      getBlendMode();
+    CapStyle              getLineCap();
+    JoinStyle             getLineJoin();
+    FillRule              getFillRule();
+    model::Trim::TrimType getTrimType();
+    model::MatteType      getMatteType();
+    model::Layer::Type    getLayerType();
+
+    std::shared_ptr<model::Composition> composition() const
+    {
+        return mComposition;
+    }
+    void                  parseComposition();
+    void                  parseMarkers();
+    void                  parseMarker();
+    void                  parseAssets(model::Composition *comp);
+    model::Asset *        parseAsset();
+    void                  parseLayers(model::Composition *comp);
+    model::Layer *        parseLayer();
+    void                  parseMaskProperty(model::Layer *layer);
+    void                  parseShapesAttr(model::Layer *layer);
+    void                  parseObject(model::Group *parent);
+    model::Mask *         parseMaskObject();
+    model::Object *       parseObjectTypeAttr();
+    model::Object *       parseGroupObject();
+    model::Rect *         parseRectObject();
+    model::RoundedCorner *parseRoundedCorner();
+    void updateRoundedCorner(model::Group *parent, model::RoundedCorner *rc);
+
+    model::Ellipse * parseEllipseObject();
+    model::Path *    parseShapeObject();
+    model::Polystar *parsePolystarObject();
+
+    model::Transform *     parseTransformObject(bool ddd = false);
+    model::Fill *          parseFillObject();
+    model::GradientFill *  parseGFillObject();
+    model::Stroke *        parseStrokeObject();
+    model::GradientStroke *parseGStrokeObject();
+    model::Trim *          parseTrimObject();
+    model::Repeater *      parseReapeaterObject();
+
+    void parseGradientProperty(model::Gradient *gradient, const char *key);
+
+    VPointF parseInperpolatorPoint();
+
+    void getValue(VPointF &pt);
+    void getValue(float &fval);
+    void getValue(model::Color &color);
+    void getValue(int &ival);
+    void getValue(model::PathData &shape);
+    void getValue(model::Gradient::Data &gradient);
+    void getValue(std::vector<VPointF> &v);
+    void getValue(model::Repeater::Transform &);
+
+    template <typename T, typename Tag>
+    bool parseKeyFrameValue(const char *, model::Value<T, Tag> &)
+    {
+        return false;
+    }
+
+    template <typename T>
+    bool parseKeyFrameValue(const char *                      key,
+                            model::Value<T, model::Position> &value);
+    template <typename T, typename Tag>
+    void parseKeyFrame(model::KeyFrames<T, Tag> &obj);
+    template <typename T>
+    void parseProperty(model::Property<T> &obj);
+    template <typename T, typename Tag>
+    void parsePropertyHelper(model::Property<T, Tag> &obj);
+
+    void parseShapeProperty(model::Property<model::PathData> &obj);
+    void parseDashProperty(model::Dash &dash);
+
+    VInterpolator *interpolator(VPointF, VPointF, std::string);
+
+    model::Color toColor(const char *str);
+
+    void resolveLayerRefs();
+    void parsePathInfo();
+
+private:
+    model::ColorFilter mColorFilter;
+    // Scratch buffer for converting one AE "ks" shape record (separate
+    // in-tangent / out-tangent / vertex arrays) into a flat cubic list.
+    struct {
+        std::vector<VPointF> mInPoint;  /* "i" */
+        std::vector<VPointF> mOutPoint; /* "o" */
+        std::vector<VPointF> mVertices; /* "v" */
+        std::vector<VPointF> mResult;
+        bool                 mClosed{false};
+
+        void convert()
+        {
+            // shape data could be empty.
+            if (mInPoint.empty() || mOutPoint.empty() || mVertices.empty()) {
+                mResult.clear();
+                return;
+            }
+
+            /*
+             * Convert the AE shape format to
+             * list of bezier curves
+             * The final structure will be Move +size*Cubic + Cubic (if the path
+             * is closed one)
+             */
+            if (mInPoint.size() != mOutPoint.size() ||
+                mInPoint.size() != mVertices.size()) {
+                mResult.clear();
+            } else {
+                auto size = mVertices.size();
+                mResult.push_back(mVertices[0]);
+                for (size_t i = 1; i < size; i++) {
+                    mResult.push_back(
+                        mVertices[i - 1] +
+                        mOutPoint[i - 1]);  // CP1 = start + outTangent
+                    mResult.push_back(mVertices[i] +
+                                      mInPoint[i]);   // CP2 = end + inTangent
+                    mResult.push_back(mVertices[i]);  // end point
+                }
+
+                if (mClosed) {
+                    mResult.push_back(
+                        mVertices[size - 1] +
+                        mOutPoint[size - 1]);  // CP1 = start + outTangent
+                    mResult.push_back(mVertices[0] +
+                                      mInPoint[0]);  // CP2 = end + inTangent
+                    mResult.push_back(mVertices[0]);  // end point
+                }
+            }
+        }
+        void reset()
+        {
+            mInPoint.clear();
+            mOutPoint.clear();
+            mVertices.clear();
+            mResult.clear();
+            mClosed = false;
+        }
+        void updatePath(VPath &out)
+        {
+            if (mResult.empty()) return;
+
+            auto size = mResult.size();
+            auto points = mResult.data();
+            /* reserve exact memory requirement at once
+             * ptSize = size + 1(size + close)
+             * elmSize = size/3 cubic + 1 move + 1 close
+             */
+            out.reserve(size + 1, size / 3 + 2);
+            out.moveTo(points[0]);
+            for (size_t i = 1; i < size; i += 3) {
+                out.cubicTo(points[i], points[i + 1], points[i + 2]);
+            }
+            if (mClosed) out.close();
+        }
+    } mPathInfo;
+
+protected:
+    std::unordered_map<std::string, VInterpolator *> mInterpolatorCache;
+    std::shared_ptr<model::Composition>              mComposition;
+    model::Composition *                             compRef{nullptr};
+    model::Layer *                                   curLayerRef{nullptr};
+    std::vector<model::Layer *>                      mLayersToUpdate;
+    std::string                                      mDirPath;
+    void                                             SkipOut(int depth);
+};
+
+// Binds the in-situ stream to the (mutable) JSON buffer and primes the
+// iterative parser; no token is consumed yet.
+LookaheadParserHandler::LookaheadParserHandler(char *str)
+    : v_(), st_(kInit), ss_(str)
+{
+    r_.IterativeParseInit();
+}
+
+// Consumes the first token to check the input is parseable JSON.
+bool LottieParserImpl::VerifyType()
+{
+    /* Verify the media type is lottie json.
+       Could add more strict check. */
+    return ParseNext();
+}
+
+// Advances to the next JSON token; on any rapidjson failure the parser
+// enters the sticky kError state and returns false.
+bool LottieParserImpl::ParseNext()
+{
+    if (r_.HasParseError()) {
+        st_ = kError;
+        return false;
+    }
+
+    if (!r_.IterativeParseNext<parseFlags>(ss_, *this)) {
+        vCritical << "Lottie file parsing error";
+        st_ = kError;
+        return false;
+    }
+    return true;
+}
+
+// Steps into the object whose '{' is the current token; errors out if the
+// current token is not an object start.
+bool LottieParserImpl::EnterObject()
+{
+    if (st_ != kEnteringObject) {
+        st_ = kError;
+        return false;
+    }
+
+    ParseNext();
+    return true;
+}
+
+// Steps into the array whose '[' is the current token; errors out if the
+// current token is not an array start.
+bool LottieParserImpl::EnterArray()
+{
+    if (st_ != kEnteringArray) {
+        st_ = kError;
+        return false;
+    }
+
+    ParseNext();
+    return true;
+}
+
+// Returns the next key inside the current object, or nullptr when the
+// object has been fully consumed. Note: the returned pointer aliases the
+// in-situ buffer and is only valid until the next parse step.
+const char *LottieParserImpl::NextObjectKey()
+{
+    if (st_ == kHasKey) {
+        const char *result = v_.GetString();
+        ParseNext();
+        return result;
+    }
+
+    /* SPECIAL CASE
+     * The parser works with a predefined rule that it will be only
+     * while (NextObjectKey()) for each object but in case of our nested group
+     * object we can call multiple time NextObjectKey() while exiting the object
+     * so ignore those and don't put parser in the error state.
+     * */
+    if (st_ == kExitingArray || st_ == kEnteringObject) {
+        // #ifdef DEBUG_PARSER
+        //         vDebug<<"Object: Exiting nested loop";
+        // #endif
+        return nullptr;
+    }
+
+    if (st_ != kExitingObject) {
+        st_ = kError;
+        return nullptr;
+    }
+
+    ParseNext();
+    return nullptr;
+}
+
+// Returns true while the current array still has values to read; consumes
+// the closing ']' when the array ends.
+bool LottieParserImpl::NextArrayValue()
+{
+    if (st_ == kExitingArray) {
+        ParseNext();
+        return false;
+    }
+
+    /* SPECIAL CASE
+     * same as NextObjectKey()
+     */
+    if (st_ == kExitingObject) {
+        return false;
+    }
+
+    if (st_ == kError || st_ == kHasKey) {
+        st_ = kError;
+        return false;
+    }
+
+    return true;
+}
+
+// Reads the current token as an int (0 and error state on type mismatch).
+int LottieParserImpl::GetInt()
+{
+    if (st_ != kHasNumber || !v_.IsInt()) {
+        st_ = kError;
+        return 0;
+    }
+
+    int result = v_.GetInt();
+    ParseNext();
+    return result;
+}
+
+// Reads the current token as a double (0.0 and error state on mismatch).
+double LottieParserImpl::GetDouble()
+{
+    if (st_ != kHasNumber) {
+        st_ = kError;
+        return 0.;
+    }
+
+    double result = v_.GetDouble();
+    ParseNext();
+    return result;
+}
+
+// Reads the current token as a bool (false and error state on mismatch).
+bool LottieParserImpl::GetBool()
+{
+    if (st_ != kHasBool) {
+        st_ = kError;
+        return false;
+    }
+
+    bool result = v_.GetBool();
+    ParseNext();
+    return result;
+}
+
+// Consumes a JSON null token (error state if the token is not null).
+void LottieParserImpl::GetNull()
+{
+    if (st_ != kHasNull) {
+        st_ = kError;
+        return;
+    }
+
+    ParseNext();
+}
+
+// Reads the current token as a string. The returned pointer aliases the
+// in-situ buffer; copy it (see GetStringObject) if it must outlive the
+// next parse step.
+const char *LottieParserImpl::GetString()
+{
+    if (st_ != kHasString) {
+        st_ = kError;
+        return nullptr;
+    }
+
+    const char *result = v_.GetString();
+    ParseNext();
+    return result;
+}
+
+// Like GetString() but returns an owning std::string (empty on error).
+std::string LottieParserImpl::GetStringObject()
+{
+    auto str = GetString();
+
+    if (str) {
+        return std::string(str);
+    }
+
+    return {};
+}
+
+// Skips tokens until `depth` open containers have been closed. depth==0
+// skips a single value; depth==1 skips out of the current object/array.
+void LottieParserImpl::SkipOut(int depth)
+{
+    do {
+        if (st_ == kEnteringArray || st_ == kEnteringObject) {
+            ++depth;
+        } else if (st_ == kExitingArray || st_ == kExitingObject) {
+            --depth;
+        } else if (st_ == kError) {
+            return;
+        }
+
+        ParseNext();
+    } while (depth > 0);
+}
+
+// Skips the current value (scalar or whole container).
+void LottieParserImpl::SkipValue()
+{
+    SkipOut(0);
+}
+
+// Skips the remainder of the array already entered via EnterArray().
+void LottieParserImpl::SkipArray()
+{
+    SkipOut(1);
+}
+
+// Skips the remainder of the object already entered via EnterObject().
+void LottieParserImpl::SkipObject()
+{
+    SkipOut(1);
+}
+
+// Returns the current token's value without consuming it, or nullptr if
+// the current token is structural (object/array boundary) or an error.
+// Relies on the kHasNull..kHasKey enumerators being contiguous.
+Value *LottieParserImpl::PeekValue()
+{
+    if (st_ >= kHasNull && st_ <= kHasKey) {
+        return &v_;
+    }
+
+    return nullptr;
+}
+
+// returns a rapidjson::Type, or -1 for no value (at end of
+// object/array)
+int LottieParserImpl::PeekType() const
+{
+    if (st_ >= kHasNull && st_ <= kHasKey) {
+        return v_.GetType();
+    }
+
+    if (st_ == kEnteringArray) {
+        return kArrayType;
+    }
+
+    if (st_ == kEnteringObject) {
+        return kObjectType;
+    }
+
+    return -1;
+}
+
+// Skips an unhandled attribute value of any type (the key name is only
+// used by callers for debug logging).
+void LottieParserImpl::Skip(const char * /*key*/)
+{
+    if (PeekType() == kArrayType) {
+        EnterArray();
+        SkipArray();
+    } else if (PeekType() == kObjectType) {
+        EnterObject();
+        SkipObject();
+    } else {
+        SkipValue();
+    }
+}
+
+// Maps the numeric "bm" attribute to a BlendMode; unknown codes fall back
+// to Normal.
+model::BlendMode LottieParserImpl::getBlendMode()
+{
+    auto mode = model::BlendMode::Normal;
+
+    switch (GetInt()) {
+    case 1:
+        mode = model::BlendMode::Multiply;
+        break;
+    case 2:
+        mode = model::BlendMode::Screen;
+        break;
+    case 3:
+        mode = model::BlendMode::OverLay;
+        break;
+    default:
+        break;
+    }
+    return mode;
+}
+
+// Second pass after parsing: wires image/precomp layers to the assets
+// they reference by id ("refId"); unresolved references are left as-is.
+void LottieParserImpl::resolveLayerRefs()
+{
+    for (const auto &layer : mLayersToUpdate) {
+        auto search = compRef->mAssets.find(layer->extra()->mPreCompRefId);
+        if (search != compRef->mAssets.end()) {
+            if (layer->mLayerType == model::Layer::Type::Image) {
+                layer->extra()->mAsset = search->second;
+            } else if (layer->mLayerType == model::Layer::Type::Precomp) {
+                layer->mChildren = search->second->mLayers;
+                layer->setStatic(layer->isStatic() &&
+                                 search->second->isStatic());
+            }
+        }
+    }
+}
+
+// Parses the top-level composition object. mComposition stays null (the
+// failure signal) unless the header is valid, the frame range is sane and
+// no parse error occurred.
+void LottieParserImpl::parseComposition()
+{
+    EnterObject();
+    std::shared_ptr<model::Composition> sharedComposition =
+        std::make_shared<model::Composition>();
+    model::Composition *comp = sharedComposition.get();
+    compRef = comp;
+    while (const char *key = NextObjectKey()) {
+        if (0 == strcmp(key, "v")) {
+            comp->mVersion = GetStringObject();
+        } else if (0 == strcmp(key, "w")) {
+            comp->mSize.setWidth(GetInt());
+        } else if (0 == strcmp(key, "h")) {
+            comp->mSize.setHeight(GetInt());
+        } else if (0 == strcmp(key, "ip")) {
+            comp->mStartFrame = GetDouble();
+        } else if (0 == strcmp(key, "op")) {
+            comp->mEndFrame = GetDouble();
+        } else if (0 == strcmp(key, "fr")) {
+            comp->mFrameRate = GetDouble();
+        } else if (0 == strcmp(key, "assets")) {
+            parseAssets(comp);
+        } else if (0 == strcmp(key, "layers")) {
+            parseLayers(comp);
+        } else if (0 == strcmp(key, "markers")) {
+            parseMarkers();
+        } else {
+#ifdef DEBUG_PARSER
+            vWarning << "Composition Attribute Skipped : " << key;
+#endif
+            Skip(key);
+        }
+    }
+
+    if (comp->mVersion.empty() || !comp->mRootLayer) {
+        // don't have a valid bodymovin header
+        return;
+    }
+    if (comp->mStartFrame > comp->mEndFrame) {
+        // reversed animation? missing data?
+        return;
+    }
+    if (!IsValid()) {
+        return;
+    }
+
+    resolveLayerRefs();
+    comp->setStatic(comp->mRootLayer->isStatic());
+    comp->mRootLayer->mInFrame = comp->mStartFrame;
+    comp->mRootLayer->mOutFrame = comp->mEndFrame;
+
+    mComposition = sharedComposition;
+}
+
+// Parses one marker object ("cm" comment, "tm" start frame, "dr" duration)
+// and records it as a [start, start+duration] range on the composition.
+void LottieParserImpl::parseMarker()
+{
+    EnterObject();
+    std::string comment;
+    int         timeframe{0};
+    int         duration{0};
+    while (const char *key = NextObjectKey()) {
+        if (0 == strcmp(key, "cm")) {
+            comment = GetStringObject();
+        } else if (0 == strcmp(key, "tm")) {
+            timeframe = GetDouble();
+        } else if (0 == strcmp(key, "dr")) {
+            duration = GetDouble();
+
+        } else {
+#ifdef DEBUG_PARSER
+            vWarning << "Marker Attribute Skipped : " << key;
+#endif
+            Skip(key);
+        }
+    }
+    compRef->mMarkers.emplace_back(std::move(comment), timeframe,
+                                   timeframe + duration);
+}
+
+/* Parse the composition-level "markers" array, one marker per entry. */
+void LottieParserImpl::parseMarkers()
+{
+    EnterArray();
+    while (NextArrayValue()) {
+        parseMarker();
+    }
+}
+
+/* Parse the "assets" array and register each asset in the composition,
+ * keyed by its reference id.  Precomp layers that point at these assets
+ * are resolved later (see resolveLayerRefs). */
+void LottieParserImpl::parseAssets(model::Composition *composition)
+{
+    EnterArray();
+    while (NextArrayValue()) {
+        auto asset = parseAsset();
+        composition->mAssets[asset->mRefId] = asset;
+    }
+}
+
+// ASCII -> 6-bit value lookup table for base64 decoding.  It also maps
+// the URL-safe alphabet ('-' and '_') and treats every non-alphabet
+// byte (including '=') as 0.
+static constexpr const unsigned char B64index[256] = {
+    0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
+    0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
+    0,  0,  0,  0,  0,  0,  0,  62, 63, 62, 62, 63, 52, 53, 54, 55, 56, 57,
+    58, 59, 60, 61, 0,  0,  0,  0,  0,  0,  0,  0,  1,  2,  3,  4,  5,  6,
+    7,  8,  9,  10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+    25, 0,  0,  0,  0,  63, 0,  26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+    37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51};
+
+/* Decode a base64 buffer into raw bytes.  No validation is performed:
+ * the input is assumed to be well-formed base64 (as produced inside a
+ * data-uri).  NOTE(review): for a truncated final quantum the pad
+ * branch reads p[L + 1]; callers here pass c_str() data so that read
+ * hits at worst the NUL terminator — confirm before reusing with
+ * non-terminated buffers. */
+std::string b64decode(const char *data, const size_t len)
+{
+    auto p = reinterpret_cast<const unsigned char *>(data);
+    // pad == 1 when the last 4-byte group is incomplete or explicitly
+    // '=' padded; L is the length of the complete groups.
+    int pad = len > 0 && (len % 4 || p[len - 1] == '=');
+    const size_t L = ((len + 3) / 4 - pad) * 4;
+    std::string str(L / 4 * 3 + pad, '\0');
+
+    // Each complete group of 4 input chars yields 3 output bytes.
+    for (size_t i = 0, j = 0; i < L; i += 4) {
+        int n = B64index[p[i]] << 18 | B64index[p[i + 1]] << 12 |
+                B64index[p[i + 2]] << 6 | B64index[p[i + 3]];
+        str[j++] = n >> 16;
+        str[j++] = n >> 8 & 0xFF;
+        str[j++] = n & 0xFF;
+    }
+    // Handle the trailing partial group (1 or 2 decoded bytes).
+    if (pad) {
+        int n = B64index[p[L]] << 18 | B64index[p[L + 1]] << 12;
+        str[str.size() - 1] = n >> 16;
+
+        if (len > L + 2 && p[L + 2] != '=') {
+            n |= B64index[p[L + 2]] << 6;
+            str.push_back(n >> 8 & 0xFF);
+        }
+    }
+    return str;
+}
+
+/* Decode an embedded data-uri payload.  The usual header looks like
+ * "data:image/png;base64," — everything up to and including the first
+ * ',' is skipped and the remainder is base64-decoded.  If no ',' is
+ * present the whole string is decoded.  (The previous code reached the
+ * same result only by relying on std::string::npos + 1 wrapping to 0;
+ * the fallback is now explicit.) */
+static std::string convertFromBase64(const std::string &str)
+{
+    size_t startIndex = str.find(',', 0);
+    startIndex = (startIndex == std::string::npos) ? 0 : startIndex + 1;
+
+    const char *b64Data = str.c_str() + startIndex;
+
+    return b64decode(b64Data, str.length() - startIndex);
+}
+
+/*
+ * std::to_string() function is missing in VS2017
+ * so this is workaround for windows build
+ */
+#include <sstream>
+template <class T>
+static std::string toString(const T &value)
+{
+    // Format any streamable value through an output string stream.
+    std::ostringstream stream;
+    stream << value;
+    return stream.str();
+}
+
+/*
+ * https://github.com/airbnb/lottie-web/blob/master/docs/json/layers/shape.json
+ *
+ * Parse one entry of the "assets" array: either an image asset (inline
+ * base64 data or an on-disk path) or a precomp asset (a list of layers).
+ */
+model::Asset *LottieParserImpl::parseAsset()
+{
+    auto        asset = allocator().make<model::Asset>();
+    std::string filename;
+    std::string relativePath;
+    bool        embededResource = false;
+    EnterObject();
+    while (const char *key = NextObjectKey()) {
+        if (0 == strcmp(key, "w")) {
+            asset->mWidth = GetInt();
+        } else if (0 == strcmp(key, "h")) {
+            asset->mHeight = GetInt();
+        } else if (0 == strcmp(key, "p")) { /* image name */
+            asset->mAssetType = model::Asset::Type::Image;
+            filename = GetStringObject();
+        } else if (0 == strcmp(key, "u")) { /* relative image path */
+            relativePath = GetStringObject();
+        } else if (0 == strcmp(key, "e")) { /* embedded resource flag */
+            embededResource = GetInt();
+        } else if (0 == strcmp(key, "id")) { /* reference id*/
+            if (PeekType() == kStringType) {
+                asset->mRefId = GetStringObject();
+            } else {
+                asset->mRefId = toString(GetInt());
+            }
+        } else if (0 == strcmp(key, "layers")) {
+            asset->mAssetType = model::Asset::Type::Precomp;
+            EnterArray();
+            bool staticFlag = true;
+            while (NextArrayValue()) {
+                auto layer = parseLayer();
+                if (layer) {
+                    staticFlag = staticFlag && layer->isStatic();
+                    asset->mLayers.push_back(layer);
+                }
+            }
+            asset->setStatic(staticFlag);
+        } else {
+#ifdef DEBUG_PARSER
+            vWarning << "Asset Attribute Skipped : " << key;
+#endif
+            Skip(key);
+        }
+    }
+
+    if (asset->mAssetType == model::Asset::Type::Image) {
+        if (embededResource) {
+            // embedded resource should start with "data:"
+            if (filename.compare(0, 5, "data:") == 0) {
+                asset->loadImageData(convertFromBase64(filename));
+            }
+        } else {
+            asset->loadImagePath(mDirPath + relativePath + filename);
+        }
+    }
+
+    return asset;
+}
+
+/* Parse the composition's top-level "layers" array.  A synthetic
+ * precomp layer named "__" becomes the root; invalid layers (parseLayer
+ * returning nullptr) are dropped. */
+void LottieParserImpl::parseLayers(model::Composition *comp)
+{
+    comp->mRootLayer = allocator().make<model::Layer>();
+    comp->mRootLayer->mLayerType = model::Layer::Type::Precomp;
+    comp->mRootLayer->setName("__");
+    bool staticFlag = true;
+    EnterArray();
+    while (NextArrayValue()) {
+        auto layer = parseLayer();
+        if (layer) {
+            staticFlag = staticFlag && layer->isStatic();
+            comp->mRootLayer->mChildren.push_back(layer);
+        }
+    }
+    // Root is static only when every child layer is static.
+    comp->mRootLayer->setStatic(staticFlag);
+}
+
+/* Convert a "#rrggbb" hex string into a normalized model::Color.
+ * Anything that is not exactly 7 chars starting with '#' (some
+ * resources carry an empty color string) yields the default color. */
+model::Color LottieParserImpl::toColor(const char *str)
+{
+    if (!str) return {};
+
+    model::Color color;
+
+    if (strlen(str) != 7 || str[0] != '#') return color;
+
+    // Decode the three two-digit hex channels into [0, 1] floats.
+    float channel[3];
+    for (int i = 0; i < 3; i++) {
+        const char hex[3] = {str[1 + 2 * i], str[2 + 2 * i], '\0'};
+        channel[i] = std::strtol(hex, nullptr, 16) / 255.0f;
+    }
+
+    color.r = channel[0];
+    color.g = channel[1];
+    color.b = channel[2];
+
+    return color;
+}
+
+/* Translate the lottie "tt" integer into the matte enum; any unknown
+ * value falls back to MatteType::None. */
+model::MatteType LottieParserImpl::getMatteType()
+{
+    const int tt = GetInt();
+    if (tt == 1) return model::MatteType::Alpha;
+    if (tt == 2) return model::MatteType::AlphaInv;
+    if (tt == 3) return model::MatteType::Luma;
+    if (tt == 4) return model::MatteType::LumaInv;
+    return model::MatteType::None;
+}
+
+/* Translate the lottie "ty" integer into the layer-type enum; unknown
+ * values are treated as Null layers. */
+model::Layer::Type LottieParserImpl::getLayerType()
+{
+    const int ty = GetInt();
+    if (ty == 0) return model::Layer::Type::Precomp;
+    if (ty == 1) return model::Layer::Type::Solid;
+    if (ty == 2) return model::Layer::Type::Image;
+    if (ty == 3) return model::Layer::Type::Null;
+    if (ty == 4) return model::Layer::Type::Shape;
+    if (ty == 5) return model::Layer::Type::Text;
+    return model::Layer::Type::Null;
+}
+
+/*
+ * https://github.com/airbnb/lottie-web/blob/master/docs/json/layers/shape.json
+ *
+ * Parse one layer object.  Returns nullptr for corrupted layers (no
+ * transform, or a layer that is its own parent); hidden layers are
+ * degraded to Null layers that only contribute their transform.
+ */
+model::Layer *LottieParserImpl::parseLayer()
+{
+    model::Layer *layer = allocator().make<model::Layer>();
+    curLayerRef = layer;
+    bool ddd = true;
+    EnterObject();
+    while (const char *key = NextObjectKey()) {
+        if (0 == strcmp(key, "ty")) { /* Type of layer*/
+            layer->mLayerType = getLayerType();
+        } else if (0 == strcmp(key, "nm")) { /*Layer name*/
+            layer->setName(GetString());
+        } else if (0 == strcmp(key, "ind")) { /*Layer index in AE. Used for
+                                                 parenting and expressions.*/
+            layer->mId = GetInt();
+        } else if (0 == strcmp(key, "ddd")) { /*3d layer */
+            ddd = GetInt();
+        } else if (0 ==
+                   strcmp(key,
+                          "parent")) { /*Layer Parent. Uses "ind" of parent.*/
+            layer->mParentId = GetInt();
+        } else if (0 == strcmp(key, "refId")) { /*preComp Layer reference id*/
+            layer->extra()->mPreCompRefId = GetStringObject();
+            // NOTE(review): flagging precomp layers with mHasGradient
+            // looks intentional (side effect elsewhere?) — confirm.
+            layer->mHasGradient = true;
+            mLayersToUpdate.push_back(layer);
+        } else if (0 == strcmp(key, "sr")) {  // "Layer Time Stretching"
+            layer->mTimeStreatch = GetDouble();
+        } else if (0 == strcmp(key, "tm")) {  // time remapping
+            parseProperty(layer->extra()->mTimeRemap);
+        } else if (0 == strcmp(key, "ip")) {
+            layer->mInFrame = std::lround(GetDouble());
+        } else if (0 == strcmp(key, "op")) {
+            layer->mOutFrame = std::lround(GetDouble());
+        } else if (0 == strcmp(key, "st")) {
+            layer->mStartFrame = GetDouble();
+        } else if (0 == strcmp(key, "bm")) {
+            layer->mBlendMode = getBlendMode();
+        } else if (0 == strcmp(key, "ks")) {
+            EnterObject();
+            layer->mTransform = parseTransformObject(ddd);
+        } else if (0 == strcmp(key, "shapes")) {
+            parseShapesAttr(layer);
+        } else if (0 == strcmp(key, "w")) {
+            layer->mLayerSize.setWidth(GetInt());
+        } else if (0 == strcmp(key, "h")) {
+            layer->mLayerSize.setHeight(GetInt());
+        } else if (0 == strcmp(key, "sw")) {
+            layer->mLayerSize.setWidth(GetInt());
+        } else if (0 == strcmp(key, "sh")) {
+            layer->mLayerSize.setHeight(GetInt());
+        } else if (0 == strcmp(key, "sc")) {
+            layer->extra()->mSolidColor = toColor(GetString());
+        } else if (0 == strcmp(key, "tt")) {
+            layer->mMatteType = getMatteType();
+        } else if (0 == strcmp(key, "hasMask")) {
+            layer->mHasMask = GetBool();
+        } else if (0 == strcmp(key, "masksProperties")) {
+            parseMaskProperty(layer);
+        } else if (0 == strcmp(key, "ao")) {
+            layer->mAutoOrient = GetInt();
+        } else if (0 == strcmp(key, "hd")) {
+            layer->setHidden(GetBool());
+        } else {
+#ifdef DEBUG_PARSER
+            vWarning << "Layer Attribute Skipped : " << key;
+#endif
+            Skip(key);
+        }
+    }
+
+    if (!layer->mTransform) {
+        // not a valid layer
+        return nullptr;
+    }
+
+    // make sure layer data is not corrupted.
+    if (layer->hasParent() && (layer->id() == layer->parentId()))
+        return nullptr;
+
+    if (layer->mExtra) layer->mExtra->mCompRef = compRef;
+
+    if (layer->hidden()) {
+        // if layer is hidden, only data that is usefull is its
+        // transform matrix(when it is a parent of some other layer)
+        // so force it to be a Null Layer and release all resource.
+        layer->setStatic(layer->mTransform->isStatic());
+        layer->mLayerType = model::Layer::Type::Null;
+        layer->mChildren = {};
+        return layer;
+    }
+
+    // update the static property of layer
+    bool staticFlag = true;
+    for (const auto &child : layer->mChildren) {
+        staticFlag &= child->isStatic();
+    }
+
+    if (layer->hasMask() && layer->mExtra) {
+        for (const auto &mask : layer->mExtra->mMasks) {
+            staticFlag &= mask->isStatic();
+        }
+    }
+
+    layer->setStatic(staticFlag && layer->mTransform->isStatic());
+
+    return layer;
+}
+
+/* Parse the "masksProperties" array and attach one Mask per entry. */
+void LottieParserImpl::parseMaskProperty(model::Layer *layer)
+{
+    EnterArray();
+    while (NextArrayValue()) {
+        layer->extra()->mMasks.push_back(parseMaskObject());
+    }
+}
+
+/* Parse a single mask object: inversion flag, blend mode (first letter
+ * of the "mode" string), shape path and opacity. */
+model::Mask *LottieParserImpl::parseMaskObject()
+{
+    auto obj = allocator().make<model::Mask>();
+
+    EnterObject();
+    while (const char *key = NextObjectKey()) {
+        if (0 == strcmp(key, "inv")) {
+            obj->mInv = GetBool();
+        } else if (0 == strcmp(key, "mode")) {
+            const char *str = GetString();
+            if (!str) {
+                obj->mMode = model::Mask::Mode::None;
+                continue;
+            }
+            // Only the first character distinguishes the modes
+            // (n/a/s/i/f).
+            switch (str[0]) {
+            case 'n':
+                obj->mMode = model::Mask::Mode::None;
+                break;
+            case 'a':
+                obj->mMode = model::Mask::Mode::Add;
+                break;
+            case 's':
+                obj->mMode = model::Mask::Mode::Substarct;
+                break;
+            case 'i':
+                obj->mMode = model::Mask::Mode::Intersect;
+                break;
+            case 'f':
+                obj->mMode = model::Mask::Mode::Difference;
+                break;
+            default:
+                obj->mMode = model::Mask::Mode::None;
+                break;
+            }
+        } else if (0 == strcmp(key, "pt")) {
+            parseShapeProperty(obj->mShape);
+        } else if (0 == strcmp(key, "o")) {
+            parseProperty(obj->mOpacity);
+        } else {
+            Skip(key);
+        }
+    }
+    obj->mIsStatic = obj->mShape.isStatic() && obj->mOpacity.isStatic();
+    return obj;
+}
+
+/* Parse the "shapes" array of a layer, one shape object per entry. */
+void LottieParserImpl::parseShapesAttr(model::Layer *layer)
+{
+    EnterArray();
+    while (NextArrayValue()) {
+        parseObject(layer);
+    }
+}
+
+/* Dispatch on the shape "ty" string and parse the matching object.
+ * Also flags the current layer when the object implies extra work
+ * (rounded corners, gradients, path operators, repeaters).  Returns
+ * nullptr for unsupported types (e.g. merge paths "mm"). */
+model::Object *LottieParserImpl::parseObjectTypeAttr()
+{
+    const char *type = GetString();
+    if (0 == strcmp(type, "gr")) {
+        return parseGroupObject();
+    } else if (0 == strcmp(type, "rc")) {
+        return parseRectObject();
+    } else if (0 == strcmp(type, "rd")) {
+        curLayerRef->mHasRoundedCorner = true;
+        return parseRoundedCorner();
+    } else if (0 == strcmp(type, "el")) {
+        return parseEllipseObject();
+    } else if (0 == strcmp(type, "tr")) {
+        return parseTransformObject();
+    } else if (0 == strcmp(type, "fl")) {
+        return parseFillObject();
+    } else if (0 == strcmp(type, "st")) {
+        return parseStrokeObject();
+    } else if (0 == strcmp(type, "gf")) {
+        curLayerRef->mHasGradient = true;
+        return parseGFillObject();
+    } else if (0 == strcmp(type, "gs")) {
+        curLayerRef->mHasGradient = true;
+        return parseGStrokeObject();
+    } else if (0 == strcmp(type, "sh")) {
+        return parseShapeObject();
+    } else if (0 == strcmp(type, "sr")) {
+        return parsePolystarObject();
+    } else if (0 == strcmp(type, "tm")) {
+        curLayerRef->mHasPathOperator = true;
+        return parseTrimObject();
+    } else if (0 == strcmp(type, "rp")) {
+        curLayerRef->mHasRepeater = true;
+        return parseReapeaterObject();
+    } else if (0 == strcmp(type, "mm")) {
+        vWarning << "Merge Path is not supported yet";
+        return nullptr;
+    } else {
+#ifdef DEBUG_PARSER
+        vDebug << "The Object Type not yet handled = " << type;
+#endif
+        return nullptr;
+    }
+}
+
+/* Parse one shape object and, when valid and not hidden, append it to
+ * the parent group.  Rounded-corner objects are additionally applied to
+ * the rects already present in the parent. */
+void LottieParserImpl::parseObject(model::Group *parent)
+{
+    EnterObject();
+    while (const char *key = NextObjectKey()) {
+        if (0 == strcmp(key, "ty")) {
+            auto child = parseObjectTypeAttr();
+            if (child && !child->hidden()) {
+                if (child->type() == model::Object::Type::RoundedCorner) {
+                    updateRoundedCorner(parent, static_cast<model::RoundedCorner *>(child));
+                }
+                parent->mChildren.push_back(child);
+            }
+        } else {
+            Skip(key);
+        }
+    }
+}
+
+/* Recursively attach a rounded-corner modifier to every Rect inside
+ * the group.  An animated radius also clears the static flag on the
+ * rect and its enclosing group. */
+void LottieParserImpl::updateRoundedCorner(model::Group *group, model::RoundedCorner *rc)
+{
+    for(auto &e : group->mChildren)
+    {
+        if (e->type() == model::Object::Type::Rect) {
+            static_cast<model::Rect *>(e)->mRoundedCorner = rc;
+            if (!rc->isStatic()) {
+                e->setStatic(false);
+                group->setStatic(false);
+                //@TODO need to propagate.
+            }
+        } else if ( e->type() == model::Object::Type::Group) {
+            updateRoundedCorner(static_cast<model::Group *>(e), rc);
+        }
+    }
+}
+
+/* Parse a shape group ("gr").  A trailing Transform child in "it" is
+ * pulled out into mTransform.  NOTE(review): the static flag is only
+ * updated when a transform was found — groups without one keep their
+ * default static state; confirm intended. */
+model::Object *LottieParserImpl::parseGroupObject()
+{
+    auto group = allocator().make<model::Group>();
+
+    while (const char *key = NextObjectKey()) {
+        if (0 == strcmp(key, "nm")) {
+            group->setName(GetString());
+        } else if (0 == strcmp(key, "it")) {
+            EnterArray();
+            while (NextArrayValue()) {
+                parseObject(group);
+            }
+            if (!group->mChildren.empty()
+                    && group->mChildren.back()->type()
+                            == model::Object::Type::Transform) {
+                group->mTransform =
+                    static_cast<model::Transform *>(group->mChildren.back());
+                group->mChildren.pop_back();
+            }
+        } else {
+            Skip(key);
+        }
+    }
+    bool staticFlag = true;
+    for (const auto &child : group->mChildren) {
+        staticFlag &= child->isStatic();
+    }
+
+    if (group->mTransform) {
+        group->setStatic(staticFlag && group->mTransform->isStatic());
+    }
+
+    return group;
+}
+
+/*
+ * https://github.com/airbnb/lottie-web/blob/master/docs/json/shapes/rect.json
+ *
+ * Parse a rectangle shape: position, size, corner roundness and draw
+ * direction.
+ */
+model::Rect *LottieParserImpl::parseRectObject()
+{
+    auto obj = allocator().make<model::Rect>();
+
+    while (const char *key = NextObjectKey()) {
+        if (0 == strcmp(key, "nm")) {
+            obj->setName(GetString());
+        } else if (0 == strcmp(key, "p")) {
+            parseProperty(obj->mPos);
+        } else if (0 == strcmp(key, "s")) {
+            parseProperty(obj->mSize);
+        } else if (0 == strcmp(key, "r")) {
+            parseProperty(obj->mRound);
+        } else if (0 == strcmp(key, "d")) {
+            obj->mDirection = GetInt();
+        } else if (0 == strcmp(key, "hd")) {
+            obj->setHidden(GetBool());
+        } else {
+            Skip(key);
+        }
+    }
+    obj->setStatic(obj->mPos.isStatic() && obj->mSize.isStatic() &&
+                   obj->mRound.isStatic());
+    return obj;
+}
+
+/*
+ * Parse a rounded-corner modifier ("rd"): only the radius property
+ * matters.  (The URL below was copy-pasted from the rect parser.)
+ */
+model::RoundedCorner *LottieParserImpl::parseRoundedCorner()
+{
+    auto obj = allocator().make<model::RoundedCorner>();
+
+    while (const char *key = NextObjectKey()) {
+        if (0 == strcmp(key, "nm")) {
+            obj->setName(GetString());
+        } else if (0 == strcmp(key, "r")) {
+            parseProperty(obj->mRadius);
+        } else if (0 == strcmp(key, "hd")) {
+            obj->setHidden(GetBool());
+        } else {
+            Skip(key);
+        }
+    }
+    obj->setStatic(obj->mRadius.isStatic());
+    return obj;
+}
+
+/*
+ * https://github.com/airbnb/lottie-web/blob/master/docs/json/shapes/ellipse.json
+ *
+ * Parse an ellipse shape: position, size and draw direction.
+ */
+model::Ellipse *LottieParserImpl::parseEllipseObject()
+{
+    auto obj = allocator().make<model::Ellipse>();
+
+    while (const char *key = NextObjectKey()) {
+        if (0 == strcmp(key, "nm")) {
+            obj->setName(GetString());
+        } else if (0 == strcmp(key, "p")) {
+            parseProperty(obj->mPos);
+        } else if (0 == strcmp(key, "s")) {
+            parseProperty(obj->mSize);
+        } else if (0 == strcmp(key, "d")) {
+            obj->mDirection = GetInt();
+        } else if (0 == strcmp(key, "hd")) {
+            obj->setHidden(GetBool());
+        } else {
+            Skip(key);
+        }
+    }
+    obj->setStatic(obj->mPos.isStatic() && obj->mSize.isStatic());
+    return obj;
+}
+
+/*
+ * https://github.com/airbnb/lottie-web/blob/master/docs/json/shapes/shape.json
+ *
+ * Parse a free-form path shape ("sh"): the bezier data lives in "ks".
+ */
+model::Path *LottieParserImpl::parseShapeObject()
+{
+    auto obj = allocator().make<model::Path>();
+
+    while (const char *key = NextObjectKey()) {
+        if (0 == strcmp(key, "nm")) {
+            obj->setName(GetString());
+        } else if (0 == strcmp(key, "ks")) {
+            parseShapeProperty(obj->mShape);
+        } else if (0 == strcmp(key, "d")) {
+            obj->mDirection = GetInt();
+        } else if (0 == strcmp(key, "hd")) {
+            obj->setHidden(GetBool());
+        } else {
+#ifdef DEBUG_PARSER
+            vDebug << "Shape property ignored :" << key;
+#endif
+            Skip(key);
+        }
+    }
+    obj->setStatic(obj->mShape.isStatic());
+
+    return obj;
+}
+
+/*
+ * https://github.com/airbnb/lottie-web/blob/master/docs/json/shapes/star.json
+ *
+ * Parse a polystar shape ("sr"): star or polygon depending on "sy",
+ * with inner/outer radius and roundness properties.
+ */
+model::Polystar *LottieParserImpl::parsePolystarObject()
+{
+    auto obj = allocator().make<model::Polystar>();
+
+    while (const char *key = NextObjectKey()) {
+        if (0 == strcmp(key, "nm")) {
+            obj->setName(GetString());
+        } else if (0 == strcmp(key, "p")) {
+            parseProperty(obj->mPos);
+        } else if (0 == strcmp(key, "pt")) {
+            parseProperty(obj->mPointCount);
+        } else if (0 == strcmp(key, "ir")) {
+            parseProperty(obj->mInnerRadius);
+        } else if (0 == strcmp(key, "is")) {
+            parseProperty(obj->mInnerRoundness);
+        } else if (0 == strcmp(key, "or")) {
+            parseProperty(obj->mOuterRadius);
+        } else if (0 == strcmp(key, "os")) {
+            parseProperty(obj->mOuterRoundness);
+        } else if (0 == strcmp(key, "r")) {
+            parseProperty(obj->mRotation);
+        } else if (0 == strcmp(key, "sy")) {
+            int starType = GetInt();
+            if (starType == 1) obj->mPolyType = model::Polystar::PolyType::Star;
+            if (starType == 2)
+                obj->mPolyType = model::Polystar::PolyType::Polygon;
+        } else if (0 == strcmp(key, "d")) {
+            obj->mDirection = GetInt();
+        } else if (0 == strcmp(key, "hd")) {
+            obj->setHidden(GetBool());
+        } else {
+#ifdef DEBUG_PARSER
+            vDebug << "Polystar property ignored :" << key;
+#endif
+            Skip(key);
+        }
+    }
+    obj->setStatic(
+        obj->mPos.isStatic() && obj->mPointCount.isStatic() &&
+        obj->mInnerRadius.isStatic() && obj->mInnerRoundness.isStatic() &&
+        obj->mOuterRadius.isStatic() && obj->mOuterRoundness.isStatic() &&
+        obj->mRotation.isStatic());
+
+    return obj;
+}
+
+/* Translate the trim "m" attribute: 1 = simultaneously, 2 =
+ * individually.  Anything else flags a parse error but still yields a
+ * usable default. */
+model::Trim::TrimType LottieParserImpl::getTrimType()
+{
+    const int m = GetInt();
+    if (m == 1) return model::Trim::TrimType::Simultaneously;
+    if (m == 2) return model::Trim::TrimType::Individually;
+    Error();
+    return model::Trim::TrimType::Simultaneously;
+}
+
+/*
+ * https://github.com/airbnb/lottie-web/blob/master/docs/json/shapes/trim.json
+ *
+ * Parse a trim-path operator ("tm"): start/end/offset plus trim mode.
+ */
+model::Trim *LottieParserImpl::parseTrimObject()
+{
+    auto obj = allocator().make<model::Trim>();
+
+    while (const char *key = NextObjectKey()) {
+        if (0 == strcmp(key, "nm")) {
+            obj->setName(GetString());
+        } else if (0 == strcmp(key, "s")) {
+            parseProperty(obj->mStart);
+        } else if (0 == strcmp(key, "e")) {
+            parseProperty(obj->mEnd);
+        } else if (0 == strcmp(key, "o")) {
+            parseProperty(obj->mOffset);
+        } else if (0 == strcmp(key, "m")) {
+            obj->mTrimType = getTrimType();
+        } else if (0 == strcmp(key, "hd")) {
+            obj->setHidden(GetBool());
+        } else {
+#ifdef DEBUG_PARSER
+            vDebug << "Trim property ignored :" << key;
+#endif
+            Skip(key);
+        }
+    }
+    obj->setStatic(obj->mStart.isStatic() && obj->mEnd.isStatic() &&
+                   obj->mOffset.isStatic());
+    return obj;
+}
+
+/* Parse a repeater's per-copy transform object ("tr"): anchor,
+ * position, rotation, scale, and the start/end opacity ramp. */
+void LottieParserImpl::getValue(model::Repeater::Transform &obj)
+{
+    EnterObject();
+
+    while (const char *key = NextObjectKey()) {
+        if (0 == strcmp(key, "a")) {
+            parseProperty(obj.mAnchor);
+        } else if (0 == strcmp(key, "p")) {
+            parseProperty(obj.mPosition);
+        } else if (0 == strcmp(key, "r")) {
+            parseProperty(obj.mRotation);
+        } else if (0 == strcmp(key, "s")) {
+            parseProperty(obj.mScale);
+        } else if (0 == strcmp(key, "so")) {
+            parseProperty(obj.mStartOpacity);
+        } else if (0 == strcmp(key, "eo")) {
+            parseProperty(obj.mEndOpacity);
+        } else {
+            Skip(key);
+        }
+    }
+}
+
+/* Parse a repeater ("rp").  mMaxCopies caches the largest copy count
+ * seen across keyframes so render buffers can be sized up front. */
+model::Repeater *LottieParserImpl::parseReapeaterObject()
+{
+    auto obj = allocator().make<model::Repeater>();
+
+    obj->setContent(allocator().make<model::Group>());
+
+    while (const char *key = NextObjectKey()) {
+        if (0 == strcmp(key, "nm")) {
+            obj->setName(GetString());
+        } else if (0 == strcmp(key, "c")) {
+            parseProperty(obj->mCopies);
+            float maxCopy = 0.0;
+            if (!obj->mCopies.isStatic()) {
+                // Animated copy count: scan every keyframe's start and
+                // end values for the maximum.
+                for (auto &keyFrame : obj->mCopies.animation().frames_) {
+                    if (maxCopy < keyFrame.value_.start_)
+                        maxCopy = keyFrame.value_.start_;
+                    if (maxCopy < keyFrame.value_.end_)
+                        maxCopy = keyFrame.value_.end_;
+                }
+            } else {
+                maxCopy = obj->mCopies.value();
+            }
+            obj->mMaxCopies = maxCopy;
+        } else if (0 == strcmp(key, "o")) {
+            parseProperty(obj->mOffset);
+        } else if (0 == strcmp(key, "tr")) {
+            getValue(obj->mTransform);
+        } else if (0 == strcmp(key, "hd")) {
+            obj->setHidden(GetBool());
+        } else {
+#ifdef DEBUG_PARSER
+            vDebug << "Repeater property ignored :" << key;
+#endif
+            Skip(key);
+        }
+    }
+    obj->setStatic(obj->mCopies.isStatic() && obj->mOffset.isStatic() &&
+                   obj->mTransform.isStatic());
+
+    return obj;
+}
+
+/*
+ * https://github.com/airbnb/lottie-web/blob/master/docs/json/shapes/transform.json
+ *
+ * Parse a transform ("ks"/"tr"): anchor, position (optionally split
+ * into separate x/y channels), rotation, scale, opacity, and 3D
+ * rotation channels when ddd is set.
+ * NOTE(review): the "rx"/"ry"/"rz" branches return nullptr mid-object
+ * when no extra data exists, leaving the reader inside the JSON object
+ * — confirm callers treat that as a fatal parse error.
+ */
+model::Transform *LottieParserImpl::parseTransformObject(bool ddd)
+{
+    auto objT = allocator().make<model::Transform>();
+
+    auto obj = allocator().make<model::Transform::Data>();
+    if (ddd) {
+        obj->createExtraData();
+        obj->mExtra->m3DData = true;
+    }
+
+    while (const char *key = NextObjectKey()) {
+        if (0 == strcmp(key, "nm")) {
+            objT->setName(GetString());
+        } else if (0 == strcmp(key, "a")) {
+            parseProperty(obj->mAnchor);
+        } else if (0 == strcmp(key, "p")) {
+            EnterObject();
+            bool separate = false;
+            while (const char *key = NextObjectKey()) {
+                if (0 == strcmp(key, "k")) {
+                    parsePropertyHelper(obj->mPosition);
+                } else if (0 == strcmp(key, "s")) {
+                    obj->createExtraData();
+                    obj->mExtra->mSeparate = GetBool();
+                    separate = true;
+                } else if (separate && (0 == strcmp(key, "x"))) {
+                    parseProperty(obj->mExtra->mSeparateX);
+                } else if (separate && (0 == strcmp(key, "y"))) {
+                    parseProperty(obj->mExtra->mSeparateY);
+                } else {
+                    Skip(key);
+                }
+            }
+        } else if (0 == strcmp(key, "r")) {
+            parseProperty(obj->mRotation);
+        } else if (0 == strcmp(key, "s")) {
+            parseProperty(obj->mScale);
+        } else if (0 == strcmp(key, "o")) {
+            parseProperty(obj->mOpacity);
+        } else if (0 == strcmp(key, "hd")) {
+            objT->setHidden(GetBool());
+        } else if (0 == strcmp(key, "rx")) {
+            if (!obj->mExtra) return nullptr;
+            parseProperty(obj->mExtra->m3DRx);
+        } else if (0 == strcmp(key, "ry")) {
+            if (!obj->mExtra) return nullptr;
+            parseProperty(obj->mExtra->m3DRy);
+        } else if (0 == strcmp(key, "rz")) {
+            if (!obj->mExtra) return nullptr;
+            parseProperty(obj->mExtra->m3DRz);
+        } else {
+            Skip(key);
+        }
+    }
+    bool isStatic = obj->mAnchor.isStatic() && obj->mPosition.isStatic() &&
+                    obj->mRotation.isStatic() && obj->mScale.isStatic() &&
+                    obj->mOpacity.isStatic();
+    if (obj->mExtra) {
+        isStatic = isStatic && obj->mExtra->m3DRx.isStatic() &&
+                   obj->mExtra->m3DRy.isStatic() &&
+                   obj->mExtra->m3DRz.isStatic() &&
+                   obj->mExtra->mSeparateX.isStatic() &&
+                   obj->mExtra->mSeparateY.isStatic();
+    }
+
+    objT->set(obj, isStatic);
+
+    return objT;
+}
+
+/*
+ * https://github.com/airbnb/lottie-web/blob/master/docs/json/shapes/fill.json
+ *
+ * Parse a solid fill ("fl"): color, opacity and fill rule.
+ */
+model::Fill *LottieParserImpl::parseFillObject()
+{
+    auto obj = allocator().make<model::Fill>();
+
+    while (const char *key = NextObjectKey()) {
+        if (0 == strcmp(key, "nm")) {
+            obj->setName(GetString());
+        } else if (0 == strcmp(key, "c")) {
+            parseProperty(obj->mColor);
+        } else if (0 == strcmp(key, "o")) {
+            parseProperty(obj->mOpacity);
+        } else if (0 == strcmp(key, "fillEnabled")) {
+            obj->mEnabled = GetBool();
+        } else if (0 == strcmp(key, "r")) {
+            obj->mFillRule = getFillRule();
+        } else if (0 == strcmp(key, "hd")) {
+            obj->setHidden(GetBool());
+        } else {
+#ifdef DEBUG_PARSER
+            vWarning << "Fill property skipped = " << key;
+#endif
+            Skip(key);
+        }
+    }
+    obj->setStatic(obj->mColor.isStatic() && obj->mOpacity.isStatic());
+
+    return obj;
+}
+
+/*
+ * https://github.com/airbnb/lottie-web/blob/master/docs/json/helpers/lineCap.json
+ *
+ * "lc": 1 = flat/butt, 2 = round, anything else = square.
+ */
+CapStyle LottieParserImpl::getLineCap()
+{
+    const int lc = GetInt();
+    if (lc == 1) return CapStyle::Flat;
+    if (lc == 2) return CapStyle::Round;
+    return CapStyle::Square;
+}
+
+/* "r": 2 = even-odd; everything else (including 1) = non-zero
+ * winding. */
+FillRule LottieParserImpl::getFillRule()
+{
+    const int r = GetInt();
+    if (r == 2) return FillRule::EvenOdd;
+    return FillRule::Winding;
+}
+
+/*
+ * https://github.com/airbnb/lottie-web/blob/master/docs/json/helpers/lineJoin.json
+ *
+ * "lj": 1 = miter, 2 = round, anything else = bevel.
+ */
+JoinStyle LottieParserImpl::getLineJoin()
+{
+    const int lj = GetInt();
+    if (lj == 1) return JoinStyle::Miter;
+    if (lj == 2) return JoinStyle::Round;
+    return JoinStyle::Bevel;
+}
+
+/*
+ * https://github.com/airbnb/lottie-web/blob/master/docs/json/shapes/stroke.json
+ *
+ * Parse a solid stroke ("st"): color, opacity, width, caps/joins,
+ * miter limit and optional dash pattern.
+ */
+model::Stroke *LottieParserImpl::parseStrokeObject()
+{
+    auto obj = allocator().make<model::Stroke>();
+
+    while (const char *key = NextObjectKey()) {
+        if (0 == strcmp(key, "nm")) {
+            obj->setName(GetString());
+        } else if (0 == strcmp(key, "c")) {
+            parseProperty(obj->mColor);
+        } else if (0 == strcmp(key, "o")) {
+            parseProperty(obj->mOpacity);
+        } else if (0 == strcmp(key, "w")) {
+            parseProperty(obj->mWidth);
+        } else if (0 == strcmp(key, "fillEnabled")) {
+            obj->mEnabled = GetBool();
+        } else if (0 == strcmp(key, "lc")) {
+            obj->mCapStyle = getLineCap();
+        } else if (0 == strcmp(key, "lj")) {
+            obj->mJoinStyle = getLineJoin();
+        } else if (0 == strcmp(key, "ml")) {
+            obj->mMiterLimit = GetDouble();
+        } else if (0 == strcmp(key, "d")) {
+            parseDashProperty(obj->mDash);
+        } else if (0 == strcmp(key, "hd")) {
+            obj->setHidden(GetBool());
+        } else {
+#ifdef DEBUG_PARSER
+            vWarning << "Stroke property skipped = " << key;
+#endif
+            Skip(key);
+        }
+    }
+    obj->setStatic(obj->mColor.isStatic() && obj->mOpacity.isStatic() &&
+                   obj->mWidth.isStatic() && obj->mDash.isStatic());
+    return obj;
+}
+
+/* Handle one key of a gradient fill/stroke shared by "gf" and "gs":
+ * gradient type, opacity, start/end points, highlight, and the color
+ * stop data under "g".  Note the static flag is recomputed on every
+ * key — harmless, the last invocation wins. */
+void LottieParserImpl::parseGradientProperty(model::Gradient *obj,
+                                             const char *    key)
+{
+    if (0 == strcmp(key, "t")) {
+        obj->mGradientType = GetInt();
+    } else if (0 == strcmp(key, "o")) {
+        parseProperty(obj->mOpacity);
+    } else if (0 == strcmp(key, "s")) {
+        parseProperty(obj->mStartPoint);
+    } else if (0 == strcmp(key, "e")) {
+        parseProperty(obj->mEndPoint);
+    } else if (0 == strcmp(key, "h")) {
+        parseProperty(obj->mHighlightLength);
+    } else if (0 == strcmp(key, "a")) {
+        parseProperty(obj->mHighlightAngle);
+    } else if (0 == strcmp(key, "g")) {
+        EnterObject();
+        while (const char *key = NextObjectKey()) {
+            if (0 == strcmp(key, "k")) {
+                parseProperty(obj->mGradient);
+            } else if (0 == strcmp(key, "p")) {
+                obj->mColorPoints = GetInt();
+            } else {
+                Skip(nullptr);
+            }
+        }
+    } else if (0 == strcmp(key, "hd")) {
+        obj->setHidden(GetBool());
+    } else {
+#ifdef DEBUG_PARSER
+        vWarning << "Gradient property skipped = " << key;
+#endif
+        Skip(key);
+    }
+    obj->setStatic(
+        obj->mOpacity.isStatic() && obj->mStartPoint.isStatic() &&
+        obj->mEndPoint.isStatic() && obj->mHighlightAngle.isStatic() &&
+        obj->mHighlightLength.isStatic() && obj->mGradient.isStatic());
+}
+
+/*
+ * https://github.com/airbnb/lottie-web/blob/master/docs/json/shapes/gfill.json
+ *
+ * Parse a gradient fill ("gf"); shared gradient keys are delegated to
+ * parseGradientProperty.
+ */
+model::GradientFill *LottieParserImpl::parseGFillObject()
+{
+    auto obj = allocator().make<model::GradientFill>();
+
+    while (const char *key = NextObjectKey()) {
+        if (0 == strcmp(key, "nm")) {
+            obj->setName(GetString());
+        } else if (0 == strcmp(key, "r")) {
+            obj->mFillRule = getFillRule();
+        } else {
+            parseGradientProperty(obj, key);
+        }
+    }
+    return obj;
+}
+
+/* Parse a stroke dash array ("d"): each element contributes one "v"
+ * value (dash/gap/offset) to the dash data. */
+void LottieParserImpl::parseDashProperty(model::Dash &dash)
+{
+    EnterArray();
+    while (NextArrayValue()) {
+        EnterObject();
+        while (const char *key = NextObjectKey()) {
+            if (0 == strcmp(key, "v")) {
+                dash.mData.emplace_back();
+                parseProperty(dash.mData.back());
+            } else {
+                Skip(key);
+            }
+        }
+    }
+}
+
+/*
+ * https://github.com/airbnb/lottie-web/blob/master/docs/json/shapes/gstroke.json
+ *
+ * Parse a gradient stroke ("gs"): stroke-specific keys here, gradient
+ * keys delegated to parseGradientProperty (which also sets the base
+ * static flag that is refined below).
+ */
+model::GradientStroke *LottieParserImpl::parseGStrokeObject()
+{
+    auto obj = allocator().make<model::GradientStroke>();
+
+    while (const char *key = NextObjectKey()) {
+        if (0 == strcmp(key, "nm")) {
+            obj->setName(GetString());
+        } else if (0 == strcmp(key, "w")) {
+            parseProperty(obj->mWidth);
+        } else if (0 == strcmp(key, "lc")) {
+            obj->mCapStyle = getLineCap();
+        } else if (0 == strcmp(key, "lj")) {
+            obj->mJoinStyle = getLineJoin();
+        } else if (0 == strcmp(key, "ml")) {
+            obj->mMiterLimit = GetDouble();
+        } else if (0 == strcmp(key, "d")) {
+            parseDashProperty(obj->mDash);
+        } else {
+            parseGradientProperty(obj, key);
+        }
+    }
+
+    obj->setStatic(obj->isStatic() && obj->mWidth.isStatic() &&
+                   obj->mDash.isStatic());
+    return obj;
+}
+
+/* Read an array of 2D points (an array of [x, y] arrays). */
+void LottieParserImpl::getValue(std::vector<VPointF> &v)
+{
+    EnterArray();
+    while (NextArrayValue()) {
+        EnterArray();
+        VPointF pt;
+        getValue(pt);
+        v.push_back(pt);
+    }
+}
+
+/* Read a point: consumes up to 4 numbers but only x and y are used
+ * (extra components are drained so the reader stays in sync). */
+void LottieParserImpl::getValue(VPointF &pt)
+{
+    float val[4] = {0.f};
+    int   i = 0;
+
+    if (PeekType() == kArrayType) EnterArray();
+
+    while (NextArrayValue()) {
+        const auto value = GetDouble();
+        if (i < 4) {
+            val[i++] = value;
+        }
+    }
+    pt.setX(val[0]);
+    pt.setY(val[1]);
+}
+
+/* Read a scalar: either a bare number or the first element of an
+ * array (remaining elements are discarded). */
+void LottieParserImpl::getValue(float &val)
+{
+    if (PeekType() == kArrayType) {
+        EnterArray();
+        if (NextArrayValue()) val = GetDouble();
+        // discard rest
+        while (NextArrayValue()) {
+            GetDouble();
+        }
+    } else if (PeekType() == kNumberType) {
+        val = GetDouble();
+    } else {
+        Error();
+    }
+}
+
+/* Read an RGB(A) color as up to 4 floats; only r/g/b are stored.
+ * NOTE(review): mColorFilter is invoked with the channel values —
+ * whether it mutates them depends on its signature, not visible here. */
+void LottieParserImpl::getValue(model::Color &color)
+{
+    float val[4] = {0.f};
+    int   i = 0;
+    if (PeekType() == kArrayType) EnterArray();
+
+    while (NextArrayValue()) {
+        const auto value = GetDouble();
+        if (i < 4) {
+            val[i++] = value;
+        }
+    }
+
+    if (mColorFilter) mColorFilter(val[0], val[1], val[2]);
+
+    color.r = val[0];
+    color.g = val[1];
+    color.b = val[2];
+}
+
+/* Read the raw gradient stop array into the gradient data buffer. */
+void LottieParserImpl::getValue(model::Gradient::Data &grad)
+{
+    if (PeekType() == kArrayType) EnterArray();
+
+    while (NextArrayValue()) {
+        grad.mGradient.push_back(GetDouble());
+    }
+}
+
+/* Read an integer: either a bare number or an array, in which case
+ * the LAST element wins (each iteration overwrites val). */
+void LottieParserImpl::getValue(int &val)
+{
+    if (PeekType() == kArrayType) {
+        EnterArray();
+        while (NextArrayValue()) {
+            val = GetInt();
+        }
+    } else if (PeekType() == kNumberType) {
+        val = GetInt();
+    } else {
+        Error();
+    }
+}
+
+/* Parse a bezier shape object ("i" in-tangents, "o" out-tangents,
+ * "v" vertices, "c" closed flag) into mPathInfo and convert it into
+ * render-ready path points. */
+void LottieParserImpl::parsePathInfo()
+{
+    mPathInfo.reset();
+
+    /*
+     * The shape object could be wrapped by a array
+     * if its part of the keyframe object
+     */
+    bool arrayWrapper = (PeekType() == kArrayType);
+    if (arrayWrapper) EnterArray();
+
+    EnterObject();
+    while (const char *key = NextObjectKey()) {
+        if (0 == strcmp(key, "i")) {
+            getValue(mPathInfo.mInPoint);
+        } else if (0 == strcmp(key, "o")) {
+            getValue(mPathInfo.mOutPoint);
+        } else if (0 == strcmp(key, "v")) {
+            getValue(mPathInfo.mVertices);
+        } else if (0 == strcmp(key, "c")) {
+            mPathInfo.mClosed = GetBool();
+        } else {
+            Error();
+            Skip(nullptr);
+        }
+    }
+    // exit properly from the array
+    if (arrayWrapper) NextArrayValue();
+
+    mPathInfo.convert();
+}
+
+/* Read a path value: parse the bezier object then copy the converted
+ * points and closed flag into the model. */
+void LottieParserImpl::getValue(model::PathData &obj)
+{
+    parsePathInfo();
+    obj.mPoints = mPathInfo.mResult;
+    obj.mClosed = mPathInfo.mClosed;
+}
+
+/* Parse one bezier interpolator control point ({"x": .., "y": ..}). */
+VPointF LottieParserImpl::parseInperpolatorPoint()
+{
+    VPointF cp;
+    EnterObject();
+    while (const char *key = NextObjectKey()) {
+        if (0 == strcmp(key, "x")) {
+            getValue(cp.rx());
+        }
+        if (0 == strcmp(key, "y")) {
+            getValue(cp.ry());
+        }
+    }
+    return cp;
+}
+
+/* Position-keyframe specialization: consume the spatial tangent keys
+ * "ti"/"to" into the value.  Returns false when the key is not a
+ * tangent so the generic keyframe parser can handle it. */
+template <typename T>
+bool LottieParserImpl::parseKeyFrameValue(
+    const char *key, model::Value<T, model::Position> &value)
+{
+    if (0 == strcmp(key, "ti")) {
+        value.hasTangent_ = true;
+        getValue(value.inTangent_);
+    } else if (0 == strcmp(key, "to")) {
+        value.hasTangent_ = true;
+        getValue(value.outTangent_);
+    } else {
+        return false;
+    }
+    return true;
+}
+
+VInterpolator *LottieParserImpl::interpolator(VPointF inTangent,
+ VPointF outTangent,
+ std::string key)
+{
+ if (key.empty()) {
+ std::array<char, 20> temp;
+ snprintf(temp.data(), temp.size(), "%.2f_%.2f_%.2f_%.2f", inTangent.x(),
+ inTangent.y(), outTangent.x(), outTangent.y());
+ key = temp.data();
+ }
+
+ auto search = mInterpolatorCache.find(key);
+
+ if (search != mInterpolatorCache.end()) {
+ return search->second;
+ }
+
+ auto obj = allocator().make<VInterpolator>(outTangent, inTangent);
+ mInterpolatorCache[std::move(key)] = obj;
+ return obj;
+}
+
/*
 * https://github.com/airbnb/lottie-web/blob/master/docs/json/properties/multiDimensionalKeyframed.json
 */
// Parses one keyframe object and appends it to obj.frames_.
// Keyframe attributes: "t" start frame, "s" start value, "e" end value,
// "i"/"o" interpolator control points, "n" named interpolator key,
// "h" hold flag, "ti"/"to" position tangents (handled by
// parseKeyFrameValue). Note the per-attribute ordering dependencies:
// the underlying stream is consumed strictly in document order.
template <typename T, typename Tag>
void LottieParserImpl::parseKeyFrame(model::KeyFrames<T, Tag> &obj)
{
    // Tracks which attributes were seen for this keyframe, so the
    // fix-ups after the loop know what the document actually provided.
    struct ParsedField {
        std::string interpolatorKey;
        bool interpolator{false};
        bool value{false};
        bool hold{false};
        bool noEndValue{true};
    };

    EnterObject();
    ParsedField parsed;
    typename model::KeyFrames<T, Tag>::Frame keyframe;
    VPointF inTangent;
    VPointF outTangent;

    while (const char *key = NextObjectKey()) {
        if (0 == strcmp(key, "i")) {
            parsed.interpolator = true;
            inTangent = parseInperpolatorPoint();
        } else if (0 == strcmp(key, "o")) {
            outTangent = parseInperpolatorPoint();
        } else if (0 == strcmp(key, "t")) {
            keyframe.start_ = GetDouble();
        } else if (0 == strcmp(key, "s")) {
            parsed.value = true;
            getValue(keyframe.value_.start_);
            continue;
        } else if (0 == strcmp(key, "e")) {
            parsed.noEndValue = false;
            getValue(keyframe.value_.end_);
            continue;
        } else if (0 == strcmp(key, "n")) {
            // "n" may be a single name or an array of names; only the
            // first one is used as the interpolator cache key.
            if (PeekType() == kStringType) {
                parsed.interpolatorKey = GetStringObject();
            } else {
                EnterArray();
                while (NextArrayValue()) {
                    if (parsed.interpolatorKey.empty()) {
                        parsed.interpolatorKey = GetStringObject();
                    } else {
                        // skip rest of the strings
                        Skip(nullptr);
                    }
                }
            }
            continue;
        } else if (parseKeyFrameValue(key, keyframe.value_)) {
            continue;
        } else if (0 == strcmp(key, "h")) {
            parsed.hold = GetInt();
            continue;
        } else {
#ifdef DEBUG_PARSER
            vDebug << "key frame property skipped = " << key;
#endif
            Skip(key);
        }
    }

    auto &list = obj.frames_;
    if (!list.empty()) {
        // update the endFrame value of the previous keyframe
        list.back().end_ = keyframe.start_;
        // if no end value provided, copy start value to previous frame
        if (parsed.value && parsed.noEndValue) {
            list.back().value_.end_ = keyframe.value_.start_;
        }
    }

    if (parsed.hold) {
        // Hold frame: value stays constant until the next keyframe.
        keyframe.value_.end_ = keyframe.value_.start_;
        keyframe.end_ = keyframe.start_;
        list.push_back(std::move(keyframe));
    } else if (parsed.interpolator) {
        keyframe.interpolator_ = interpolator(
            inTangent, outTangent, std::move(parsed.interpolatorKey));
        list.push_back(std::move(keyframe));
    } else {
        // It's the last frame: discard it (it only closed the previous one).
    }
}
+
+/*
+ * https://github.com/airbnb/lottie-web/blob/master/docs/json/properties/shapeKeyframed.json
+ */
+
+/*
+ * https://github.com/airbnb/lottie-web/blob/master/docs/json/properties/shape.json
+ */
+void LottieParserImpl::parseShapeProperty(model::Property<model::PathData> &obj)
+{
+ EnterObject();
+ while (const char *key = NextObjectKey()) {
+ if (0 == strcmp(key, "k")) {
+ if (PeekType() == kArrayType) {
+ EnterArray();
+ while (NextArrayValue()) {
+ parseKeyFrame(obj.animation());
+ }
+ } else {
+ if (!obj.isStatic()) {
+ st_ = kError;
+ return;
+ }
+ getValue(obj.value());
+ }
+ } else {
+#ifdef DEBUG_PARSER
+ vDebug << "shape property ignored = " << key;
+#endif
+ Skip(nullptr);
+ }
+ }
+ obj.cache();
+}
+
// Parses the value of a property's "k" attribute, which may be a plain
// number (static scalar), an array of numbers (static multi-value), or
// an array of objects (keyframed animation). Dispatches accordingly.
template <typename T, typename Tag>
void LottieParserImpl::parsePropertyHelper(model::Property<T, Tag> &obj)
{
    if (PeekType() == kNumberType) {
        if (!obj.isStatic()) {
            st_ = kError;
            return;
        }
        /*single value property with no animation*/
        getValue(obj.value());
    } else {
        EnterArray();
        while (NextArrayValue()) {
            /* property with keyframe info*/
            if (PeekType() == kObjectType) {
                parseKeyFrame(obj.animation());
            } else {
                /* Read before modifying.
                 * as there is no way of knowing if the
                 * value of the array is either array of numbers
                 * or array of object without entering the array
                 * thats why this hack is there
                 */
                if (!obj.isStatic()) {
                    st_ = kError;
                    return;
                }
                /*multi value property with no animation*/
                getValue(obj.value());
                /*break here as we already reached end of array*/
                break;
            }
        }
        obj.cache();
    }
}
+
+/*
+ * https://github.com/airbnb/lottie-web/tree/master/docs/json/properties
+ */
+template <typename T>
+void LottieParserImpl::parseProperty(model::Property<T> &obj)
+{
+ EnterObject();
+ while (const char *key = NextObjectKey()) {
+ if (0 == strcmp(key, "k")) {
+ parsePropertyHelper(obj);
+ } else {
+ Skip(key);
+ }
+ }
+}
+
+#ifdef LOTTIE_DUMP_TREE_SUPPORT
+
+class ObjectInspector {
+public:
+ void visit(model::Composition *obj, std::string level)
+ {
+ vDebug << " { " << level << "Composition:: a: " << !obj->isStatic()
+ << ", v: " << obj->mVersion << ", stFm: " << obj->startFrame()
+ << ", endFm: " << obj->endFrame()
+ << ", W: " << obj->size().width()
+ << ", H: " << obj->size().height() << "\n";
+ level.append("\t");
+ visit(obj->mRootLayer, level);
+ level.erase(level.end() - 1, level.end());
+ vDebug << " } " << level << "Composition End\n";
+ }
+ void visit(model::Layer *obj, std::string level)
+ {
+ vDebug << level << "{ " << layerType(obj->mLayerType)
+ << ", name: " << obj->name() << ", id:" << obj->mId
+ << " Pid:" << obj->mParentId << ", a:" << !obj->isStatic()
+ << ", " << matteType(obj->mMatteType)
+ << ", mask:" << obj->hasMask() << ", inFm:" << obj->mInFrame
+ << ", outFm:" << obj->mOutFrame << ", stFm:" << obj->mStartFrame
+ << ", ts:" << obj->mTimeStreatch << ", ao:" << obj->autoOrient()
+ << ", W:" << obj->layerSize().width()
+ << ", H:" << obj->layerSize().height();
+
+ if (obj->mLayerType == model::Layer::Type::Image)
+ vDebug << level << "\t{ "
+ << "ImageInfo:"
+ << " W :" << obj->extra()->mAsset->mWidth
+ << ", H :" << obj->extra()->mAsset->mHeight << " }"
+ << "\n";
+ else {
+ vDebug << level;
+ }
+ visitChildren(static_cast<model::Group *>(obj), level);
+ vDebug << level << "} " << layerType(obj->mLayerType).c_str()
+ << ", id: " << obj->mId << "\n";
+ }
+ void visitChildren(model::Group *obj, std::string level)
+ {
+ level.append("\t");
+ for (const auto &child : obj->mChildren) visit(child, level);
+ if (obj->mTransform) visit(obj->mTransform, level);
+ }
+
+ void visit(model::Object *obj, std::string level)
+ {
+ switch (obj->type()) {
+ case model::Object::Type::Repeater: {
+ auto r = static_cast<model::Repeater *>(obj);
+ vDebug << level << "{ Repeater: name: " << obj->name()
+ << " , a:" << !obj->isStatic()
+ << ", copies:" << r->maxCopies()
+ << ", offset:" << r->offset(0);
+ visitChildren(r->mContent, level);
+ vDebug << level << "} Repeater";
+ break;
+ }
+ case model::Object::Type::Group: {
+ vDebug << level << "{ Group: name: " << obj->name()
+ << " , a:" << !obj->isStatic();
+ visitChildren(static_cast<model::Group *>(obj), level);
+ vDebug << level << "} Group";
+ break;
+ }
+ case model::Object::Type::Layer: {
+ visit(static_cast<model::Layer *>(obj), level);
+ break;
+ }
+ case model::Object::Type::Trim: {
+ vDebug << level << "{ Trim: name: " << obj->name()
+ << " , a:" << !obj->isStatic() << " }";
+ break;
+ }
+ case model::Object::Type::Rect: {
+ vDebug << level << "{ Rect: name: " << obj->name()
+ << " , a:" << !obj->isStatic() << " }";
+ break;
+ }
+ case model::Object::Type::RoundedCorner: {
+ vDebug << level << "{ RoundedCorner: name: " << obj->name()
+ << " , a:" << !obj->isStatic() << " }";
+ break;
+ }
+ case model::Object::Type::Ellipse: {
+ vDebug << level << "{ Ellipse: name: " << obj->name()
+ << " , a:" << !obj->isStatic() << " }";
+ break;
+ }
+ case model::Object::Type::Path: {
+ vDebug << level << "{ Shape: name: " << obj->name()
+ << " , a:" << !obj->isStatic() << " }";
+ break;
+ }
+ case model::Object::Type::Polystar: {
+ vDebug << level << "{ Polystar: name: " << obj->name()
+ << " , a:" << !obj->isStatic() << " }";
+ break;
+ }
+ case model::Object::Type::Transform: {
+ vDebug << level << "{ Transform: name: " << obj->name()
+ << " , a: " << !obj->isStatic() << " }";
+ break;
+ }
+ case model::Object::Type::Stroke: {
+ vDebug << level << "{ Stroke: name: " << obj->name()
+ << " , a:" << !obj->isStatic() << " }";
+ break;
+ }
+ case model::Object::Type::GStroke: {
+ vDebug << level << "{ GStroke: name: " << obj->name()
+ << " , a:" << !obj->isStatic() << " }";
+ break;
+ }
+ case model::Object::Type::Fill: {
+ vDebug << level << "{ Fill: name: " << obj->name()
+ << " , a:" << !obj->isStatic() << " }";
+ break;
+ }
+ case model::Object::Type::GFill: {
+ auto f = static_cast<model::GradientFill *>(obj);
+ vDebug << level << "{ GFill: name: " << obj->name()
+ << " , a:" << !f->isStatic() << ", ty:" << f->mGradientType
+ << ", s:" << f->mStartPoint.value(0)
+ << ", e:" << f->mEndPoint.value(0) << " }";
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ std::string matteType(model::MatteType type)
+ {
+ switch (type) {
+ case model::MatteType::None:
+ return "Matte::None";
+ break;
+ case model::MatteType::Alpha:
+ return "Matte::Alpha";
+ break;
+ case model::MatteType::AlphaInv:
+ return "Matte::AlphaInv";
+ break;
+ case model::MatteType::Luma:
+ return "Matte::Luma";
+ break;
+ case model::MatteType::LumaInv:
+ return "Matte::LumaInv";
+ break;
+ default:
+ return "Matte::Unknown";
+ break;
+ }
+ }
+ std::string layerType(model::Layer::Type type)
+ {
+ switch (type) {
+ case model::Layer::Type::Precomp:
+ return "Layer::Precomp";
+ break;
+ case model::Layer::Type::Null:
+ return "Layer::Null";
+ break;
+ case model::Layer::Type::Shape:
+ return "Layer::Shape";
+ break;
+ case model::Layer::Type::Solid:
+ return "Layer::Solid";
+ break;
+ case model::Layer::Type::Image:
+ return "Layer::Image";
+ break;
+ case model::Layer::Type::Text:
+ return "Layer::Text";
+ break;
+ default:
+ return "Layer::Unknown";
+ break;
+ }
+ }
+};
+
+#endif
+
+std::shared_ptr<model::Composition> model::parse(char * str,
+ std::string dir_path,
+ model::ColorFilter filter)
+{
+ LottieParserImpl obj(str, std::move(dir_path), std::move(filter));
+
+ if (obj.VerifyType()) {
+ obj.parseComposition();
+ auto composition = obj.composition();
+ if (composition) {
+ composition->processRepeaterObjects();
+ composition->updateStats();
+
+#ifdef LOTTIE_DUMP_TREE_SUPPORT
+ ObjectInspector inspector;
+ inspector.visit(composition.get(), "");
+#endif
+
+ return composition;
+ }
+ }
+
+ vWarning << "Input data is not Lottie format!";
+ return {};
+}
+
+RAPIDJSON_DIAG_POP
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_lottieproxymodel.cpp b/vendor/github.com/Benau/go_rlottie/lottie_lottieproxymodel.cpp
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_lottieproxymodel.cpp
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_allocators.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_allocators.h
new file mode 100644
index 00000000..20109a19
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_allocators.h
@@ -0,0 +1,284 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_ALLOCATORS_H_
+#define RAPIDJSON_ALLOCATORS_H_
+
+#include "lottie_rapidjson_rapidjson.h"
+
+RAPIDJSON_NAMESPACE_BEGIN
+
+///////////////////////////////////////////////////////////////////////////////
+// Allocator
+
+/*! \class rapidjson::Allocator
+ \brief Concept for allocating, resizing and freeing memory block.
+
+ Note that Malloc() and Realloc() are non-static but Free() is static.
+
+ So if an allocator need to support Free(), it needs to put its pointer in
+ the header of memory block.
+
+\code
+concept Allocator {
+ static const bool kNeedFree; //!< Whether this allocator needs to call Free().
+
+ // Allocate a memory block.
+ // \param size of the memory block in bytes.
+ // \returns pointer to the memory block.
+ void* Malloc(size_t size);
+
+ // Resize a memory block.
+ // \param originalPtr The pointer to current memory block. Null pointer is permitted.
+ // \param originalSize The current size in bytes. (Design issue: since some allocator may not book-keep this, explicitly pass to it can save memory.)
+ // \param newSize the new size in bytes.
+ void* Realloc(void* originalPtr, size_t originalSize, size_t newSize);
+
+ // Free a memory block.
+ // \param pointer to the memory block. Null pointer is permitted.
+ static void Free(void *ptr);
+};
+\endcode
+*/
+
+
+/*! \def RAPIDJSON_ALLOCATOR_DEFAULT_CHUNK_CAPACITY
+ \ingroup RAPIDJSON_CONFIG
+ \brief User-defined kDefaultChunkCapacity definition.
+
+ User can define this as any \c size that is a power of 2.
+*/
+
+#ifndef RAPIDJSON_ALLOCATOR_DEFAULT_CHUNK_CAPACITY
+#define RAPIDJSON_ALLOCATOR_DEFAULT_CHUNK_CAPACITY (64 * 1024)
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// CrtAllocator
+
+//! C-runtime library allocator.
+/*! This class is just wrapper for standard C library memory routines.
+ \note implements Allocator concept
+*/
+class CrtAllocator {
+public:
+ static const bool kNeedFree = true;
+ void* Malloc(size_t size) {
+ if (size) // behavior of malloc(0) is implementation defined.
+ return RAPIDJSON_MALLOC(size);
+ else
+ return NULL; // standardize to returning NULL.
+ }
+ void* Realloc(void* originalPtr, size_t originalSize, size_t newSize) {
+ (void)originalSize;
+ if (newSize == 0) {
+ RAPIDJSON_FREE(originalPtr);
+ return NULL;
+ }
+ return RAPIDJSON_REALLOC(originalPtr, newSize);
+ }
+ static void Free(void *ptr) { RAPIDJSON_FREE(ptr); }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// MemoryPoolAllocator
+
//! Default memory allocator used by the parser and DOM.
/*! This allocator allocates memory blocks from pre-allocated memory chunks.

    It does not free memory blocks. And Realloc() only allocates new memory.

    The memory chunks are allocated by BaseAllocator, which is CrtAllocator by default.

    User may also supply a buffer as the first chunk.

    If the user-buffer is full then additional chunks are allocated by BaseAllocator.

    The user-buffer is not deallocated by this allocator.

    \tparam BaseAllocator the allocator type for allocating memory chunks. Default is CrtAllocator.
    \note implements Allocator concept
*/
template <typename BaseAllocator = CrtAllocator>
class MemoryPoolAllocator {
public:
    static const bool kNeedFree = false;    //!< Tell users that no need to call Free() with this allocator. (concept Allocator)

    //! Constructor with chunkSize.
    /*! \param chunkSize The size of memory chunk. The default is kDefaultChunkSize.
        \param baseAllocator The allocator for allocating memory chunks.
    */
    MemoryPoolAllocator(size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) :
        chunkHead_(0), chunk_capacity_(chunkSize), userBuffer_(0), baseAllocator_(baseAllocator), ownBaseAllocator_(0)
    {
    }

    //! Constructor with user-supplied buffer.
    /*! The user buffer will be used firstly. When it is full, memory pool allocates a new chunk with chunk size.

        The user buffer will not be deallocated when this allocator is destructed.

        \param buffer User supplied buffer.
        \param size Size of the buffer in bytes. It must be at least larger than sizeof(ChunkHeader).
        \param chunkSize The size of memory chunk. The default is kDefaultChunkSize.
        \param baseAllocator The allocator for allocating memory chunks.
    */
    MemoryPoolAllocator(void *buffer, size_t size, size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) :
        chunkHead_(0), chunk_capacity_(chunkSize), userBuffer_(buffer), baseAllocator_(baseAllocator), ownBaseAllocator_(0)
    {
        RAPIDJSON_ASSERT(buffer != 0);
        RAPIDJSON_ASSERT(size > sizeof(ChunkHeader));
        // The chunk header lives at the start of the user buffer itself.
        chunkHead_ = reinterpret_cast<ChunkHeader*>(buffer);
        chunkHead_->capacity = size - sizeof(ChunkHeader);
        chunkHead_->size = 0;
        chunkHead_->next = 0;
    }

    //! Destructor.
    /*! This deallocates all memory chunks, excluding the user-supplied buffer.
    */
    ~MemoryPoolAllocator() {
        Clear();
        RAPIDJSON_DELETE(ownBaseAllocator_);
    }

    //! Deallocates all memory chunks, excluding the user-supplied buffer.
    void Clear() {
        // Stop before the user buffer: it is not owned by this allocator.
        while (chunkHead_ && chunkHead_ != userBuffer_) {
            ChunkHeader* next = chunkHead_->next;
            baseAllocator_->Free(chunkHead_);
            chunkHead_ = next;
        }
        if (chunkHead_ && chunkHead_ == userBuffer_)
            chunkHead_->size = 0; // Clear user buffer
    }

    //! Computes the total capacity of allocated memory chunks.
    /*! \return total capacity in bytes.
    */
    size_t Capacity() const {
        size_t capacity = 0;
        for (ChunkHeader* c = chunkHead_; c != 0; c = c->next)
            capacity += c->capacity;
        return capacity;
    }

    //! Computes the memory blocks allocated.
    /*! \return total used bytes.
    */
    size_t Size() const {
        size_t size = 0;
        for (ChunkHeader* c = chunkHead_; c != 0; c = c->next)
            size += c->size;
        return size;
    }

    //! Allocates a memory block. (concept Allocator)
    // Bump-pointer allocation out of the head chunk; a new chunk is added
    // when the head chunk cannot satisfy the (aligned) request.
    void* Malloc(size_t size) {
        if (!size)
            return NULL;

        size = RAPIDJSON_ALIGN(size);
        if (chunkHead_ == 0 || chunkHead_->size + size > chunkHead_->capacity)
            if (!AddChunk(chunk_capacity_ > size ? chunk_capacity_ : size))
                return NULL;

        void *buffer = reinterpret_cast<char *>(chunkHead_) + RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + chunkHead_->size;
        chunkHead_->size += size;
        return buffer;
    }

    //! Resizes a memory block (concept Allocator)
    void* Realloc(void* originalPtr, size_t originalSize, size_t newSize) {
        if (originalPtr == 0)
            return Malloc(newSize);

        if (newSize == 0)
            return NULL;

        originalSize = RAPIDJSON_ALIGN(originalSize);
        newSize = RAPIDJSON_ALIGN(newSize);

        // Do not shrink if new size is smaller than original
        if (originalSize >= newSize)
            return originalPtr;

        // Simply expand it if it is the last allocation and there is sufficient space
        if (originalPtr == reinterpret_cast<char *>(chunkHead_) + RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + chunkHead_->size - originalSize) {
            size_t increment = static_cast<size_t>(newSize - originalSize);
            if (chunkHead_->size + increment <= chunkHead_->capacity) {
                chunkHead_->size += increment;
                return originalPtr;
            }
        }

        // Realloc process: allocate and copy memory, do not free original buffer.
        if (void* newBuffer = Malloc(newSize)) {
            if (originalSize)
                std::memcpy(newBuffer, originalPtr, originalSize);
            return newBuffer;
        }
        else
            return NULL;
    }

    //! Frees a memory block (concept Allocator)
    // Individual blocks are never freed; memory is reclaimed in Clear().
    static void Free(void *ptr) { (void)ptr; } // Do nothing

private:
    //! Copy constructor is not permitted.
    MemoryPoolAllocator(const MemoryPoolAllocator& rhs) /* = delete */;
    //! Copy assignment operator is not permitted.
    MemoryPoolAllocator& operator=(const MemoryPoolAllocator& rhs) /* = delete */;

    //! Creates a new chunk.
    /*! \param capacity Capacity of the chunk in bytes.
        \return true if success.
    */
    bool AddChunk(size_t capacity) {
        // Lazily create the base allocator on first use; it is then owned.
        if (!baseAllocator_)
            ownBaseAllocator_ = baseAllocator_ = RAPIDJSON_NEW(BaseAllocator)();
        // New chunks are pushed at the head of the list and serve allocation.
        if (ChunkHeader* chunk = reinterpret_cast<ChunkHeader*>(baseAllocator_->Malloc(RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + capacity))) {
            chunk->capacity = capacity;
            chunk->size = 0;
            chunk->next = chunkHead_;
            chunkHead_ = chunk;
            return true;
        }
        else
            return false;
    }

    static const int kDefaultChunkCapacity = RAPIDJSON_ALLOCATOR_DEFAULT_CHUNK_CAPACITY; //!< Default chunk capacity.

    //! Chunk header for prepending to each chunk.
    /*! Chunks are stored as a singly linked list.
    */
    struct ChunkHeader {
        size_t capacity;    //!< Capacity of the chunk in bytes (excluding the header itself).
        size_t size;        //!< Current size of allocated memory in bytes.
        ChunkHeader *next;  //!< Next chunk in the linked list.
    };

    ChunkHeader *chunkHead_;    //!< Head of the chunk linked-list. Only the head chunk serves allocation.
    size_t chunk_capacity_;     //!< The minimum capacity of chunk when they are allocated.
    void *userBuffer_;          //!< User supplied buffer.
    BaseAllocator* baseAllocator_;  //!< base allocator for allocating memory chunks.
    BaseAllocator* ownBaseAllocator_;   //!< base allocator created by this object.
};
+
+RAPIDJSON_NAMESPACE_END
+
#endif // RAPIDJSON_ALLOCATORS_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_cursorstreamwrapper.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_cursorstreamwrapper.h
new file mode 100644
index 00000000..36d8e4c6
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_cursorstreamwrapper.h
@@ -0,0 +1,78 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_CURSORSTREAMWRAPPER_H_
+#define RAPIDJSON_CURSORSTREAMWRAPPER_H_
+
+#include "lottie_rapidjson_stream.h"
+
+#if defined(__GNUC__)
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(effc++)
+#endif
+
+#if defined(_MSC_VER) && _MSC_VER <= 1800
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(4702) // unreachable code
+RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+
+
+//! Cursor stream wrapper for counting line and column number if error exists.
+/*!
+ \tparam InputStream Any stream that implements Stream Concept
+*/
+template <typename InputStream, typename Encoding = UTF8<> >
+class CursorStreamWrapper : public GenericStreamWrapper<InputStream, Encoding> {
+public:
+ typedef typename Encoding::Ch Ch;
+
+ CursorStreamWrapper(InputStream& is):
+ GenericStreamWrapper<InputStream, Encoding>(is), line_(1), col_(0) {}
+
+ // counting line and column number
+ Ch Take() {
+ Ch ch = this->is_.Take();
+ if(ch == '\n') {
+ line_ ++;
+ col_ = 0;
+ } else {
+ col_ ++;
+ }
+ return ch;
+ }
+
+ //! Get the error line number, if error exists.
+ size_t GetLine() const { return line_; }
+ //! Get the error column number, if error exists.
+ size_t GetColumn() const { return col_; }
+
+private:
+ size_t line_; //!< Current Line
+ size_t col_; //!< Current Column
+};
+
+#if defined(_MSC_VER) && _MSC_VER <= 1800
+RAPIDJSON_DIAG_POP
+#endif
+
+#if defined(__GNUC__)
+RAPIDJSON_DIAG_POP
+#endif
+
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_CURSORSTREAMWRAPPER_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_document.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_document.h
new file mode 100644
index 00000000..09cbf454
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_document.h
@@ -0,0 +1,2732 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_DOCUMENT_H_
+#define RAPIDJSON_DOCUMENT_H_
+
+/*! \file document.h */
+
+#include "lottie_rapidjson_reader.h"
+#include "lottie_rapidjson_internal_meta.h"
+#include "lottie_rapidjson_internal_strfunc.h"
+#include "lottie_rapidjson_memorystream.h"
+#include "lottie_rapidjson_encodedstream.h"
+#include <new> // placement new
+#include <limits>
+#ifdef __cpp_lib_three_way_comparison
+#include <compare>
+#endif
+
+RAPIDJSON_DIAG_PUSH
+#ifdef __clang__
+RAPIDJSON_DIAG_OFF(padded)
+RAPIDJSON_DIAG_OFF(switch-enum)
+RAPIDJSON_DIAG_OFF(c++98-compat)
+#elif defined(_MSC_VER)
+RAPIDJSON_DIAG_OFF(4127) // conditional expression is constant
+RAPIDJSON_DIAG_OFF(4244) // conversion from kXxxFlags to 'uint16_t', possible loss of data
+#endif
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_OFF(effc++)
+#endif // __GNUC__
+
+#ifndef RAPIDJSON_NOMEMBERITERATORCLASS
+#include <iterator> // std::random_access_iterator_tag
+#endif
+
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+#include <utility> // std::move
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+
+// Forward declaration.
+template <typename Encoding, typename Allocator>
+class GenericValue;
+
+template <typename Encoding, typename Allocator, typename StackAllocator>
+class GenericDocument;
+
+/*! \def RAPIDJSON_DEFAULT_ALLOCATOR
+ \ingroup RAPIDJSON_CONFIG
+ \brief Allows to choose default allocator.
+
+ User can define this to use CrtAllocator or MemoryPoolAllocator.
+*/
+#ifndef RAPIDJSON_DEFAULT_ALLOCATOR
+#define RAPIDJSON_DEFAULT_ALLOCATOR MemoryPoolAllocator<CrtAllocator>
+#endif
+
+/*! \def RAPIDJSON_DEFAULT_STACK_ALLOCATOR
+ \ingroup RAPIDJSON_CONFIG
+ \brief Allows to choose default stack allocator for Document.
+
+ User can define this to use CrtAllocator or MemoryPoolAllocator.
+*/
+#ifndef RAPIDJSON_DEFAULT_STACK_ALLOCATOR
+#define RAPIDJSON_DEFAULT_STACK_ALLOCATOR CrtAllocator
+#endif
+
+/*! \def RAPIDJSON_VALUE_DEFAULT_OBJECT_CAPACITY
+ \ingroup RAPIDJSON_CONFIG
+ \brief User defined kDefaultObjectCapacity value.
+
+ User can define this as any natural number.
+*/
+#ifndef RAPIDJSON_VALUE_DEFAULT_OBJECT_CAPACITY
+// number of objects that rapidjson::Value allocates memory for by default
+#define RAPIDJSON_VALUE_DEFAULT_OBJECT_CAPACITY 16
+#endif
+
+/*! \def RAPIDJSON_VALUE_DEFAULT_ARRAY_CAPACITY
+ \ingroup RAPIDJSON_CONFIG
+ \brief User defined kDefaultArrayCapacity value.
+
+ User can define this as any natural number.
+*/
+#ifndef RAPIDJSON_VALUE_DEFAULT_ARRAY_CAPACITY
+// number of array elements that rapidjson::Value allocates memory for by default
+#define RAPIDJSON_VALUE_DEFAULT_ARRAY_CAPACITY 16
+#endif
+
//! Name-value pair in a JSON object value.
/*!
    This class was internal to GenericValue. It used to be an inner struct.
    But a compiler (IBM XL C/C++ for AIX) was reported to have a problem with that, so it was moved to a namespace-scope struct.
    https://code.google.com/p/rapidjson/issues/detail?id=64
*/
template <typename Encoding, typename Allocator>
class GenericMember {
public:
    GenericValue<Encoding, Allocator> name;     //!< name of member (must be a string)
    GenericValue<Encoding, Allocator> value;    //!< value of member.

#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
    //! Move constructor in C++11
    GenericMember(GenericMember&& rhs) RAPIDJSON_NOEXCEPT
        : name(std::move(rhs.name)),
          value(std::move(rhs.value))
    {
    }

    //! Move assignment in C++11
    // Delegates to the lvalue operator= below by casting the rvalue to an
    // lvalue reference. NOTE(review): this relies on GenericValue's
    // assignment transferring (not copying) contents — confirm in
    // GenericValue's operator= documentation.
    GenericMember& operator=(GenericMember&& rhs) RAPIDJSON_NOEXCEPT {
        return *this = static_cast<GenericMember&>(rhs);
    }
#endif

    //! Assignment with move semantics.
    /*! \param rhs Source of the assignment. Its name and value will become a null value after assignment.
    */
    GenericMember& operator=(GenericMember& rhs) RAPIDJSON_NOEXCEPT {
        if (RAPIDJSON_LIKELY(this != &rhs)) {
            name = rhs.name;
            value = rhs.value;
        }
        return *this;
    }

    // swap() for std::sort() and other potential use in STL.
    friend inline void swap(GenericMember& a, GenericMember& b) RAPIDJSON_NOEXCEPT {
        a.name.Swap(b.name);
        a.value.Swap(b.value);
    }

private:
    //! Copy constructor is not permitted.
    GenericMember(const GenericMember& rhs);
};
+
+///////////////////////////////////////////////////////////////////////////////
+// GenericMemberIterator
+
+#ifndef RAPIDJSON_NOMEMBERITERATORCLASS
+
+//! (Constant) member iterator for a JSON object value
+/*!
+ \tparam Const Is this a constant iterator?
+ \tparam Encoding Encoding of the value. (Even non-string values need to have the same encoding in a document)
+ \tparam Allocator Allocator type for allocating memory of object, array and string.
+
+ This class implements a Random Access Iterator for GenericMember elements
+ of a GenericValue, see ISO/IEC 14882:2003(E) C++ standard, 24.1 [lib.iterator.requirements].
+
+ \note This iterator implementation is mainly intended to avoid implicit
+ conversions from iterator values to \c NULL,
+ e.g. from GenericValue::FindMember.
+
+ \note Define \c RAPIDJSON_NOMEMBERITERATORCLASS to fall back to a
+ pointer-based implementation, if your platform doesn't provide
+ the C++ <iterator> header.
+
+ \see GenericMember, GenericValue::MemberIterator, GenericValue::ConstMemberIterator
+ */
+template <bool Const, typename Encoding, typename Allocator>
+class GenericMemberIterator {
+
+ // Friends: GenericValue needs the private pointer constructor below, and the
+ // opposite-constness instantiation must read ptr_ in the converting
+ // constructor/assignment and in the mixed-constness comparison operators.
+ friend class GenericValue<Encoding,Allocator>;
+ template <bool, typename, typename> friend class GenericMemberIterator;
+
+ typedef GenericMember<Encoding,Allocator> PlainType;
+ typedef typename internal::MaybeAddConst<Const,PlainType>::Type ValueType;
+
+public:
+ //! Iterator type itself
+ typedef GenericMemberIterator Iterator;
+ //! Constant iterator type
+ typedef GenericMemberIterator<true,Encoding,Allocator> ConstIterator;
+ //! Non-constant iterator type
+ typedef GenericMemberIterator<false,Encoding,Allocator> NonConstIterator;
+
+ /** \name std::iterator_traits support */
+ //@{
+ typedef ValueType value_type;
+ typedef ValueType * pointer;
+ typedef ValueType & reference;
+ typedef std::ptrdiff_t difference_type;
+ typedef std::random_access_iterator_tag iterator_category;
+ //@}
+
+ //! Pointer to (const) GenericMember
+ typedef pointer Pointer;
+ //! Reference to (const) GenericMember
+ typedef reference Reference;
+ //! Signed integer type (e.g. \c ptrdiff_t)
+ typedef difference_type DifferenceType;
+
+ //! Default constructor (singular value)
+ /*! Creates an iterator pointing to no element.
+ \note All operations, except for comparisons, are undefined on such values.
+ */
+ GenericMemberIterator() : ptr_() {}
+
+ //! Iterator conversions to more const
+ /*!
+ \param it (Non-const) iterator to copy from
+
+ Allows the creation of an iterator from another GenericMemberIterator
+ that is "less const". Especially, creating a non-constant iterator
+ from a constant iterator are disabled:
+ \li const -> non-const (not ok)
+ \li const -> const (ok)
+ \li non-const -> const (ok)
+ \li non-const -> non-const (ok)
+
+ \note If the \c Const template parameter is already \c false, this
+ constructor effectively defines a regular copy-constructor.
+ Otherwise, the copy constructor is implicitly defined.
+ */
+ GenericMemberIterator(const NonConstIterator & it) : ptr_(it.ptr_) {}
+ Iterator& operator=(const NonConstIterator & it) { ptr_ = it.ptr_; return *this; }
+
+ //! @name stepping
+ //@{
+ Iterator& operator++(){ ++ptr_; return *this; }
+ Iterator& operator--(){ --ptr_; return *this; }
+ Iterator operator++(int){ Iterator old(*this); ++ptr_; return old; }
+ Iterator operator--(int){ Iterator old(*this); --ptr_; return old; }
+ //@}
+
+ //! @name increment/decrement
+ //@{
+ Iterator operator+(DifferenceType n) const { return Iterator(ptr_+n); }
+ Iterator operator-(DifferenceType n) const { return Iterator(ptr_-n); }
+
+ Iterator& operator+=(DifferenceType n) { ptr_+=n; return *this; }
+ Iterator& operator-=(DifferenceType n) { ptr_-=n; return *this; }
+ //@}
+
+ //! @name relations
+ //@{
+ // Templated on Const_ so const and non-const iterators compare freely.
+ template <bool Const_> bool operator==(const GenericMemberIterator<Const_, Encoding, Allocator>& that) const { return ptr_ == that.ptr_; }
+ template <bool Const_> bool operator!=(const GenericMemberIterator<Const_, Encoding, Allocator>& that) const { return ptr_ != that.ptr_; }
+ template <bool Const_> bool operator<=(const GenericMemberIterator<Const_, Encoding, Allocator>& that) const { return ptr_ <= that.ptr_; }
+ template <bool Const_> bool operator>=(const GenericMemberIterator<Const_, Encoding, Allocator>& that) const { return ptr_ >= that.ptr_; }
+ template <bool Const_> bool operator< (const GenericMemberIterator<Const_, Encoding, Allocator>& that) const { return ptr_ < that.ptr_; }
+ template <bool Const_> bool operator> (const GenericMemberIterator<Const_, Encoding, Allocator>& that) const { return ptr_ > that.ptr_; }
+
+ // C++20 only: three-way comparison, gated on the library feature-test macro.
+#ifdef __cpp_lib_three_way_comparison
+ template <bool Const_> std::strong_ordering operator<=>(const GenericMemberIterator<Const_, Encoding, Allocator>& that) const { return ptr_ <=> that.ptr_; }
+#endif
+ //@}
+
+ //! @name dereference
+ //@{
+ Reference operator*() const { return *ptr_; }
+ Pointer operator->() const { return ptr_; }
+ Reference operator[](DifferenceType n) const { return ptr_[n]; }
+ //@}
+
+ //! Distance
+ DifferenceType operator-(ConstIterator that) const { return ptr_-that.ptr_; }
+
+private:
+ //! Internal constructor from plain pointer
+ explicit GenericMemberIterator(Pointer p) : ptr_(p) {}
+
+ Pointer ptr_; //!< raw pointer
+};
+
+#else // RAPIDJSON_NOMEMBERITERATORCLASS
+
+// class-based member iterator implementation disabled, use plain pointers
+
+template <bool Const, typename Encoding, typename Allocator>
+class GenericMemberIterator;
+
+//! non-const GenericMemberIterator
+template <typename Encoding, typename Allocator>
+class GenericMemberIterator<false,Encoding,Allocator> {
+ // The typedef must be public: GenericValue refers to
+ // GenericMemberIterator<...>::Iterator from outside this class, and a
+ // class's default access is private, which made that reference ill-formed.
+public:
+ //! use plain pointer as iterator type
+ typedef GenericMember<Encoding,Allocator>* Iterator;
+};
+//! const GenericMemberIterator
+template <typename Encoding, typename Allocator>
+class GenericMemberIterator<true,Encoding,Allocator> {
+public:
+ //! use plain const pointer as iterator type
+ typedef const GenericMember<Encoding,Allocator>* Iterator;
+};
+
+#endif // RAPIDJSON_NOMEMBERITERATORCLASS
+
+///////////////////////////////////////////////////////////////////////////////
+// GenericStringRef
+
+//! Reference to a constant string (not taking a copy)
+/*!
+ \tparam CharType character type of the string
+
+ This helper class is used to automatically infer constant string
+ references for string literals, especially from \c const \b (!)
+ character arrays.
+
+ The main use is for creating JSON string values without copying the
+ source string via an \ref Allocator. This requires that the referenced
+ string pointers have a sufficient lifetime, which exceeds the lifetime
+ of the associated GenericValue.
+
+ \b Example
+ \code
+ Value v("foo"); // ok, no need to copy & calculate length
+ const char foo[] = "foo";
+ v.SetString(foo); // ok
+
+ const char* bar = foo;
+ // Value x(bar); // not ok, can't rely on bar's lifetime
+ Value x(StringRef(bar)); // lifetime explicitly guaranteed by user
+ Value y(StringRef(bar, 3)); // ok, explicitly pass length
+ \endcode
+
+ \see StringRef, GenericValue::SetString
+*/
+template<typename CharType>
+struct GenericStringRef {
+ typedef CharType Ch; //!< character type of the string
+
+ //! Create string reference from \c const character array
+#ifndef __clang__ // -Wdocumentation
+ /*!
+ This constructor implicitly creates a constant string reference from
+ a \c const character array. It has better performance than
+ \ref StringRef(const CharType*) by inferring the string \ref length
+ from the array length, and also supports strings containing null
+ characters.
+
+ \tparam N length of the string, automatically inferred
+
+ \param str Constant character array, lifetime assumed to be longer
+ than the use of the string in e.g. a GenericValue
+
+ \post \ref s == str
+
+ \note Constant complexity.
+ \note There is a hidden, private overload to disallow references to
+ non-const character arrays to be created via this constructor.
+ By this, e.g. function-scope arrays used to be filled via
+ \c snprintf are excluded from consideration.
+ In such cases, the referenced string should be \b copied to the
+ GenericValue instead.
+ */
+#endif
+ template<SizeType N>
+ GenericStringRef(const CharType (&str)[N]) RAPIDJSON_NOEXCEPT
+ : s(str), length(N-1) {}
+
+ //! Explicitly create string reference from \c const character pointer
+#ifndef __clang__ // -Wdocumentation
+ /*!
+ This constructor can be used to \b explicitly create a reference to
+ a constant string pointer.
+
+ \see StringRef(const CharType*)
+
+ \param str Constant character pointer, lifetime assumed to be longer
+ than the use of the string in e.g. a GenericValue
+
+ \post \ref s == str
+
+ \note There is a hidden, private overload to disallow references to
+ non-const character arrays to be created via this constructor.
+ By this, e.g. function-scope arrays used to be filled via
+ \c snprintf are excluded from consideration.
+ In such cases, the referenced string should be \b copied to the
+ GenericValue instead.
+ */
+#endif
+ explicit GenericStringRef(const CharType* str)
+ : s(str), length(NotNullStrLen(str)) {}
+
+ //! Create constant string reference from pointer and length
+#ifndef __clang__ // -Wdocumentation
+ /*! \param str constant string, lifetime assumed to be longer than the use of the string in e.g. a GenericValue
+ \param len length of the string, excluding the trailing NULL terminator
+
+ \post \ref s == str && \ref length == len
+ \note Constant complexity.
+ */
+#endif
+ GenericStringRef(const CharType* str, SizeType len)
+ : s(RAPIDJSON_LIKELY(str) ? str : emptyString), length(len) { RAPIDJSON_ASSERT(str != 0 || len == 0u); }
+
+ //! Copy constructor (user-provided because copy assignment is disabled below).
+ GenericStringRef(const GenericStringRef& rhs) : s(rhs.s), length(rhs.length) {}
+
+ //! implicit conversion to plain CharType pointer
+ operator const Ch *() const { return s; }
+
+ const Ch* const s; //!< plain CharType pointer
+ const SizeType length; //!< length of the string (excluding the trailing NULL terminator)
+
+private:
+ //! Asserts non-NULL, then measures the string; used by the pointer constructor's init list.
+ SizeType NotNullStrLen(const CharType* str) {
+ RAPIDJSON_ASSERT(str != 0);
+ return internal::StrLen(str);
+ }
+
+ /// Empty string - used when passing in a NULL pointer
+ static const Ch emptyString[];
+
+ //! Disallow construction from non-const array
+ template<SizeType N>
+ GenericStringRef(CharType (&str)[N]) /* = delete */;
+ //! Copy assignment operator not permitted - immutable type
+ GenericStringRef& operator=(const GenericStringRef& rhs) /* = delete */;
+};
+
+//! Out-of-class definition of the empty-string fallback: a single NUL character.
+template<typename CharType>
+const CharType GenericStringRef<CharType>::emptyString[] = { CharType() };
+
+//! Mark a character pointer as constant string
+/*! Mark a plain character pointer as a "string literal". This function
+ can be used to avoid copying a character string to be referenced as a
+ value in a JSON GenericValue object, if the string's lifetime is known
+ to be valid long enough.
+ \tparam CharType Character type of the string
+ \param str Constant string, lifetime assumed to be longer than the use of the string in e.g. a GenericValue
+ \return GenericStringRef string reference object
+ \relatesalso GenericStringRef
+
+ \see GenericValue::GenericValue(StringRefType), GenericValue::operator=(StringRefType), GenericValue::SetString(StringRefType), GenericValue::PushBack(StringRefType, Allocator&), GenericValue::AddMember
+*/
+template<typename CharType>
+inline GenericStringRef<CharType> StringRef(const CharType* str) {
+ // Length is measured by GenericStringRef's pointer constructor.
+ GenericStringRef<CharType> ref(str);
+ return ref;
+}
+
+//! Mark a character pointer as constant string
+/*! Mark a plain character pointer as a "string literal". This function
+ can be used to avoid copying a character string to be referenced as a
+ value in a JSON GenericValue object, if the string's lifetime is known
+ to be valid long enough.
+
+ This version has better performance with supplied length, and also
+ supports string containing null characters.
+
+ \tparam CharType character type of the string
+ \param str Constant string, lifetime assumed to be longer than the use of the string in e.g. a GenericValue
+ \param length The length of source string.
+ \return GenericStringRef string reference object
+ \relatesalso GenericStringRef
+*/
+template<typename CharType>
+inline GenericStringRef<CharType> StringRef(const CharType* str, size_t length) {
+ // Narrow the caller-supplied size_t to RapidJSON's SizeType for storage.
+ GenericStringRef<CharType> ref(str, SizeType(length));
+ return ref;
+}
+
+#if RAPIDJSON_HAS_STDSTRING
+//! Mark a string object as constant string
+/*! Mark a string object (e.g. \c std::string) as a "string literal".
+ This function can be used to avoid copying a string to be referenced as a
+ value in a JSON GenericValue object, if the string's lifetime is known
+ to be valid long enough.
+
+ \tparam CharType character type of the string
+ \param str Constant string, lifetime assumed to be longer than the use of the string in e.g. a GenericValue
+ \return GenericStringRef string reference object
+ \relatesalso GenericStringRef
+ \note Requires the definition of the preprocessor symbol \ref RAPIDJSON_HAS_STDSTRING.
+*/
+template<typename CharType>
+inline GenericStringRef<CharType> StringRef(const std::basic_string<CharType>& str) {
+ // Delegate to the (pointer, length) overload; handles embedded NULs too.
+ return StringRef(str.data(), str.size());
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// GenericValue type traits
+namespace internal {
+
+// Primary template: any type without the nested typedefs below is not a GenericValue.
+template <typename T, typename Encoding = void, typename Allocator = void>
+struct IsGenericValueImpl : FalseType {};
+
+// select candidates according to nested encoding and allocator types
+// (Void<> turns both typedefs into void, so this partial specialization is
+// chosen exactly when T declares EncodingType and AllocatorType.)
+template <typename T> struct IsGenericValueImpl<T, typename Void<typename T::EncodingType>::Type, typename Void<typename T::AllocatorType>::Type>
+ : IsBaseOf<GenericValue<typename T::EncodingType, typename T::AllocatorType>, T>::Type {};
+
+// helper to match arbitrary GenericValue instantiations, including derived classes
+template <typename T> struct IsGenericValue : IsGenericValueImpl<T>::Type {};
+
+} // namespace internal
+
+///////////////////////////////////////////////////////////////////////////////
+// TypeHelper
+
+namespace internal {
+
+// TypeHelper maps a C++ type T onto the matching Is/Get/Set members of a
+// GenericValue. The primary template is empty: unsupported types fail to compile.
+template <typename ValueType, typename T>
+struct TypeHelper {};
+
+template<typename ValueType>
+struct TypeHelper<ValueType, bool> {
+ static bool Is(const ValueType& v) { return v.IsBool(); }
+ static bool Get(const ValueType& v) { return v.GetBool(); }
+ static ValueType& Set(ValueType& v, bool data) { return v.SetBool(data); }
+ static ValueType& Set(ValueType& v, bool data, typename ValueType::AllocatorType&) { return v.SetBool(data); }
+};
+
+template<typename ValueType>
+struct TypeHelper<ValueType, int> {
+ static bool Is(const ValueType& v) { return v.IsInt(); }
+ static int Get(const ValueType& v) { return v.GetInt(); }
+ static ValueType& Set(ValueType& v, int data) { return v.SetInt(data); }
+ static ValueType& Set(ValueType& v, int data, typename ValueType::AllocatorType&) { return v.SetInt(data); }
+};
+
+template<typename ValueType>
+struct TypeHelper<ValueType, unsigned> {
+ static bool Is(const ValueType& v) { return v.IsUint(); }
+ static unsigned Get(const ValueType& v) { return v.GetUint(); }
+ static ValueType& Set(ValueType& v, unsigned data) { return v.SetUint(data); }
+ static ValueType& Set(ValueType& v, unsigned data, typename ValueType::AllocatorType&) { return v.SetUint(data); }
+};
+
+// On MSVC, long and unsigned long are 32-bit (verified by the static asserts),
+// so they are forwarded to the int/uint handlers.
+#ifdef _MSC_VER
+RAPIDJSON_STATIC_ASSERT(sizeof(long) == sizeof(int));
+template<typename ValueType>
+struct TypeHelper<ValueType, long> {
+ static bool Is(const ValueType& v) { return v.IsInt(); }
+ static long Get(const ValueType& v) { return v.GetInt(); }
+ static ValueType& Set(ValueType& v, long data) { return v.SetInt(data); }
+ static ValueType& Set(ValueType& v, long data, typename ValueType::AllocatorType&) { return v.SetInt(data); }
+};
+
+RAPIDJSON_STATIC_ASSERT(sizeof(unsigned long) == sizeof(unsigned));
+template<typename ValueType>
+struct TypeHelper<ValueType, unsigned long> {
+ static bool Is(const ValueType& v) { return v.IsUint(); }
+ static unsigned long Get(const ValueType& v) { return v.GetUint(); }
+ static ValueType& Set(ValueType& v, unsigned long data) { return v.SetUint(data); }
+ static ValueType& Set(ValueType& v, unsigned long data, typename ValueType::AllocatorType&) { return v.SetUint(data); }
+};
+#endif
+
+template<typename ValueType>
+struct TypeHelper<ValueType, int64_t> {
+ static bool Is(const ValueType& v) { return v.IsInt64(); }
+ static int64_t Get(const ValueType& v) { return v.GetInt64(); }
+ static ValueType& Set(ValueType& v, int64_t data) { return v.SetInt64(data); }
+ static ValueType& Set(ValueType& v, int64_t data, typename ValueType::AllocatorType&) { return v.SetInt64(data); }
+};
+
+template<typename ValueType>
+struct TypeHelper<ValueType, uint64_t> {
+ static bool Is(const ValueType& v) { return v.IsUint64(); }
+ static uint64_t Get(const ValueType& v) { return v.GetUint64(); }
+ static ValueType& Set(ValueType& v, uint64_t data) { return v.SetUint64(data); }
+ static ValueType& Set(ValueType& v, uint64_t data, typename ValueType::AllocatorType&) { return v.SetUint64(data); }
+};
+
+template<typename ValueType>
+struct TypeHelper<ValueType, double> {
+ static bool Is(const ValueType& v) { return v.IsDouble(); }
+ static double Get(const ValueType& v) { return v.GetDouble(); }
+ static ValueType& Set(ValueType& v, double data) { return v.SetDouble(data); }
+ static ValueType& Set(ValueType& v, double data, typename ValueType::AllocatorType&) { return v.SetDouble(data); }
+};
+
+template<typename ValueType>
+struct TypeHelper<ValueType, float> {
+ static bool Is(const ValueType& v) { return v.IsFloat(); }
+ static float Get(const ValueType& v) { return v.GetFloat(); }
+ static ValueType& Set(ValueType& v, float data) { return v.SetFloat(data); }
+ static ValueType& Set(ValueType& v, float data, typename ValueType::AllocatorType&) { return v.SetFloat(data); }
+};
+
+template<typename ValueType>
+struct TypeHelper<ValueType, const typename ValueType::Ch*> {
+ typedef const typename ValueType::Ch* StringType;
+ static bool Is(const ValueType& v) { return v.IsString(); }
+ static StringType Get(const ValueType& v) { return v.GetString(); }
+ // Without an allocator, the string is referenced (no copy); with one, it is copied.
+ static ValueType& Set(ValueType& v, const StringType data) { return v.SetString(typename ValueType::StringRefType(data)); }
+ static ValueType& Set(ValueType& v, const StringType data, typename ValueType::AllocatorType& a) { return v.SetString(data, a); }
+};
+
+#if RAPIDJSON_HAS_STDSTRING
+template<typename ValueType>
+struct TypeHelper<ValueType, std::basic_string<typename ValueType::Ch> > {
+ typedef std::basic_string<typename ValueType::Ch> StringType;
+ static bool Is(const ValueType& v) { return v.IsString(); }
+ static StringType Get(const ValueType& v) { return StringType(v.GetString(), v.GetStringLength()); }
+ static ValueType& Set(ValueType& v, const StringType& data, typename ValueType::AllocatorType& a) { return v.SetString(data, a); }
+};
+#endif
+
+template<typename ValueType>
+struct TypeHelper<ValueType, typename ValueType::Array> {
+ typedef typename ValueType::Array ArrayType;
+ static bool Is(const ValueType& v) { return v.IsArray(); }
+ static ArrayType Get(ValueType& v) { return v.GetArray(); }
+ static ValueType& Set(ValueType& v, ArrayType data) { return v = data; }
+ static ValueType& Set(ValueType& v, ArrayType data, typename ValueType::AllocatorType&) { return v = data; }
+};
+
+// Const variants are read-only views: no Set overloads are provided.
+template<typename ValueType>
+struct TypeHelper<ValueType, typename ValueType::ConstArray> {
+ typedef typename ValueType::ConstArray ArrayType;
+ static bool Is(const ValueType& v) { return v.IsArray(); }
+ static ArrayType Get(const ValueType& v) { return v.GetArray(); }
+};
+
+template<typename ValueType>
+struct TypeHelper<ValueType, typename ValueType::Object> {
+ typedef typename ValueType::Object ObjectType;
+ static bool Is(const ValueType& v) { return v.IsObject(); }
+ static ObjectType Get(ValueType& v) { return v.GetObject(); }
+ static ValueType& Set(ValueType& v, ObjectType data) { return v = data; }
+ static ValueType& Set(ValueType& v, ObjectType data, typename ValueType::AllocatorType&) { return v = data; }
+};
+
+template<typename ValueType>
+struct TypeHelper<ValueType, typename ValueType::ConstObject> {
+ typedef typename ValueType::ConstObject ObjectType;
+ static bool Is(const ValueType& v) { return v.IsObject(); }
+ static ObjectType Get(const ValueType& v) { return v.GetObject(); }
+};
+
+} // namespace internal
+
+// Forward declarations
+template <bool, typename> class GenericArray;
+template <bool, typename> class GenericObject;
+
+///////////////////////////////////////////////////////////////////////////////
+// GenericValue
+
+//! Represents a JSON value. Use Value for UTF8 encoding and default allocator.
+/*!
+ A JSON value can be one of 7 types. This class is a variant type supporting
+ these types.
+
+ Use the Value if UTF8 and default allocator
+
+ \tparam Encoding Encoding of the value. (Even non-string values need to have the same encoding in a document)
+ \tparam Allocator Allocator type for allocating memory of object, array and string.
+*/
+template <typename Encoding, typename Allocator = RAPIDJSON_DEFAULT_ALLOCATOR >
+class GenericValue {
+public:
+ //! Name-value pair in an object.
+ typedef GenericMember<Encoding, Allocator> Member;
+ typedef Encoding EncodingType; //!< Encoding type from template parameter.
+ typedef Allocator AllocatorType; //!< Allocator type from template parameter.
+ typedef typename Encoding::Ch Ch; //!< Character type derived from Encoding.
+ typedef GenericStringRef<Ch> StringRefType; //!< Reference to a constant string
+ typedef typename GenericMemberIterator<false,Encoding,Allocator>::Iterator MemberIterator; //!< Member iterator for iterating in object.
+ typedef typename GenericMemberIterator<true,Encoding,Allocator>::Iterator ConstMemberIterator; //!< Constant member iterator for iterating in object.
+ typedef GenericValue* ValueIterator; //!< Value iterator for iterating in array.
+ typedef const GenericValue* ConstValueIterator; //!< Constant value iterator for iterating in array.
+ typedef GenericValue<Encoding, Allocator> ValueType; //!< Value type of itself.
+ typedef GenericArray<false, ValueType> Array;
+ typedef GenericArray<true, ValueType> ConstArray;
+ typedef GenericObject<false, ValueType> Object;
+ typedef GenericObject<true, ValueType> ConstObject;
+
+ //!@name Constructors and destructor.
+ //@{
+
+ //! Default constructor creates a null value.
+ GenericValue() RAPIDJSON_NOEXCEPT : data_() { data_.f.flags = kNullFlag; }
+
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+ //! Move constructor in C++11
+ GenericValue(GenericValue&& rhs) RAPIDJSON_NOEXCEPT : data_(rhs.data_) {
+ rhs.data_.f.flags = kNullFlag; // give up contents
+ }
+#endif
+
+private:
+ //! Copy constructor is not permitted.
+ GenericValue(const GenericValue& rhs);
+
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+ //! Moving from a GenericDocument is not permitted.
+ template <typename StackAllocator>
+ GenericValue(GenericDocument<Encoding,Allocator,StackAllocator>&& rhs);
+
+ //! Move assignment from a GenericDocument is not permitted.
+ template <typename StackAllocator>
+ GenericValue& operator=(GenericDocument<Encoding,Allocator,StackAllocator>&& rhs);
+#endif
+
+public:
+
+ //! Constructor with JSON value type.
+ /*! This creates a Value of specified type with default content.
+ \param type Type of the value.
+ \note Default content for number is zero.
+ */
+ explicit GenericValue(Type type) RAPIDJSON_NOEXCEPT : data_() {
+ static const uint16_t defaultFlags[] = {
+ kNullFlag, kFalseFlag, kTrueFlag, kObjectFlag, kArrayFlag, kShortStringFlag,
+ kNumberAnyFlag
+ };
+ RAPIDJSON_NOEXCEPT_ASSERT(type >= kNullType && type <= kNumberType);
+ data_.f.flags = defaultFlags[type];
+
+ // Use ShortString to store empty string.
+ if (type == kStringType)
+ data_.ss.SetLength(0);
+ }
+
+ //! Explicit copy constructor (with allocator)
+ /*! Creates a copy of a Value by using the given Allocator
+ \tparam SourceAllocator allocator of \c rhs
+ \param rhs Value to copy from (read-only)
+ \param allocator Allocator for allocating copied elements and buffers. Commonly use GenericDocument::GetAllocator().
+ \param copyConstStrings Force copying of constant strings (e.g. referencing an in-situ buffer)
+ \see CopyFrom()
+ */
+ template <typename SourceAllocator>
+ GenericValue(const GenericValue<Encoding,SourceAllocator>& rhs, Allocator& allocator, bool copyConstStrings = false) {
+ switch (rhs.GetType()) {
+ case kObjectType: {
+ SizeType count = rhs.data_.o.size;
+ Member* lm = reinterpret_cast<Member*>(allocator.Malloc(count * sizeof(Member)));
+ const typename GenericValue<Encoding,SourceAllocator>::Member* rm = rhs.GetMembersPointer();
+ for (SizeType i = 0; i < count; i++) {
+ new (&lm[i].name) GenericValue(rm[i].name, allocator, copyConstStrings);
+ new (&lm[i].value) GenericValue(rm[i].value, allocator, copyConstStrings);
+ }
+ data_.f.flags = kObjectFlag;
+ data_.o.size = data_.o.capacity = count;
+ SetMembersPointer(lm);
+ }
+ break;
+ case kArrayType: {
+ SizeType count = rhs.data_.a.size;
+ GenericValue* le = reinterpret_cast<GenericValue*>(allocator.Malloc(count * sizeof(GenericValue)));
+ const GenericValue<Encoding,SourceAllocator>* re = rhs.GetElementsPointer();
+ for (SizeType i = 0; i < count; i++)
+ new (&le[i]) GenericValue(re[i], allocator, copyConstStrings);
+ data_.f.flags = kArrayFlag;
+ data_.a.size = data_.a.capacity = count;
+ SetElementsPointer(le);
+ }
+ break;
+ case kStringType:
+ if (rhs.data_.f.flags == kConstStringFlag && !copyConstStrings) {
+ data_.f.flags = rhs.data_.f.flags;
+ data_ = *reinterpret_cast<const Data*>(&rhs.data_);
+ }
+ else
+ SetStringRaw(StringRef(rhs.GetString(), rhs.GetStringLength()), allocator);
+ break;
+ default:
+ data_.f.flags = rhs.data_.f.flags;
+ data_ = *reinterpret_cast<const Data*>(&rhs.data_);
+ break;
+ }
+ }
+
+ //! Constructor for boolean value.
+ /*! \param b Boolean value
+ \note This constructor is limited to \em real boolean values and rejects
+ implicitly converted types like arbitrary pointers. Use an explicit cast
+ to \c bool, if you want to construct a boolean JSON value in such cases.
+ */
+#ifndef RAPIDJSON_DOXYGEN_RUNNING // hide SFINAE from Doxygen
+ template <typename T>
+ explicit GenericValue(T b, RAPIDJSON_ENABLEIF((internal::IsSame<bool, T>))) RAPIDJSON_NOEXCEPT // See #472
+#else
+ explicit GenericValue(bool b) RAPIDJSON_NOEXCEPT
+#endif
+ : data_() {
+ // safe-guard against failing SFINAE
+ RAPIDJSON_STATIC_ASSERT((internal::IsSame<bool,T>::Value));
+ data_.f.flags = b ? kTrueFlag : kFalseFlag;
+ }
+
+ //! Constructor for int value.
+ explicit GenericValue(int i) RAPIDJSON_NOEXCEPT : data_() {
+ data_.n.i64 = i;
+ data_.f.flags = (i >= 0) ? (kNumberIntFlag | kUintFlag | kUint64Flag) : kNumberIntFlag;
+ }
+
+ //! Constructor for unsigned value.
+ explicit GenericValue(unsigned u) RAPIDJSON_NOEXCEPT : data_() {
+ data_.n.u64 = u;
+ data_.f.flags = (u & 0x80000000) ? kNumberUintFlag : (kNumberUintFlag | kIntFlag | kInt64Flag);
+ }
+
+ //! Constructor for int64_t value.
+ explicit GenericValue(int64_t i64) RAPIDJSON_NOEXCEPT : data_() {
+ data_.n.i64 = i64;
+ data_.f.flags = kNumberInt64Flag;
+ if (i64 >= 0) {
+ data_.f.flags |= kNumberUint64Flag;
+ if (!(static_cast<uint64_t>(i64) & RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0x00000000)))
+ data_.f.flags |= kUintFlag;
+ if (!(static_cast<uint64_t>(i64) & RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0x80000000)))
+ data_.f.flags |= kIntFlag;
+ }
+ else if (i64 >= static_cast<int64_t>(RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0x80000000)))
+ data_.f.flags |= kIntFlag;
+ }
+
+ //! Constructor for uint64_t value.
+ explicit GenericValue(uint64_t u64) RAPIDJSON_NOEXCEPT : data_() {
+ data_.n.u64 = u64;
+ data_.f.flags = kNumberUint64Flag;
+ if (!(u64 & RAPIDJSON_UINT64_C2(0x80000000, 0x00000000)))
+ data_.f.flags |= kInt64Flag;
+ if (!(u64 & RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0x00000000)))
+ data_.f.flags |= kUintFlag;
+ if (!(u64 & RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0x80000000)))
+ data_.f.flags |= kIntFlag;
+ }
+
+ //! Constructor for double value.
+ explicit GenericValue(double d) RAPIDJSON_NOEXCEPT : data_() { data_.n.d = d; data_.f.flags = kNumberDoubleFlag; }
+
+ //! Constructor for float value.
+ explicit GenericValue(float f) RAPIDJSON_NOEXCEPT : data_() { data_.n.d = static_cast<double>(f); data_.f.flags = kNumberDoubleFlag; }
+
+ //! Constructor for constant string (i.e. do not make a copy of string)
+ GenericValue(const Ch* s, SizeType length) RAPIDJSON_NOEXCEPT : data_() { SetStringRaw(StringRef(s, length)); }
+
+ //! Constructor for constant string (i.e. do not make a copy of string)
+ explicit GenericValue(StringRefType s) RAPIDJSON_NOEXCEPT : data_() { SetStringRaw(s); }
+
+ //! Constructor for copy-string (i.e. do make a copy of string)
+ GenericValue(const Ch* s, SizeType length, Allocator& allocator) : data_() { SetStringRaw(StringRef(s, length), allocator); }
+
+ //! Constructor for copy-string (i.e. do make a copy of string)
+ GenericValue(const Ch*s, Allocator& allocator) : data_() { SetStringRaw(StringRef(s), allocator); }
+
+#if RAPIDJSON_HAS_STDSTRING
+ //! Constructor for copy-string from a string object (i.e. do make a copy of string)
+ /*! \note Requires the definition of the preprocessor symbol \ref RAPIDJSON_HAS_STDSTRING.
+ */
+ GenericValue(const std::basic_string<Ch>& s, Allocator& allocator) : data_() { SetStringRaw(StringRef(s), allocator); }
+#endif
+
+ //! Constructor for Array.
+ /*!
+ \param a An array obtained by \c GetArray().
+ \note \c Array is always pass-by-value.
+ \note the source array is moved into this value and the source array becomes empty.
+ */
+ GenericValue(Array a) RAPIDJSON_NOEXCEPT : data_(a.value_.data_) {
+ a.value_.data_ = Data();
+ a.value_.data_.f.flags = kArrayFlag;
+ }
+
+ //! Constructor for Object.
+ /*!
+ \param o An object obtained by \c GetObject().
+ \note \c Object is always pass-by-value.
+ \note the source object is moved into this value and the source object becomes empty.
+ */
+ GenericValue(Object o) RAPIDJSON_NOEXCEPT : data_(o.value_.data_) {
+ o.value_.data_ = Data();
+ o.value_.data_.f.flags = kObjectFlag;
+ }
+
+ //! Destructor.
+ /*! Need to destruct elements of array, members of object, or copy-string.
+ */
+ ~GenericValue() {
+ if (Allocator::kNeedFree) { // Shortcut by Allocator's trait
+ switch(data_.f.flags) {
+ case kArrayFlag:
+ {
+ GenericValue* e = GetElementsPointer();
+ for (GenericValue* v = e; v != e + data_.a.size; ++v)
+ v->~GenericValue();
+ Allocator::Free(e);
+ }
+ break;
+
+ case kObjectFlag:
+ for (MemberIterator m = MemberBegin(); m != MemberEnd(); ++m)
+ m->~Member();
+ Allocator::Free(GetMembersPointer());
+ break;
+
+ case kCopyStringFlag:
+ Allocator::Free(const_cast<Ch*>(GetStringPointer()));
+ break;
+
+ default:
+ break; // Do nothing for other types.
+ }
+ }
+ }
+
+ //@}
+
+ //!@name Assignment operators
+ //@{
+
+ //! Assignment with move semantics.
+ /*! \param rhs Source of the assignment. It will become a null value after assignment.
+ */
+ GenericValue& operator=(GenericValue& rhs) RAPIDJSON_NOEXCEPT {
+ if (RAPIDJSON_LIKELY(this != &rhs)) {
+ this->~GenericValue();
+ RawAssign(rhs);
+ }
+ return *this;
+ }
+
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+ //! Move assignment in C++11
+ GenericValue& operator=(GenericValue&& rhs) RAPIDJSON_NOEXCEPT {
+ return *this = rhs.Move();
+ }
+#endif
+
+ //! Assignment of constant string reference (no copy)
+ /*! \param str Constant string reference to be assigned
+ \note This overload is needed to avoid clashes with the generic primitive type assignment overload below.
+ \see GenericStringRef, operator=(T)
+ */
+ GenericValue& operator=(StringRefType str) RAPIDJSON_NOEXCEPT {
+ GenericValue s(str);
+ return *this = s;
+ }
+
+ //! Assignment with primitive types.
+ /*! \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t
+ \param value The value to be assigned.
+
+ \note The source type \c T explicitly disallows all pointer types,
+ especially (\c const) \ref Ch*. This helps avoiding implicitly
+ referencing character strings with insufficient lifetime, use
+ \ref SetString(const Ch*, Allocator&) (for copying) or
+ \ref StringRef() (to explicitly mark the pointer as constant) instead.
+ All other pointer types would implicitly convert to \c bool,
+ use \ref SetBool() instead.
+ */
+ template <typename T>
+ RAPIDJSON_DISABLEIF_RETURN((internal::IsPointer<T>), (GenericValue&))
+ operator=(T value) {
+ GenericValue v(value);
+ return *this = v;
+ }
+
+ //! Deep-copy assignment from Value
+ /*! Assigns a \b copy of the Value to the current Value object
+ \tparam SourceAllocator Allocator type of \c rhs
+ \param rhs Value to copy from (read-only)
+ \param allocator Allocator to use for copying
+ \param copyConstStrings Force copying of constant strings (e.g. referencing an in-situ buffer)
+ */
+ template <typename SourceAllocator>
+ GenericValue& CopyFrom(const GenericValue<Encoding, SourceAllocator>& rhs, Allocator& allocator, bool copyConstStrings = false) {
+ RAPIDJSON_ASSERT(static_cast<void*>(this) != static_cast<void const*>(&rhs));
+ this->~GenericValue();
+ new (this) GenericValue(rhs, allocator, copyConstStrings);
+ return *this;
+ }
+
+ //! Exchange the contents of this value with those of other.
+ /*!
+ \param other Another value.
+ \note Constant complexity.
+ */
+ GenericValue& Swap(GenericValue& other) RAPIDJSON_NOEXCEPT {
+ GenericValue temp;
+ temp.RawAssign(*this);
+ RawAssign(other);
+ other.RawAssign(temp);
+ return *this;
+ }
+
+ //! free-standing swap function helper
+ /*!
+ Helper function to enable support for common swap implementation pattern based on \c std::swap:
+ \code
+ void swap(MyClass& a, MyClass& b) {
+ using std::swap;
+ swap(a.value, b.value);
+ // ...
+ }
+ \endcode
+ \see Swap()
+ */
+ friend inline void swap(GenericValue& a, GenericValue& b) RAPIDJSON_NOEXCEPT { a.Swap(b); }
+
+ //! Prepare Value for move semantics
+ /*! \return *this */
+ GenericValue& Move() RAPIDJSON_NOEXCEPT { return *this; }
+ //@}
+
+ //!@name Equal-to and not-equal-to operators
+ //@{
+ //! Equal-to operator
+ /*!
+ \note If an object contains duplicated named member, comparing equality with any object is always \c false.
+ \note Complexity is quadratic in Object's member number and linear for the rest (number of all values in the subtree and total lengths of all strings).
+ */
+ template <typename SourceAllocator>
+ bool operator==(const GenericValue<Encoding, SourceAllocator>& rhs) const {
+ typedef GenericValue<Encoding, SourceAllocator> RhsType;
+ if (GetType() != rhs.GetType())
+ return false;
+ 
+ switch (GetType()) {
+ case kObjectType: // Warning: O(n^2) inner-loop
+ if (data_.o.size != rhs.data_.o.size)
+ return false;
+ // Member order is ignored: each lhs member is looked up by name in rhs.
+ for (ConstMemberIterator lhsMemberItr = MemberBegin(); lhsMemberItr != MemberEnd(); ++lhsMemberItr) {
+ typename RhsType::ConstMemberIterator rhsMemberItr = rhs.FindMember(lhsMemberItr->name);
+ if (rhsMemberItr == rhs.MemberEnd() || lhsMemberItr->value != rhsMemberItr->value)
+ return false;
+ }
+ return true;
+ 
+ case kArrayType:
+ if (data_.a.size != rhs.data_.a.size)
+ return false;
+ for (SizeType i = 0; i < data_.a.size; i++)
+ if ((*this)[i] != rhs[i])
+ return false;
+ return true;
+ 
+ case kStringType:
+ return StringEqual(rhs);
+ 
+ case kNumberType:
+ if (IsDouble() || rhs.IsDouble()) {
+ double a = GetDouble(); // May convert from integer to double.
+ double b = rhs.GetDouble(); // Ditto
+ return a >= b && a <= b; // Prevent -Wfloat-equal
+ }
+ else
+ return data_.n.u64 == rhs.data_.n.u64;
+ 
+ // Null, True and False carry no payload: matching type (checked above) suffices.
+ default:
+ return true;
+ }
+ }
+ 
+ //! Equal-to operator with const C-string pointer
+ bool operator==(const Ch* rhs) const { return *this == GenericValue(StringRef(rhs)); }
+ 
+#if RAPIDJSON_HAS_STDSTRING
+ //! Equal-to operator with string object
+ /*! \note Requires the definition of the preprocessor symbol \ref RAPIDJSON_HAS_STDSTRING.
+ */
+ bool operator==(const std::basic_string<Ch>& rhs) const { return *this == GenericValue(StringRef(rhs)); }
+#endif
+ 
+ //! Equal-to operator with primitive types
+ /*! \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t, \c double, \c true, \c false
+ */
+ template <typename T> RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>,internal::IsGenericValue<T> >), (bool)) operator==(const T& rhs) const { return *this == GenericValue(rhs); }
+ 
+ //! Not-equal-to operator
+ /*! \return !(*this == rhs)
+ */
+ template <typename SourceAllocator>
+ bool operator!=(const GenericValue<Encoding, SourceAllocator>& rhs) const { return !(*this == rhs); }
+ 
+ //! Not-equal-to operator with const C-string pointer
+ bool operator!=(const Ch* rhs) const { return !(*this == rhs); }
+ 
+ //! Not-equal-to operator with arbitrary types
+ /*! \return !(*this == rhs)
+ */
+ template <typename T> RAPIDJSON_DISABLEIF_RETURN((internal::IsGenericValue<T>), (bool)) operator!=(const T& rhs) const { return !(*this == rhs); }
+ 
+ //! Equal-to operator with arbitrary types (symmetric version)
+ /*! \return (rhs == lhs)
+ */
+ template <typename T> friend RAPIDJSON_DISABLEIF_RETURN((internal::IsGenericValue<T>), (bool)) operator==(const T& lhs, const GenericValue& rhs) { return rhs == lhs; }
+ 
+ //! Not-Equal-to operator with arbitrary types (symmetric version)
+ /*! \return !(rhs == lhs)
+ */
+ template <typename T> friend RAPIDJSON_DISABLEIF_RETURN((internal::IsGenericValue<T>), (bool)) operator!=(const T& lhs, const GenericValue& rhs) { return !(rhs == lhs); }
+ //@}
+
+ //!@name Type
+ //@{
+ 
+ // Exact-type predicates (IsNull/IsFalse/IsTrue/IsObject/IsArray) compare the
+ // whole flag word for equality; category predicates (IsBool/IsNumber/IsInt/...)
+ // mask a single bit, so e.g. an int value satisfies several Is* queries at once.
+ Type GetType() const { return static_cast<Type>(data_.f.flags & kTypeMask); }
+ bool IsNull() const { return data_.f.flags == kNullFlag; }
+ bool IsFalse() const { return data_.f.flags == kFalseFlag; }
+ bool IsTrue() const { return data_.f.flags == kTrueFlag; }
+ bool IsBool() const { return (data_.f.flags & kBoolFlag) != 0; }
+ bool IsObject() const { return data_.f.flags == kObjectFlag; }
+ bool IsArray() const { return data_.f.flags == kArrayFlag; }
+ bool IsNumber() const { return (data_.f.flags & kNumberFlag) != 0; }
+ bool IsInt() const { return (data_.f.flags & kIntFlag) != 0; }
+ bool IsUint() const { return (data_.f.flags & kUintFlag) != 0; }
+ bool IsInt64() const { return (data_.f.flags & kInt64Flag) != 0; }
+ bool IsUint64() const { return (data_.f.flags & kUint64Flag) != 0; }
+ bool IsDouble() const { return (data_.f.flags & kDoubleFlag) != 0; }
+ bool IsString() const { return (data_.f.flags & kStringFlag) != 0; }
+ 
+ // Checks whether a number can be losslessly converted to a double.
+ bool IsLosslessDouble() const {
+ if (!IsNumber()) return false;
+ if (IsUint64()) {
+ uint64_t u = GetUint64();
+ // 'volatile' forces the converted value through memory, presumably to
+ // defeat excess FP register precision skewing the round-trip test.
+ volatile double d = static_cast<double>(u);
+ return (d >= 0.0)
+ && (d < static_cast<double>((std::numeric_limits<uint64_t>::max)()))
+ && (u == static_cast<uint64_t>(d));
+ }
+ if (IsInt64()) {
+ int64_t i = GetInt64();
+ volatile double d = static_cast<double>(i);
+ return (d >= static_cast<double>((std::numeric_limits<int64_t>::min)()))
+ && (d < static_cast<double>((std::numeric_limits<int64_t>::max)()))
+ && (i == static_cast<int64_t>(d));
+ }
+ return true; // double, int, uint are always lossless
+ }
+ 
+ // Checks whether a number is a float (possible lossy).
+ bool IsFloat() const {
+ if ((data_.f.flags & kDoubleFlag) == 0)
+ return false;
+ double d = GetDouble();
+ return d >= -3.4028234e38 && d <= 3.4028234e38;
+ }
+ // Checks whether a number can be losslessly converted to a float.
+ bool IsLosslessFloat() const {
+ if (!IsNumber()) return false;
+ double a = GetDouble();
+ if (a < static_cast<double>(-(std::numeric_limits<float>::max)())
+ || a > static_cast<double>((std::numeric_limits<float>::max)()))
+ return false;
+ // Round-trip through float and compare without == to stay -Wfloat-equal clean.
+ double b = static_cast<double>(static_cast<float>(a));
+ return a >= b && a <= b; // Prevent -Wfloat-equal
+ }
+ 
+ //@}
+
+ //!@name Null
+ //@{
+ 
+ //! Reset this value to null, destroying any current contents in place.
+ GenericValue& SetNull() { this->~GenericValue(); new (this) GenericValue(); return *this; }
+ 
+ //@}
+ 
+ //!@name Bool
+ //@{
+ 
+ //! Get boolean value. kTrueFlag and kFalseFlag are distinct whole-word flags.
+ bool GetBool() const { RAPIDJSON_ASSERT(IsBool()); return data_.f.flags == kTrueFlag; }
+ //! Set boolean value
+ /*! \post IsBool() == true */
+ GenericValue& SetBool(bool b) { this->~GenericValue(); new (this) GenericValue(b); return *this; }
+ 
+ //@}
+
+ //!@name Object
+ //@{
+ 
+ //! Set this value as an empty object.
+ /*! \post IsObject() == true */
+ GenericValue& SetObject() { this->~GenericValue(); new (this) GenericValue(kObjectType); return *this; }
+ 
+ //! Get the number of members in the object.
+ SizeType MemberCount() const { RAPIDJSON_ASSERT(IsObject()); return data_.o.size; }
+ 
+ //! Get the capacity of object.
+ SizeType MemberCapacity() const { RAPIDJSON_ASSERT(IsObject()); return data_.o.capacity; }
+ 
+ //! Check whether the object is empty.
+ bool ObjectEmpty() const { RAPIDJSON_ASSERT(IsObject()); return data_.o.size == 0; }
+ 
+ //! Get a value from an object associated with the name.
+ /*! \pre IsObject() == true
+ \tparam T Either \c Ch or \c const \c Ch (template used for disambiguation with \ref operator[](SizeType))
+ \note In version 0.1x, if the member is not found, this function returns a null value. This makes issue 7.
+ Since 0.2, if the name is not correct, it will assert.
+ If user is unsure whether a member exists, user should use HasMember() first.
+ A better approach is to use FindMember().
+ \note Linear time complexity.
+ */
+ template <typename T>
+ RAPIDJSON_DISABLEIF_RETURN((internal::NotExpr<internal::IsSame<typename internal::RemoveConst<T>::Type, Ch> >),(GenericValue&)) operator[](T* name) {
+ GenericValue n(StringRef(name));
+ return (*this)[n];
+ }
+ template <typename T>
+ RAPIDJSON_DISABLEIF_RETURN((internal::NotExpr<internal::IsSame<typename internal::RemoveConst<T>::Type, Ch> >),(const GenericValue&)) operator[](T* name) const { return const_cast<GenericValue&>(*this)[name]; }
+ 
+ //! Get a value from an object associated with the name.
+ /*! \pre IsObject() == true
+ \tparam SourceAllocator Allocator of the \c name value
+ 
+ \note Compared to \ref operator[](T*), this version is faster because it does not need a StrLen().
+ And it can also handle strings with embedded null characters.
+ 
+ \note Linear time complexity.
+ */
+ template <typename SourceAllocator>
+ GenericValue& operator[](const GenericValue<Encoding, SourceAllocator>& name) {
+ MemberIterator member = FindMember(name);
+ if (member != MemberEnd())
+ return member->value;
+ else {
+ RAPIDJSON_ASSERT(false); // see above note
+ 
+ // This will generate -Wexit-time-destructors in clang
+ // static GenericValue NullValue;
+ // return NullValue;
+ 
+ // Use static buffer and placement-new to prevent destruction
+ static char buffer[sizeof(GenericValue)];
+ return *new (buffer) GenericValue();
+ }
+ }
+ template <typename SourceAllocator>
+ const GenericValue& operator[](const GenericValue<Encoding, SourceAllocator>& name) const { return const_cast<GenericValue&>(*this)[name]; }
+ 
+#if RAPIDJSON_HAS_STDSTRING
+ //! Get a value from an object associated with name (string object).
+ GenericValue& operator[](const std::basic_string<Ch>& name) { return (*this)[GenericValue(StringRef(name))]; }
+ const GenericValue& operator[](const std::basic_string<Ch>& name) const { return (*this)[GenericValue(StringRef(name))]; }
+#endif
+ 
+ //! Const member iterator
+ /*! \pre IsObject() == true */
+ ConstMemberIterator MemberBegin() const { RAPIDJSON_ASSERT(IsObject()); return ConstMemberIterator(GetMembersPointer()); }
+ //! Const \em past-the-end member iterator
+ /*! \pre IsObject() == true */
+ ConstMemberIterator MemberEnd() const { RAPIDJSON_ASSERT(IsObject()); return ConstMemberIterator(GetMembersPointer() + data_.o.size); }
+ //! Member iterator
+ /*! \pre IsObject() == true */
+ MemberIterator MemberBegin() { RAPIDJSON_ASSERT(IsObject()); return MemberIterator(GetMembersPointer()); }
+ //! \em Past-the-end member iterator
+ /*! \pre IsObject() == true */
+ MemberIterator MemberEnd() { RAPIDJSON_ASSERT(IsObject()); return MemberIterator(GetMembersPointer() + data_.o.size); }
+
+ //! Request the object to have enough capacity to store members.
+ /*! \param newCapacity The capacity that the object at least need to have.
+ \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator().
+ \return The value itself for fluent API.
+ \note Linear time complexity.
+ */
+ GenericValue& MemberReserve(SizeType newCapacity, Allocator &allocator) {
+ RAPIDJSON_ASSERT(IsObject());
+ // Capacity only ever grows; a smaller request is a no-op.
+ if (newCapacity > data_.o.capacity) {
+ SetMembersPointer(reinterpret_cast<Member*>(allocator.Realloc(GetMembersPointer(), data_.o.capacity * sizeof(Member), newCapacity * sizeof(Member))));
+ data_.o.capacity = newCapacity;
+ }
+ return *this;
+ }
+ 
+ //! Check whether a member exists in the object.
+ /*!
+ \param name Member name to be searched.
+ \pre IsObject() == true
+ \return Whether a member with that name exists.
+ \note It is better to use FindMember() directly if you need the obtain the value as well.
+ \note Linear time complexity.
+ */
+ bool HasMember(const Ch* name) const { return FindMember(name) != MemberEnd(); }
+ 
+#if RAPIDJSON_HAS_STDSTRING
+ //! Check whether a member exists in the object with string object.
+ /*!
+ \param name Member name to be searched.
+ \pre IsObject() == true
+ \return Whether a member with that name exists.
+ \note It is better to use FindMember() directly if you need the obtain the value as well.
+ \note Linear time complexity.
+ */
+ bool HasMember(const std::basic_string<Ch>& name) const { return FindMember(name) != MemberEnd(); }
+#endif
+ 
+ //! Check whether a member exists in the object with GenericValue name.
+ /*!
+ This version is faster because it does not need a StrLen(). It can also handle string with null character.
+ \param name Member name to be searched.
+ \pre IsObject() == true
+ \return Whether a member with that name exists.
+ \note It is better to use FindMember() directly if you need the obtain the value as well.
+ \note Linear time complexity.
+ */
+ template <typename SourceAllocator>
+ bool HasMember(const GenericValue<Encoding, SourceAllocator>& name) const { return FindMember(name) != MemberEnd(); }
+ 
+ //! Find member by name.
+ /*!
+ \param name Member name to be searched.
+ \pre IsObject() == true
+ \return Iterator to member, if it exists.
+ Otherwise returns \ref MemberEnd().
+ 
+ \note Earlier versions of Rapidjson returned a \c NULL pointer, in case
+ the requested member doesn't exist. For consistency with e.g.
+ \c std::map, this has been changed to MemberEnd() now.
+ \note Linear time complexity.
+ */
+ MemberIterator FindMember(const Ch* name) {
+ GenericValue n(StringRef(name));
+ return FindMember(n);
+ }
+ 
+ ConstMemberIterator FindMember(const Ch* name) const { return const_cast<GenericValue&>(*this).FindMember(name); }
+ 
+ //! Find member by name.
+ /*!
+ This version is faster because it does not need a StrLen(). It can also handle string with null character.
+ \param name Member name to be searched.
+ \pre IsObject() == true
+ \return Iterator to member, if it exists.
+ Otherwise returns \ref MemberEnd().
+ 
+ \note Earlier versions of Rapidjson returned a \c NULL pointer, in case
+ the requested member doesn't exist. For consistency with e.g.
+ \c std::map, this has been changed to MemberEnd() now.
+ \note Linear time complexity.
+ */
+ template <typename SourceAllocator>
+ MemberIterator FindMember(const GenericValue<Encoding, SourceAllocator>& name) {
+ RAPIDJSON_ASSERT(IsObject());
+ RAPIDJSON_ASSERT(name.IsString());
+ // Linear scan: members are stored unordered, first match wins.
+ MemberIterator member = MemberBegin();
+ for ( ; member != MemberEnd(); ++member)
+ if (name.StringEqual(member->name))
+ break;
+ return member;
+ }
+ template <typename SourceAllocator> ConstMemberIterator FindMember(const GenericValue<Encoding, SourceAllocator>& name) const { return const_cast<GenericValue&>(*this).FindMember(name); }
+ 
+#if RAPIDJSON_HAS_STDSTRING
+ //! Find member by string object name.
+ /*!
+ \param name Member name to be searched.
+ \pre IsObject() == true
+ \return Iterator to member, if it exists.
+ Otherwise returns \ref MemberEnd().
+ */
+ MemberIterator FindMember(const std::basic_string<Ch>& name) { return FindMember(GenericValue(StringRef(name))); }
+ ConstMemberIterator FindMember(const std::basic_string<Ch>& name) const { return FindMember(GenericValue(StringRef(name))); }
+#endif
+
+ //! Add a member (name-value pair) to the object.
+ /*! \param name A string value as name of member.
+ \param value Value of any type.
+ \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator().
+ \return The value itself for fluent API.
+ \note The ownership of \c name and \c value will be transferred to this object on success.
+ \pre IsObject() && name.IsString()
+ \post name.IsNull() && value.IsNull()
+ \note Amortized Constant time complexity.
+ \note No duplicate-name check is performed; adding an existing name creates a duplicate member.
+ */
+ GenericValue& AddMember(GenericValue& name, GenericValue& value, Allocator& allocator) {
+ RAPIDJSON_ASSERT(IsObject());
+ RAPIDJSON_ASSERT(name.IsString());
+ 
+ // Grow by ~1.5x when full, then move (RawAssign) name and value into the new slot.
+ ObjectData& o = data_.o;
+ if (o.size >= o.capacity)
+ MemberReserve(o.capacity == 0 ? kDefaultObjectCapacity : (o.capacity + (o.capacity + 1) / 2), allocator);
+ Member* members = GetMembersPointer();
+ members[o.size].name.RawAssign(name);
+ members[o.size].value.RawAssign(value);
+ o.size++;
+ return *this;
+ }
+ 
+ //! Add a constant string value as member (name-value pair) to the object.
+ /*! \param name A string value as name of member.
+ \param value constant string reference as value of member.
+ \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator().
+ \return The value itself for fluent API.
+ \pre IsObject()
+ \note This overload is needed to avoid clashes with the generic primitive type AddMember(GenericValue&,T,Allocator&) overload below.
+ \note Amortized Constant time complexity.
+ */
+ GenericValue& AddMember(GenericValue& name, StringRefType value, Allocator& allocator) {
+ GenericValue v(value);
+ return AddMember(name, v, allocator);
+ }
+ 
+#if RAPIDJSON_HAS_STDSTRING
+ //! Add a string object as member (name-value pair) to the object.
+ /*! \param name A string value as name of member.
+ \param value constant string reference as value of member.
+ \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator().
+ \return The value itself for fluent API.
+ \pre IsObject()
+ \note This overload is needed to avoid clashes with the generic primitive type AddMember(GenericValue&,T,Allocator&) overload below.
+ \note Amortized Constant time complexity.
+ */
+ GenericValue& AddMember(GenericValue& name, std::basic_string<Ch>& value, Allocator& allocator) {
+ // Unlike the StringRefType overload, this copies the string via the allocator.
+ GenericValue v(value, allocator);
+ return AddMember(name, v, allocator);
+ }
+#endif
+ 
+ //! Add any primitive value as member (name-value pair) to the object.
+ /*! \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t
+ \param name A string value as name of member.
+ \param value Value of primitive type \c T as value of member
+ \param allocator Allocator for reallocating memory. Commonly use GenericDocument::GetAllocator().
+ \return The value itself for fluent API.
+ \pre IsObject()
+ 
+ \note The source type \c T explicitly disallows all pointer types,
+ especially (\c const) \ref Ch*. This helps avoiding implicitly
+ referencing character strings with insufficient lifetime, use
+ \ref AddMember(StringRefType, GenericValue&, Allocator&) or \ref
+ AddMember(StringRefType, StringRefType, Allocator&).
+ All other pointer types would implicitly convert to \c bool,
+ use an explicit cast instead, if needed.
+ \note Amortized Constant time complexity.
+ */
+ template <typename T>
+ RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>, internal::IsGenericValue<T> >), (GenericValue&))
+ AddMember(GenericValue& name, T value, Allocator& allocator) {
+ GenericValue v(value);
+ return AddMember(name, v, allocator);
+ }
+ 
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+ // Rvalue overloads simply bind the temporaries to the lvalue-reference
+ // core overload above, which moves their contents out via RawAssign.
+ GenericValue& AddMember(GenericValue&& name, GenericValue&& value, Allocator& allocator) {
+ return AddMember(name, value, allocator);
+ }
+ GenericValue& AddMember(GenericValue&& name, GenericValue& value, Allocator& allocator) {
+ return AddMember(name, value, allocator);
+ }
+ GenericValue& AddMember(GenericValue& name, GenericValue&& value, Allocator& allocator) {
+ return AddMember(name, value, allocator);
+ }
+ GenericValue& AddMember(StringRefType name, GenericValue&& value, Allocator& allocator) {
+ GenericValue n(name);
+ return AddMember(n, value, allocator);
+ }
+#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS
+
+
+ //! Add a member (name-value pair) to the object.
+ /*! \param name A constant string reference as name of member.
+ \param value Value of any type.
+ \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator().
+ \return The value itself for fluent API.
+ \note The ownership of \c value will be transferred to this object on success.
+ \pre IsObject()
+ \post value.IsNull()
+ \note Amortized Constant time complexity.
+ */
+ GenericValue& AddMember(StringRefType name, GenericValue& value, Allocator& allocator) {
+ // Wrap the constant string reference in a GenericValue name and forward.
+ GenericValue n(name);
+ return AddMember(n, value, allocator);
+ }
+ 
+ //! Add a constant string value as member (name-value pair) to the object.
+ /*! \param name A constant string reference as name of member.
+ \param value constant string reference as value of member.
+ \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator().
+ \return The value itself for fluent API.
+ \pre IsObject()
+ \note This overload is needed to avoid clashes with the generic primitive type AddMember(StringRefType,T,Allocator&) overload below.
+ \note Amortized Constant time complexity.
+ */
+ GenericValue& AddMember(StringRefType name, StringRefType value, Allocator& allocator) {
+ GenericValue v(value);
+ return AddMember(name, v, allocator);
+ }
+ 
+ //! Add any primitive value as member (name-value pair) to the object.
+ /*! \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t
+ \param name A constant string reference as name of member.
+ \param value Value of primitive type \c T as value of member
+ \param allocator Allocator for reallocating memory. Commonly use GenericDocument::GetAllocator().
+ \return The value itself for fluent API.
+ \pre IsObject()
+ 
+ \note The source type \c T explicitly disallows all pointer types,
+ especially (\c const) \ref Ch*. This helps avoiding implicitly
+ referencing character strings with insufficient lifetime, use
+ \ref AddMember(StringRefType, GenericValue&, Allocator&) or \ref
+ AddMember(StringRefType, StringRefType, Allocator&).
+ All other pointer types would implicitly convert to \c bool,
+ use an explicit cast instead, if needed.
+ \note Amortized Constant time complexity.
+ */
+ template <typename T>
+ RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>, internal::IsGenericValue<T> >), (GenericValue&))
+ AddMember(StringRefType name, T value, Allocator& allocator) {
+ GenericValue n(name);
+ return AddMember(n, value, allocator);
+ }
+
+ //! Remove all members in the object.
+ /*! This function do not deallocate memory in the object, i.e. the capacity is unchanged.
+ \note Linear time complexity.
+ */
+ void RemoveAllMembers() {
+ RAPIDJSON_ASSERT(IsObject());
+ // Destroy each member in place; the members buffer itself is retained.
+ for (MemberIterator m = MemberBegin(); m != MemberEnd(); ++m)
+ m->~Member();
+ data_.o.size = 0;
+ }
+ 
+ //! Remove a member in object by its name.
+ /*! \param name Name of member to be removed.
+ \return Whether the member existed.
+ \note This function may reorder the object members. Use \ref
+ EraseMember(ConstMemberIterator) if you need to preserve the
+ relative order of the remaining members.
+ \note Linear time complexity.
+ */
+ bool RemoveMember(const Ch* name) {
+ GenericValue n(StringRef(name));
+ return RemoveMember(n);
+ }
+ 
+#if RAPIDJSON_HAS_STDSTRING
+ bool RemoveMember(const std::basic_string<Ch>& name) { return RemoveMember(GenericValue(StringRef(name))); }
+#endif
+ 
+ template <typename SourceAllocator>
+ bool RemoveMember(const GenericValue<Encoding, SourceAllocator>& name) {
+ MemberIterator m = FindMember(name);
+ if (m != MemberEnd()) {
+ RemoveMember(m);
+ return true;
+ }
+ else
+ return false;
+ }
+ 
+ //! Remove a member in object by iterator.
+ /*! \param m member iterator (obtained by FindMember() or MemberBegin()).
+ \return the new iterator after removal.
+ \note This function may reorder the object members. Use \ref
+ EraseMember(ConstMemberIterator) if you need to preserve the
+ relative order of the remaining members.
+ \note Constant time complexity.
+ */
+ MemberIterator RemoveMember(MemberIterator m) {
+ RAPIDJSON_ASSERT(IsObject());
+ RAPIDJSON_ASSERT(data_.o.size > 0);
+ RAPIDJSON_ASSERT(GetMembersPointer() != 0);
+ RAPIDJSON_ASSERT(m >= MemberBegin() && m < MemberEnd());
+ 
+ // Swap-and-pop: overwrite the removed slot with the last member, which is
+ // what makes this O(1) but reorders the remaining members.
+ MemberIterator last(GetMembersPointer() + (data_.o.size - 1));
+ if (data_.o.size > 1 && m != last)
+ *m = *last; // Move the last one to this place
+ else
+ m->~Member(); // Only one left, just destroy
+ --data_.o.size;
+ return m;
+ }
+ 
+ //! Remove a member from an object by iterator.
+ /*! \param pos iterator to the member to remove
+ \pre IsObject() == true && \ref MemberBegin() <= \c pos < \ref MemberEnd()
+ \return Iterator following the removed element.
+ If the iterator \c pos refers to the last element, the \ref MemberEnd() iterator is returned.
+ \note This function preserves the relative order of the remaining object
+ members. If you do not need this, use the more efficient \ref RemoveMember(MemberIterator).
+ \note Linear time complexity.
+ */
+ MemberIterator EraseMember(ConstMemberIterator pos) {
+ return EraseMember(pos, pos +1);
+ }
+
+ //! Remove members in the range [first, last) from an object.
+ /*! \param first iterator to the first member to remove
+ \param last iterator following the last member to remove
+ \pre IsObject() == true && \ref MemberBegin() <= \c first <= \c last <= \ref MemberEnd()
+ \return Iterator following the last removed element.
+ \note This function preserves the relative order of the remaining object
+ members.
+ \note Linear time complexity.
+ */
+ MemberIterator EraseMember(ConstMemberIterator first, ConstMemberIterator last) {
+ RAPIDJSON_ASSERT(IsObject());
+ RAPIDJSON_ASSERT(data_.o.size > 0);
+ RAPIDJSON_ASSERT(GetMembersPointer() != 0);
+ RAPIDJSON_ASSERT(first >= MemberBegin());
+ RAPIDJSON_ASSERT(first <= last);
+ RAPIDJSON_ASSERT(last <= MemberEnd());
+ 
+ // Destroy the erased range, then shift the tail down with a raw memmove
+ // (the static_cast<void*> also silences -Wclass-memaccess); this relies on
+ // Member being safely relocatable by a byte copy.
+ MemberIterator pos = MemberBegin() + (first - MemberBegin());
+ for (MemberIterator itr = pos; itr != last; ++itr)
+ itr->~Member();
+ std::memmove(static_cast<void*>(&*pos), &*last, static_cast<size_t>(MemberEnd() - last) * sizeof(Member));
+ data_.o.size -= static_cast<SizeType>(last - first);
+ return pos;
+ }
+ 
+ //! Erase a member in object by its name.
+ /*! \param name Name of member to be removed.
+ \return Whether the member existed.
+ \note Linear time complexity.
+ */
+ bool EraseMember(const Ch* name) {
+ GenericValue n(StringRef(name));
+ return EraseMember(n);
+ }
+ 
+#if RAPIDJSON_HAS_STDSTRING
+ bool EraseMember(const std::basic_string<Ch>& name) { return EraseMember(GenericValue(StringRef(name))); }
+#endif
+ 
+ template <typename SourceAllocator>
+ bool EraseMember(const GenericValue<Encoding, SourceAllocator>& name) {
+ MemberIterator m = FindMember(name);
+ if (m != MemberEnd()) {
+ EraseMember(m);
+ return true;
+ }
+ else
+ return false;
+ }
+ 
+ //! Obtain lightweight object views over this value.
+ Object GetObject() { RAPIDJSON_ASSERT(IsObject()); return Object(*this); }
+ ConstObject GetObject() const { RAPIDJSON_ASSERT(IsObject()); return ConstObject(*this); }
+ 
+ //@}
+
+ //!@name Array
+ //@{
+ 
+ //! Set this value as an empty array.
+ /*! \post IsArray == true */
+ GenericValue& SetArray() { this->~GenericValue(); new (this) GenericValue(kArrayType); return *this; }
+ 
+ //! Get the number of elements in array.
+ SizeType Size() const { RAPIDJSON_ASSERT(IsArray()); return data_.a.size; }
+ 
+ //! Get the capacity of array.
+ SizeType Capacity() const { RAPIDJSON_ASSERT(IsArray()); return data_.a.capacity; }
+ 
+ //! Check whether the array is empty.
+ bool Empty() const { RAPIDJSON_ASSERT(IsArray()); return data_.a.size == 0; }
+ 
+ //! Remove all elements in the array.
+ /*! This function do not deallocate memory in the array, i.e. the capacity is unchanged.
+ \note Linear time complexity.
+ */
+ void Clear() {
+ RAPIDJSON_ASSERT(IsArray());
+ // Destroy each element in place; the elements buffer itself is retained.
+ GenericValue* e = GetElementsPointer();
+ for (GenericValue* v = e; v != e + data_.a.size; ++v)
+ v->~GenericValue();
+ data_.a.size = 0;
+ }
+ 
+ //! Get an element from array by index.
+ /*! \pre IsArray() == true
+ \param index Zero-based index of element.
+ \see operator[](T*)
+ */
+ GenericValue& operator[](SizeType index) {
+ RAPIDJSON_ASSERT(IsArray());
+ RAPIDJSON_ASSERT(index < data_.a.size);
+ return GetElementsPointer()[index];
+ }
+ const GenericValue& operator[](SizeType index) const { return const_cast<GenericValue&>(*this)[index]; }
+ 
+ //! Element iterator
+ /*! \pre IsArray() == true */
+ ValueIterator Begin() { RAPIDJSON_ASSERT(IsArray()); return GetElementsPointer(); }
+ //! \em Past-the-end element iterator
+ /*! \pre IsArray() == true */
+ ValueIterator End() { RAPIDJSON_ASSERT(IsArray()); return GetElementsPointer() + data_.a.size; }
+ //! Constant element iterator
+ /*! \pre IsArray() == true */
+ ConstValueIterator Begin() const { return const_cast<GenericValue&>(*this).Begin(); }
+ //! Constant \em past-the-end element iterator
+ /*! \pre IsArray() == true */
+ ConstValueIterator End() const { return const_cast<GenericValue&>(*this).End(); }
+
+ //! Request the array to have enough capacity to store elements.
+ /*! \param newCapacity The capacity that the array at least need to have.
+ \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator().
+ \return The value itself for fluent API.
+ \note Linear time complexity.
+ */
+ GenericValue& Reserve(SizeType newCapacity, Allocator &allocator) {
+ RAPIDJSON_ASSERT(IsArray());
+ // Capacity only ever grows; a smaller request is a no-op.
+ if (newCapacity > data_.a.capacity) {
+ SetElementsPointer(reinterpret_cast<GenericValue*>(allocator.Realloc(GetElementsPointer(), data_.a.capacity * sizeof(GenericValue), newCapacity * sizeof(GenericValue))));
+ data_.a.capacity = newCapacity;
+ }
+ return *this;
+ }
+ 
+ //! Append a GenericValue at the end of the array.
+ /*! \param value Value to be appended.
+ \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator().
+ \pre IsArray() == true
+ \post value.IsNull() == true
+ \return The value itself for fluent API.
+ \note The ownership of \c value will be transferred to this array on success.
+ \note If the number of elements to be appended is known, calls Reserve() once first may be more efficient.
+ \note Amortized constant time complexity.
+ */
+ GenericValue& PushBack(GenericValue& value, Allocator& allocator) {
+ RAPIDJSON_ASSERT(IsArray());
+ // Grow by ~1.5x when full (same policy as AddMember), then move value in.
+ if (data_.a.size >= data_.a.capacity)
+ Reserve(data_.a.capacity == 0 ? kDefaultArrayCapacity : (data_.a.capacity + (data_.a.capacity + 1) / 2), allocator);
+ GetElementsPointer()[data_.a.size++].RawAssign(value);
+ return *this;
+ }
+ 
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+ // Rvalue overload binds the temporary to the lvalue overload above.
+ GenericValue& PushBack(GenericValue&& value, Allocator& allocator) {
+ return PushBack(value, allocator);
+ }
+#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS
+
+ //! Append a constant string reference at the end of the array.
+ /*! \param value Constant string reference to be appended.
+ \param allocator Allocator for reallocating memory. It must be the same one used previously. Commonly use GenericDocument::GetAllocator().
+ \pre IsArray() == true
+ \return The value itself for fluent API.
+ \note If the number of elements to be appended is known, calls Reserve() once first may be more efficient.
+ \note Amortized constant time complexity.
+ \see GenericStringRef
+ */
+ GenericValue& PushBack(StringRefType value, Allocator& allocator) {
+ return (*this).template PushBack<StringRefType>(value, allocator);
+ }
+
+ //! Append a primitive value at the end of the array.
+ /*! \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t
+ \param value Value of primitive type T to be appended.
+ \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator().
+ \pre IsArray() == true
+ \return The value itself for fluent API.
+ \note If the number of elements to be appended is known, calls Reserve() once first may be more efficient.
+
+ \note The source type \c T explicitly disallows all pointer types,
+ especially (\c const) \ref Ch*. This helps avoiding implicitly
+ referencing character strings with insufficient lifetime, use
+ \ref PushBack(GenericValue&, Allocator&) or \ref
+ PushBack(StringRefType, Allocator&).
+ All other pointer types would implicitly convert to \c bool,
+ use an explicit cast instead, if needed.
+ \note Amortized constant time complexity.
+ */
+ template <typename T>
+ RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>, internal::IsGenericValue<T> >), (GenericValue&))
+ PushBack(T value, Allocator& allocator) {
+ GenericValue v(value);
+ return PushBack(v, allocator);
+ }
+
+ //! Remove the last element in the array.
+ /*!
+ \note Constant time complexity.
+ */
+ GenericValue& PopBack() {
+ RAPIDJSON_ASSERT(IsArray());
+ RAPIDJSON_ASSERT(!Empty());
+ GetElementsPointer()[--data_.a.size].~GenericValue();
+ return *this;
+ }
+
+ //! Remove an element of array by iterator.
+ /*!
+ \param pos iterator to the element to remove
+ \pre IsArray() == true && \ref Begin() <= \c pos < \ref End()
+ \return Iterator following the removed element. If the iterator pos refers to the last element, the End() iterator is returned.
+ \note Linear time complexity.
+ */
+ ValueIterator Erase(ConstValueIterator pos) {
+ return Erase(pos, pos + 1);
+ }
+
+ //! Remove elements in the range [first, last) of the array.
+ /*!
+ \param first iterator to the first element to remove
+ \param last iterator following the last element to remove
+ \pre IsArray() == true && \ref Begin() <= \c first <= \c last <= \ref End()
+ \return Iterator following the last removed element.
+ \note Linear time complexity.
+ */
+ ValueIterator Erase(ConstValueIterator first, ConstValueIterator last) {
+ RAPIDJSON_ASSERT(IsArray());
+ RAPIDJSON_ASSERT(data_.a.size > 0);
+ RAPIDJSON_ASSERT(GetElementsPointer() != 0);
+ RAPIDJSON_ASSERT(first >= Begin());
+ RAPIDJSON_ASSERT(first <= last);
+ RAPIDJSON_ASSERT(last <= End());
+ // Re-derive a mutable iterator at the same offset as the const 'first'.
+ ValueIterator pos = Begin() + (first - Begin());
+ // Destroy the erased range, then shift the tail down with a raw byte move
+ // (this is how rapidjson relocates GenericValue objects throughout the DOM).
+ for (ValueIterator itr = pos; itr != last; ++itr)
+ itr->~GenericValue();
+ std::memmove(static_cast<void*>(pos), last, static_cast<size_t>(End() - last) * sizeof(GenericValue));
+ data_.a.size -= static_cast<SizeType>(last - first);
+ return pos;
+ }
+
+ Array GetArray() { RAPIDJSON_ASSERT(IsArray()); return Array(*this); }
+ ConstArray GetArray() const { RAPIDJSON_ASSERT(IsArray()); return ConstArray(*this); }
+
+ //@}
+
+ //!@name Number
+ //@{
+
+ // Exact-width accessors: each asserts the lossless-conversion flag that was
+ // recorded when the number was stored (see kIntFlag/kUintFlag/... below).
+ int GetInt() const { RAPIDJSON_ASSERT(data_.f.flags & kIntFlag); return data_.n.i.i; }
+ unsigned GetUint() const { RAPIDJSON_ASSERT(data_.f.flags & kUintFlag); return data_.n.u.u; }
+ int64_t GetInt64() const { RAPIDJSON_ASSERT(data_.f.flags & kInt64Flag); return data_.n.i64; }
+ uint64_t GetUint64() const { RAPIDJSON_ASSERT(data_.f.flags & kUint64Flag); return data_.n.u64; }
+
+ //! Get the value as double type.
+ /*! \note If the value is 64-bit integer type, it may lose precision. Use \c IsLosslessDouble() to check whether the conversion is lossless.
+ */
+ double GetDouble() const {
+ RAPIDJSON_ASSERT(IsNumber());
+ if ((data_.f.flags & kDoubleFlag) != 0) return data_.n.d; // exact type, no conversion.
+ if ((data_.f.flags & kIntFlag) != 0) return data_.n.i.i; // int -> double
+ if ((data_.f.flags & kUintFlag) != 0) return data_.n.u.u; // unsigned -> double
+ if ((data_.f.flags & kInt64Flag) != 0) return static_cast<double>(data_.n.i64); // int64_t -> double (may lose precision)
+ RAPIDJSON_ASSERT((data_.f.flags & kUint64Flag) != 0); return static_cast<double>(data_.n.u64); // uint64_t -> double (may lose precision)
+ }
+
+ //! Get the value as float type.
+ /*! \note If the value is 64-bit integer type, it may lose precision. Use \c IsLosslessFloat() to check whether the conversion is lossless.
+ */
+ float GetFloat() const {
+ return static_cast<float>(GetDouble());
+ }
+
+ // Each setter destroys the current value, then placement-news the new number
+ // into the same storage (the standard rapidjson re-initialization pattern).
+ GenericValue& SetInt(int i) { this->~GenericValue(); new (this) GenericValue(i); return *this; }
+ GenericValue& SetUint(unsigned u) { this->~GenericValue(); new (this) GenericValue(u); return *this; }
+ GenericValue& SetInt64(int64_t i64) { this->~GenericValue(); new (this) GenericValue(i64); return *this; }
+ GenericValue& SetUint64(uint64_t u64) { this->~GenericValue(); new (this) GenericValue(u64); return *this; }
+ GenericValue& SetDouble(double d) { this->~GenericValue(); new (this) GenericValue(d); return *this; }
+ GenericValue& SetFloat(float f) { this->~GenericValue(); new (this) GenericValue(static_cast<double>(f)); return *this; }
+
+ //@}
+
+ //!@name String
+ //@{
+
+ // Inline (short) strings live directly in data_.ss; longer strings are reached
+ // through the (possibly 48-bit-packed) pointer accessor.
+ const Ch* GetString() const { RAPIDJSON_ASSERT(IsString()); return (data_.f.flags & kInlineStrFlag) ? data_.ss.str : GetStringPointer(); }
+
+ //! Get the length of string.
+ /*! Since rapidjson permits "\\u0000" in the json string, strlen(v.GetString()) may not equal v.GetStringLength().
+ */
+ SizeType GetStringLength() const { RAPIDJSON_ASSERT(IsString()); return ((data_.f.flags & kInlineStrFlag) ? (data_.ss.GetLength()) : data_.s.length); }
+
+ //! Set this value as a string without copying source string.
+ /*! This version has better performance with supplied length, and also supports strings containing null characters.
+ \param s source string pointer.
+ \param length The length of source string, excluding the trailing null terminator.
+ \return The value itself for fluent API.
+ \post IsString() == true && GetString() == s && GetStringLength() == length
+ \see SetString(StringRefType)
+ */
+ GenericValue& SetString(const Ch* s, SizeType length) { return SetString(StringRef(s, length)); }
+
+ //! Set this value as a string without copying source string.
+ /*! \param s source string reference
+ \return The value itself for fluent API.
+ \post IsString() == true && GetString() == s && GetStringLength() == s.length
+ */
+ GenericValue& SetString(StringRefType s) { this->~GenericValue(); SetStringRaw(s); return *this; }
+
+ //! Set this value as a string by copying from source string.
+ /*! This version has better performance with supplied length, and also supports strings containing null characters.
+ \param s source string.
+ \param length The length of source string, excluding the trailing null terminator.
+ \param allocator Allocator for allocating copied buffer. Commonly use GenericDocument::GetAllocator().
+ \return The value itself for fluent API.
+ \post IsString() == true && GetString() != s && strcmp(GetString(),s) == 0 && GetStringLength() == length
+ */
+ GenericValue& SetString(const Ch* s, SizeType length, Allocator& allocator) { return SetString(StringRef(s, length), allocator); }
+
+ //! Set this value as a string by copying from source string.
+ /*! \param s source string.
+ \param allocator Allocator for allocating copied buffer. Commonly use GenericDocument::GetAllocator().
+ \return The value itself for fluent API.
+ \post IsString() == true && GetString() != s && strcmp(GetString(),s) == 0 && GetStringLength() == length
+ */
+ GenericValue& SetString(const Ch* s, Allocator& allocator) { return SetString(StringRef(s), allocator); }
+
+ //! Set this value as a string by copying from source string.
+ /*! \param s source string reference
+ \param allocator Allocator for allocating copied buffer. Commonly use GenericDocument::GetAllocator().
+ \return The value itself for fluent API.
+ \post IsString() == true && GetString() != s.s && strcmp(GetString(),s) == 0 && GetStringLength() == length
+ */
+ GenericValue& SetString(StringRefType s, Allocator& allocator) { this->~GenericValue(); SetStringRaw(s, allocator); return *this; }
+
+#if RAPIDJSON_HAS_STDSTRING
+ //! Set this value as a string by copying from source string.
+ /*! \param s source string.
+ \param allocator Allocator for allocating copied buffer. Commonly use GenericDocument::GetAllocator().
+ \return The value itself for fluent API.
+ \post IsString() == true && GetString() != s.data() && strcmp(GetString(),s.data()) == 0 && GetStringLength() == s.size()
+ \note Requires the definition of the preprocessor symbol \ref RAPIDJSON_HAS_STDSTRING.
+ */
+ GenericValue& SetString(const std::basic_string<Ch>& s, Allocator& allocator) { return SetString(StringRef(s), allocator); }
+#endif
+
+ //@}
+
+ //!@name Templated accessors
+ //@{
+
+ //! Templated version for checking whether this value is type T.
+ /*!
+ \tparam T Either \c bool, \c int, \c unsigned, \c int64_t, \c uint64_t, \c double, \c float, \c const \c char*, \c std::basic_string<Ch>
+ */
+ template <typename T>
+ bool Is() const { return internal::TypeHelper<ValueType, T>::Is(*this); }
+
+ // Const and non-const Get<T>() both return by value, dispatching to
+ // internal::TypeHelper for the per-type conversion.
+ template <typename T>
+ T Get() const { return internal::TypeHelper<ValueType, T>::Get(*this); }
+
+ template <typename T>
+ T Get() { return internal::TypeHelper<ValueType, T>::Get(*this); }
+
+ template<typename T>
+ ValueType& Set(const T& data) { return internal::TypeHelper<ValueType, T>::Set(*this, data); }
+
+ // Overload taking an allocator, for T types that require copying (e.g. strings).
+ template<typename T>
+ ValueType& Set(const T& data, AllocatorType& allocator) { return internal::TypeHelper<ValueType, T>::Set(*this, data, allocator); }
+
+ //@}
+
+ //! Generate events of this value to a Handler.
+ /*! This function adopts the GoF visitor pattern.
+ Typical usage is to output this JSON value as JSON text via Writer, which is a Handler.
+ It can also be used to deep clone this value via GenericDocument, which is also a Handler.
+ \tparam Handler type of handler.
+ \param handler An object implementing concept Handler.
+ \return false as soon as any handler callback returns false (early abort).
+ */
+ template <typename Handler>
+ bool Accept(Handler& handler) const {
+ switch(GetType()) {
+ case kNullType: return handler.Null();
+ case kFalseType: return handler.Bool(false);
+ case kTrueType: return handler.Bool(true);
+
+ case kObjectType:
+ if (RAPIDJSON_UNLIKELY(!handler.StartObject()))
+ return false;
+ for (ConstMemberIterator m = MemberBegin(); m != MemberEnd(); ++m) {
+ RAPIDJSON_ASSERT(m->name.IsString()); // User may change the type of name by MemberIterator.
+ // The third argument tells the handler whether the key owns a copy of its characters.
+ if (RAPIDJSON_UNLIKELY(!handler.Key(m->name.GetString(), m->name.GetStringLength(), (m->name.data_.f.flags & kCopyFlag) != 0)))
+ return false;
+ if (RAPIDJSON_UNLIKELY(!m->value.Accept(handler)))
+ return false;
+ }
+ return handler.EndObject(data_.o.size);
+
+ case kArrayType:
+ if (RAPIDJSON_UNLIKELY(!handler.StartArray()))
+ return false;
+ for (const GenericValue* v = Begin(); v != End(); ++v)
+ if (RAPIDJSON_UNLIKELY(!v->Accept(handler)))
+ return false;
+ return handler.EndArray(data_.a.size);
+
+ case kStringType:
+ return handler.String(GetString(), GetStringLength(), (data_.f.flags & kCopyFlag) != 0);
+
+ default:
+ RAPIDJSON_ASSERT(GetType() == kNumberType);
+ // Dispatch to the narrowest handler callback the stored flags allow.
+ if (IsDouble()) return handler.Double(data_.n.d);
+ else if (IsInt()) return handler.Int(data_.n.i.i);
+ else if (IsUint()) return handler.Uint(data_.n.u.u);
+ else if (IsInt64()) return handler.Int64(data_.n.i64);
+ else return handler.Uint64(data_.n.u64);
+ }
+ }
+
+private:
+ template <typename, typename> friend class GenericValue;
+ template <typename, typename, typename> friend class GenericDocument;
+
+ // Bit flags describing the stored representation; the low 3 bits
+ // (kTypeMask) hold the public Type enum value.
+ enum {
+ kBoolFlag = 0x0008,
+ kNumberFlag = 0x0010,
+ kIntFlag = 0x0020,
+ kUintFlag = 0x0040,
+ kInt64Flag = 0x0080,
+ kUint64Flag = 0x0100,
+ kDoubleFlag = 0x0200,
+ kStringFlag = 0x0400,
+ kCopyFlag = 0x0800,
+ kInlineStrFlag = 0x1000,
+
+ // Initial flags of different types.
+ kNullFlag = kNullType,
+ kTrueFlag = kTrueType | kBoolFlag,
+ kFalseFlag = kFalseType | kBoolFlag,
+ kNumberIntFlag = kNumberType | kNumberFlag | kIntFlag | kInt64Flag,
+ kNumberUintFlag = kNumberType | kNumberFlag | kUintFlag | kUint64Flag | kInt64Flag,
+ kNumberInt64Flag = kNumberType | kNumberFlag | kInt64Flag,
+ kNumberUint64Flag = kNumberType | kNumberFlag | kUint64Flag,
+ kNumberDoubleFlag = kNumberType | kNumberFlag | kDoubleFlag,
+ kNumberAnyFlag = kNumberType | kNumberFlag | kIntFlag | kInt64Flag | kUintFlag | kUint64Flag | kDoubleFlag,
+ kConstStringFlag = kStringType | kStringFlag,
+ kCopyStringFlag = kStringType | kStringFlag | kCopyFlag,
+ kShortStringFlag = kStringType | kStringFlag | kCopyFlag | kInlineStrFlag,
+ kObjectFlag = kObjectType,
+ kArrayFlag = kArrayType,
+
+ kTypeMask = 0x07 // lowest 3 bits store the Type enum
+ };
+
+ static const SizeType kDefaultArrayCapacity = RAPIDJSON_VALUE_DEFAULT_ARRAY_CAPACITY;
+ static const SizeType kDefaultObjectCapacity = RAPIDJSON_VALUE_DEFAULT_OBJECT_CAPACITY;
+
+ // Overlay that exposes the flag word; 'payload' pads each variant so that
+ // 'flags' lands at the same offset in every union member.
+ struct Flag {
+#if RAPIDJSON_48BITPOINTER_OPTIMIZATION
+ char payload[sizeof(SizeType) * 2 + 6]; // 2 x SizeType + lower 48-bit pointer
+#elif RAPIDJSON_64BIT
+ char payload[sizeof(SizeType) * 2 + sizeof(void*) + 6]; // 6 padding bytes
+#else
+ char payload[sizeof(SizeType) * 2 + sizeof(void*) + 2]; // 2 padding bytes
+#endif
+ uint16_t flags;
+ };
+
+ struct String {
+ SizeType length;
+ SizeType hashcode; //!< reserved
+ const Ch* str;
+ }; // 12 bytes in 32-bit mode, 16 bytes in 64-bit mode
+
+ // implementation detail: ShortString can represent zero-terminated strings up to MaxSize chars
+ // (excluding the terminating zero) and store a value to determine the length of the contained
+ // string in the last character str[LenPos] by storing "MaxSize - length" there. If the string
+ // to store has the maximal length of MaxSize then str[LenPos] will be 0 and therefore act as
+ // the string terminator as well. For getting the string length back from that value just use
+ // "MaxSize - str[LenPos]".
+ // This allows to store 13-chars strings in 32-bit mode, 21-chars strings in 64-bit mode,
+ // 13-chars strings for RAPIDJSON_48BITPOINTER_OPTIMIZATION=1 inline (for `UTF8`-encoded strings).
+ struct ShortString {
+ enum { MaxChars = sizeof(static_cast<Flag*>(0)->payload) / sizeof(Ch), MaxSize = MaxChars - 1, LenPos = MaxSize };
+ Ch str[MaxChars];
+
+ inline static bool Usable(SizeType len) { return (MaxSize >= len); }
+ inline void SetLength(SizeType len) { str[LenPos] = static_cast<Ch>(MaxSize - len); }
+ inline SizeType GetLength() const { return static_cast<SizeType>(MaxSize - str[LenPos]); }
+ }; // at most as many bytes as "String" above => 12 bytes in 32-bit mode, 16 bytes in 64-bit mode
+
+ // By using proper binary layout, retrieval of different integer types do not need conversions.
+ union Number {
+#if RAPIDJSON_ENDIAN == RAPIDJSON_LITTLEENDIAN
+ struct I {
+ int i;
+ char padding[4];
+ }i;
+ struct U {
+ unsigned u;
+ char padding2[4];
+ }u;
+#else
+ struct I {
+ char padding[4];
+ int i;
+ }i;
+ struct U {
+ char padding2[4];
+ unsigned u;
+ }u;
+#endif
+ int64_t i64;
+ uint64_t u64;
+ double d;
+ }; // 8 bytes
+
+ struct ObjectData {
+ SizeType size;
+ SizeType capacity;
+ Member* members;
+ }; // 12 bytes in 32-bit mode, 16 bytes in 64-bit mode
+
+ struct ArrayData {
+ SizeType size;
+ SizeType capacity;
+ GenericValue* elements;
+ }; // 12 bytes in 32-bit mode, 16 bytes in 64-bit mode
+
+ union Data {
+ String s;
+ ShortString ss;
+ Number n;
+ ObjectData o;
+ ArrayData a;
+ Flag f;
+ }; // 16 bytes in 32-bit mode, 24 bytes in 64-bit mode, 16 bytes in 64-bit with RAPIDJSON_48BITPOINTER_OPTIMIZATION
+
+ // Pointer accessors go through RAPIDJSON_GETPOINTER/SETPOINTER, which handle
+ // the optional 48-bit pointer packing transparently.
+ RAPIDJSON_FORCEINLINE const Ch* GetStringPointer() const { return RAPIDJSON_GETPOINTER(Ch, data_.s.str); }
+ RAPIDJSON_FORCEINLINE const Ch* SetStringPointer(const Ch* str) { return RAPIDJSON_SETPOINTER(Ch, data_.s.str, str); }
+ RAPIDJSON_FORCEINLINE GenericValue* GetElementsPointer() const { return RAPIDJSON_GETPOINTER(GenericValue, data_.a.elements); }
+ RAPIDJSON_FORCEINLINE GenericValue* SetElementsPointer(GenericValue* elements) { return RAPIDJSON_SETPOINTER(GenericValue, data_.a.elements, elements); }
+ RAPIDJSON_FORCEINLINE Member* GetMembersPointer() const { return RAPIDJSON_GETPOINTER(Member, data_.o.members); }
+ RAPIDJSON_FORCEINLINE Member* SetMembersPointer(Member* members) { return RAPIDJSON_SETPOINTER(Member, data_.o.members, members); }
+
+ //! Initialize this value as array with initial data, without calling destructor.
+ void SetArrayRaw(GenericValue* values, SizeType count, Allocator& allocator) {
+ data_.f.flags = kArrayFlag;
+ if (count) {
+ GenericValue* e = static_cast<GenericValue*>(allocator.Malloc(count * sizeof(GenericValue)));
+ SetElementsPointer(e);
+ // Raw byte copy transfers ownership of the source values (no destructors run on them).
+ std::memcpy(static_cast<void*>(e), values, count * sizeof(GenericValue));
+ }
+ else
+ SetElementsPointer(0);
+ data_.a.size = data_.a.capacity = count;
+ }
+
+ //! Initialize this value as object with initial data, without calling destructor.
+ void SetObjectRaw(Member* members, SizeType count, Allocator& allocator) {
+ data_.f.flags = kObjectFlag;
+ if (count) {
+ Member* m = static_cast<Member*>(allocator.Malloc(count * sizeof(Member)));
+ SetMembersPointer(m);
+ std::memcpy(static_cast<void*>(m), members, count * sizeof(Member));
+ }
+ else
+ SetMembersPointer(0);
+ data_.o.size = data_.o.capacity = count;
+ }
+
+ //! Initialize this value as constant string, without calling destructor.
+ void SetStringRaw(StringRefType s) RAPIDJSON_NOEXCEPT {
+ data_.f.flags = kConstStringFlag;
+ SetStringPointer(s);
+ data_.s.length = s.length;
+ }
+
+ //! Initialize this value as copy string with initial data, without calling destructor.
+ void SetStringRaw(StringRefType s, Allocator& allocator) {
+ Ch* str = 0;
+ if (ShortString::Usable(s.length)) {
+ // Short-string optimization: store the characters inline, no allocation.
+ data_.f.flags = kShortStringFlag;
+ data_.ss.SetLength(s.length);
+ str = data_.ss.str;
+ } else {
+ data_.f.flags = kCopyStringFlag;
+ data_.s.length = s.length;
+ str = static_cast<Ch *>(allocator.Malloc((s.length + 1) * sizeof(Ch)));
+ SetStringPointer(str);
+ }
+ std::memcpy(str, s, s.length * sizeof(Ch));
+ str[s.length] = '\0';
+ }
+
+ //! Assignment without calling destructor
+ void RawAssign(GenericValue& rhs) RAPIDJSON_NOEXCEPT {
+ data_ = rhs.data_;
+ // data_.f.flags = rhs.data_.f.flags;
+ // Leave rhs as Null so its destructor will not free the transferred payload.
+ rhs.data_.f.flags = kNullFlag;
+ }
+
+ template <typename SourceAllocator>
+ bool StringEqual(const GenericValue<Encoding, SourceAllocator>& rhs) const {
+ RAPIDJSON_ASSERT(IsString());
+ RAPIDJSON_ASSERT(rhs.IsString());
+
+ // Compare lengths first, then pointers, then bytes (handles embedded NULs).
+ const SizeType len1 = GetStringLength();
+ const SizeType len2 = rhs.GetStringLength();
+ if(len1 != len2) { return false; }
+
+ const Ch* const str1 = GetString();
+ const Ch* const str2 = rhs.GetString();
+ if(str1 == str2) { return true; } // fast path for constant string
+
+ return (std::memcmp(str1, str2, sizeof(Ch) * len1) == 0);
+ }
+
+ Data data_;
+};
+
+//! GenericValue with UTF8 encoding — the default \c Value type used throughout the public API.
+typedef GenericValue<UTF8<> > Value;
+
+///////////////////////////////////////////////////////////////////////////////
+// GenericDocument
+
+//! A document for parsing JSON text as DOM.
+/*!
+ \note implements Handler concept
+ \tparam Encoding Encoding for both parsing and string storage.
+ \tparam Allocator Allocator for allocating memory for the DOM
+ \tparam StackAllocator Allocator for allocating memory for stack during parsing.
+ \warning Although GenericDocument inherits from GenericValue, the API does \b not provide any virtual functions, especially no virtual destructor. To avoid memory leaks, do not \c delete a GenericDocument object via a pointer to a GenericValue.
+*/
+template <typename Encoding, typename Allocator = RAPIDJSON_DEFAULT_ALLOCATOR, typename StackAllocator = RAPIDJSON_DEFAULT_STACK_ALLOCATOR >
+class GenericDocument : public GenericValue<Encoding, Allocator> {
+public:
+ typedef typename Encoding::Ch Ch; //!< Character type derived from Encoding.
+ typedef GenericValue<Encoding, Allocator> ValueType; //!< Value type of the document.
+ typedef Allocator AllocatorType; //!< Allocator type from template parameter.
+
+ //! Constructor
+ /*! Creates an empty document of specified type.
+ \param type Mandatory type of object to create.
+ \param allocator Optional allocator for allocating memory.
+ \param stackCapacity Optional initial capacity of stack in bytes.
+ \param stackAllocator Optional allocator for allocating memory for stack.
+ */
+ explicit GenericDocument(Type type, Allocator* allocator = 0, size_t stackCapacity = kDefaultStackCapacity, StackAllocator* stackAllocator = 0) :
+ GenericValue<Encoding, Allocator>(type), allocator_(allocator), ownAllocator_(0), stack_(stackAllocator, stackCapacity), parseResult_()
+ {
+ // If the caller did not supply an allocator, create and own one.
+ if (!allocator_)
+ ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator)();
+ }
+
+ //! Constructor
+ /*! Creates an empty document which type is Null.
+ \param allocator Optional allocator for allocating memory.
+ \param stackCapacity Optional initial capacity of stack in bytes.
+ \param stackAllocator Optional allocator for allocating memory for stack.
+ */
+ GenericDocument(Allocator* allocator = 0, size_t stackCapacity = kDefaultStackCapacity, StackAllocator* stackAllocator = 0) :
+ allocator_(allocator), ownAllocator_(0), stack_(stackAllocator, stackCapacity), parseResult_()
+ {
+ if (!allocator_)
+ ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator)();
+ }
+
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+ //! Move constructor in C++11
+ GenericDocument(GenericDocument&& rhs) RAPIDJSON_NOEXCEPT
+ : ValueType(std::forward<ValueType>(rhs)), // explicit cast to avoid prohibited move from Document
+ allocator_(rhs.allocator_),
+ ownAllocator_(rhs.ownAllocator_),
+ stack_(std::move(rhs.stack_)),
+ parseResult_(rhs.parseResult_)
+ {
+ // Null out the source so its destructor does not free the transferred allocator.
+ rhs.allocator_ = 0;
+ rhs.ownAllocator_ = 0;
+ rhs.parseResult_ = ParseResult();
+ }
+#endif
+
+ ~GenericDocument() {
+ Destroy();
+ }
+
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+ //! Move assignment in C++11
+ GenericDocument& operator=(GenericDocument&& rhs) RAPIDJSON_NOEXCEPT
+ {
+ // The cast to ValueType is necessary here, because otherwise it would
+ // attempt to call GenericValue's templated assignment operator.
+ ValueType::operator=(std::forward<ValueType>(rhs));
+
+ // Calling the destructor here would prematurely call stack_'s destructor
+ Destroy();
+
+ allocator_ = rhs.allocator_;
+ ownAllocator_ = rhs.ownAllocator_;
+ stack_ = std::move(rhs.stack_);
+ parseResult_ = rhs.parseResult_;
+
+ rhs.allocator_ = 0;
+ rhs.ownAllocator_ = 0;
+ rhs.parseResult_ = ParseResult();
+
+ return *this;
+ }
+#endif
+
+ //! Exchange the contents of this document with those of another.
+ /*!
+ \param rhs Another document.
+ \note Constant complexity.
+ \see GenericValue::Swap
+ */
+ GenericDocument& Swap(GenericDocument& rhs) RAPIDJSON_NOEXCEPT {
+ ValueType::Swap(rhs);
+ stack_.Swap(rhs.stack_);
+ internal::Swap(allocator_, rhs.allocator_);
+ internal::Swap(ownAllocator_, rhs.ownAllocator_);
+ internal::Swap(parseResult_, rhs.parseResult_);
+ return *this;
+ }
+
+ // Allow Swap with ValueType.
+ // Refer to Effective C++ 3rd Edition/Item 33: Avoid hiding inherited names.
+ using ValueType::Swap;
+
+ //! free-standing swap function helper
+ /*!
+ Helper function to enable support for common swap implementation pattern based on \c std::swap:
+ \code
+ void swap(MyClass& a, MyClass& b) {
+ using std::swap;
+ swap(a.doc, b.doc);
+ // ...
+ }
+ \endcode
+ \see Swap()
+ */
+ friend inline void swap(GenericDocument& a, GenericDocument& b) RAPIDJSON_NOEXCEPT { a.Swap(b); }
+
+ //! Populate this document by a generator which produces SAX events.
+ /*! \tparam Generator A functor with <tt>bool f(Handler)</tt> prototype.
+ \param g Generator functor which sends SAX events to the parameter.
+ \return The document itself for fluent API.
+ */
+ template <typename Generator>
+ GenericDocument& Populate(Generator& g) {
+ ClearStackOnExit scope(*this);
+ if (g(*this)) {
+ RAPIDJSON_ASSERT(stack_.GetSize() == sizeof(ValueType)); // Got one and only one root object
+ ValueType::operator=(*stack_.template Pop<ValueType>(1));// Move value from stack to document
+ }
+ return *this;
+ }
+
+ //!@name Parse from stream
+ //!@{
+
+ //! Parse JSON text from an input stream (with Encoding conversion)
+ /*! \tparam parseFlags Combination of \ref ParseFlag.
+ \tparam SourceEncoding Encoding of input stream
+ \tparam InputStream Type of input stream, implementing Stream concept
+ \param is Input stream to be parsed.
+ \return The document itself for fluent API.
+ */
+ template <unsigned parseFlags, typename SourceEncoding, typename InputStream>
+ GenericDocument& ParseStream(InputStream& is) {
+ GenericReader<SourceEncoding, Encoding, StackAllocator> reader(
+ stack_.HasAllocator() ? &stack_.GetAllocator() : 0);
+ ClearStackOnExit scope(*this);
+ parseResult_ = reader.template Parse<parseFlags>(is, *this);
+ if (parseResult_) {
+ RAPIDJSON_ASSERT(stack_.GetSize() == sizeof(ValueType)); // Got one and only one root object
+ ValueType::operator=(*stack_.template Pop<ValueType>(1));// Move value from stack to document
+ }
+ return *this;
+ }
+
+ //! Parse JSON text from an input stream
+ /*! \tparam parseFlags Combination of \ref ParseFlag.
+ \tparam InputStream Type of input stream, implementing Stream concept
+ \param is Input stream to be parsed.
+ \return The document itself for fluent API.
+ */
+ template <unsigned parseFlags, typename InputStream>
+ GenericDocument& ParseStream(InputStream& is) {
+ return ParseStream<parseFlags, Encoding, InputStream>(is);
+ }
+
+ //! Parse JSON text from an input stream (with \ref kParseDefaultFlags)
+ /*! \tparam InputStream Type of input stream, implementing Stream concept
+ \param is Input stream to be parsed.
+ \return The document itself for fluent API.
+ */
+ template <typename InputStream>
+ GenericDocument& ParseStream(InputStream& is) {
+ return ParseStream<kParseDefaultFlags, Encoding, InputStream>(is);
+ }
+ //!@}
+
+ //!@name Parse in-place from mutable string
+ //!@{
+
+ //! Parse JSON text from a mutable string
+ /*! \tparam parseFlags Combination of \ref ParseFlag.
+ \param str Mutable zero-terminated string to be parsed.
+ \return The document itself for fluent API.
+ */
+ template <unsigned parseFlags>
+ GenericDocument& ParseInsitu(Ch* str) {
+ GenericInsituStringStream<Encoding> s(str);
+ return ParseStream<parseFlags | kParseInsituFlag>(s);
+ }
+
+ //! Parse JSON text from a mutable string (with \ref kParseDefaultFlags)
+ /*! \param str Mutable zero-terminated string to be parsed.
+ \return The document itself for fluent API.
+ */
+ GenericDocument& ParseInsitu(Ch* str) {
+ return ParseInsitu<kParseDefaultFlags>(str);
+ }
+ //!@}
+
+ //!@name Parse from read-only string
+ //!@{
+
+ //! Parse JSON text from a read-only string (with Encoding conversion)
+ /*! \tparam parseFlags Combination of \ref ParseFlag (must not contain \ref kParseInsituFlag).
+ \tparam SourceEncoding Transcoding from input Encoding
+ \param str Read-only zero-terminated string to be parsed.
+ */
+ template <unsigned parseFlags, typename SourceEncoding>
+ GenericDocument& Parse(const typename SourceEncoding::Ch* str) {
+ RAPIDJSON_ASSERT(!(parseFlags & kParseInsituFlag));
+ GenericStringStream<SourceEncoding> s(str);
+ return ParseStream<parseFlags, SourceEncoding>(s);
+ }
+
+ //! Parse JSON text from a read-only string
+ /*! \tparam parseFlags Combination of \ref ParseFlag (must not contain \ref kParseInsituFlag).
+ \param str Read-only zero-terminated string to be parsed.
+ */
+ template <unsigned parseFlags>
+ GenericDocument& Parse(const Ch* str) {
+ return Parse<parseFlags, Encoding>(str);
+ }
+
+ //! Parse JSON text from a read-only string (with \ref kParseDefaultFlags)
+ /*! \param str Read-only zero-terminated string to be parsed.
+ */
+ GenericDocument& Parse(const Ch* str) {
+ return Parse<kParseDefaultFlags>(str);
+ }
+
+ //! Parse JSON text from a read-only buffer of explicit length (with Encoding conversion).
+ template <unsigned parseFlags, typename SourceEncoding>
+ GenericDocument& Parse(const typename SourceEncoding::Ch* str, size_t length) {
+ RAPIDJSON_ASSERT(!(parseFlags & kParseInsituFlag));
+ MemoryStream ms(reinterpret_cast<const char*>(str), length * sizeof(typename SourceEncoding::Ch));
+ EncodedInputStream<SourceEncoding, MemoryStream> is(ms);
+ ParseStream<parseFlags, SourceEncoding>(is);
+ return *this;
+ }
+
+ template <unsigned parseFlags>
+ GenericDocument& Parse(const Ch* str, size_t length) {
+ return Parse<parseFlags, Encoding>(str, length);
+ }
+
+ GenericDocument& Parse(const Ch* str, size_t length) {
+ return Parse<kParseDefaultFlags>(str, length);
+ }
+
+#if RAPIDJSON_HAS_STDSTRING
+ template <unsigned parseFlags, typename SourceEncoding>
+ GenericDocument& Parse(const std::basic_string<typename SourceEncoding::Ch>& str) {
+ // c_str() is constant complexity according to standard. Should be faster than Parse(const char*, size_t)
+ return Parse<parseFlags, SourceEncoding>(str.c_str());
+ }
+
+ template <unsigned parseFlags>
+ GenericDocument& Parse(const std::basic_string<Ch>& str) {
+ return Parse<parseFlags, Encoding>(str.c_str());
+ }
+
+ GenericDocument& Parse(const std::basic_string<Ch>& str) {
+ return Parse<kParseDefaultFlags>(str);
+ }
+#endif // RAPIDJSON_HAS_STDSTRING
+
+ //!@}
+
+ //!@name Handling parse errors
+ //!@{
+
+ //! Whether a parse error has occurred in the last parsing.
+ bool HasParseError() const { return parseResult_.IsError(); }
+
+ //! Get the \ref ParseErrorCode of last parsing.
+ ParseErrorCode GetParseError() const { return parseResult_.Code(); }
+
+ //! Get the position of last parsing error in input, 0 otherwise.
+ size_t GetErrorOffset() const { return parseResult_.Offset(); }
+
+ //! Implicit conversion to get the last parse result
+#ifndef __clang // -Wdocumentation
+ /*! \return \ref ParseResult of the last parse operation
+
+ \code
+ Document doc;
+ ParseResult ok = doc.Parse(json);
+ if (!ok)
+ printf( "JSON parse error: %s (%u)\n", GetParseError_En(ok.Code()), ok.Offset());
+ \endcode
+ */
+#endif
+ operator ParseResult() const { return parseResult_; }
+ //!@}
+
+ //! Get the allocator of this document.
+ Allocator& GetAllocator() {
+ RAPIDJSON_ASSERT(allocator_);
+ return *allocator_;
+ }
+
+ //! Get the capacity of stack in bytes.
+ size_t GetStackCapacity() const { return stack_.GetCapacity(); }
+
+private:
+ // clear stack on any exit from ParseStream, e.g. due to exception
+ struct ClearStackOnExit {
+ explicit ClearStackOnExit(GenericDocument& d) : d_(d) {}
+ ~ClearStackOnExit() { d_.ClearStack(); }
+ private:
+ ClearStackOnExit(const ClearStackOnExit&);
+ ClearStackOnExit& operator=(const ClearStackOnExit&);
+ GenericDocument& d_;
+ };
+
+ // callers of the following private Handler functions
+ // template <typename,typename,typename> friend class GenericReader; // for parsing
+ template <typename, typename> friend class GenericValue; // for deep copying
+
+public:
+ // Implementation of Handler. These callbacks build values on the parse stack;
+ // they are public so the reader/deep-copy machinery can invoke them, and are
+ // not intended for direct use by application code.
+ bool Null() { new (stack_.template Push<ValueType>()) ValueType(); return true; }
+ bool Bool(bool b) { new (stack_.template Push<ValueType>()) ValueType(b); return true; }
+ bool Int(int i) { new (stack_.template Push<ValueType>()) ValueType(i); return true; }
+ bool Uint(unsigned i) { new (stack_.template Push<ValueType>()) ValueType(i); return true; }
+ bool Int64(int64_t i) { new (stack_.template Push<ValueType>()) ValueType(i); return true; }
+ bool Uint64(uint64_t i) { new (stack_.template Push<ValueType>()) ValueType(i); return true; }
+ bool Double(double d) { new (stack_.template Push<ValueType>()) ValueType(d); return true; }
+
+ bool RawNumber(const Ch* str, SizeType length, bool copy) {
+ if (copy)
+ new (stack_.template Push<ValueType>()) ValueType(str, length, GetAllocator());
+ else
+ new (stack_.template Push<ValueType>()) ValueType(str, length);
+ return true;
+ }
+
+ bool String(const Ch* str, SizeType length, bool copy) {
+ if (copy)
+ new (stack_.template Push<ValueType>()) ValueType(str, length, GetAllocator());
+ else
+ new (stack_.template Push<ValueType>()) ValueType(str, length);
+ return true;
+ }
+
+ bool StartObject() { new (stack_.template Push<ValueType>()) ValueType(kObjectType); return true; }
+
+ bool Key(const Ch* str, SizeType length, bool copy) { return String(str, length, copy); }
+
+ bool EndObject(SizeType memberCount) {
+ // Pop the accumulated name/value pairs and hand them to the object value below them.
+ typename ValueType::Member* members = stack_.template Pop<typename ValueType::Member>(memberCount);
+ stack_.template Top<ValueType>()->SetObjectRaw(members, memberCount, GetAllocator());
+ return true;
+ }
+
+ bool StartArray() { new (stack_.template Push<ValueType>()) ValueType(kArrayType); return true; }
+
+ bool EndArray(SizeType elementCount) {
+ ValueType* elements = stack_.template Pop<ValueType>(elementCount);
+ stack_.template Top<ValueType>()->SetArrayRaw(elements, elementCount, GetAllocator());
+ return true;
+ }
+
+private:
+ //! Prohibit copying
+ GenericDocument(const GenericDocument&);
+ //! Prohibit assignment
+ GenericDocument& operator=(const GenericDocument&);
+
+ void ClearStack() {
+ if (Allocator::kNeedFree)
+ while (stack_.GetSize() > 0) // Here assumes all elements in stack array are GenericValue (Member is actually 2 GenericValue objects)
+ (stack_.template Pop<ValueType>(1))->~ValueType();
+ else
+ stack_.Clear();
+ stack_.ShrinkToFit();
+ }
+
+ void Destroy() {
+ // RAPIDJSON_DELETE is a no-op when ownAllocator_ is 0 (caller-supplied allocator).
+ RAPIDJSON_DELETE(ownAllocator_);
+ }
+
+ static const size_t kDefaultStackCapacity = 1024; // initial parse-stack capacity, in bytes
+ Allocator* allocator_;
+ Allocator* ownAllocator_;
+ internal::Stack<StackAllocator> stack_;
+ ParseResult parseResult_;
+};
+
+//! GenericDocument with UTF8 encoding — the default \c Document type used throughout the public API.
+typedef GenericDocument<UTF8<> > Document;
+
+
+//! Helper class for accessing Value of array type.
+/*!
+ Instance of this helper class is obtained by \c GenericValue::GetArray().
+ In addition to all APIs for array type, it provides range-based for loop if \c RAPIDJSON_HAS_CXX11_RANGE_FOR=1.
+ \note This is a thin reference wrapper: it holds a reference to the underlying
+ GenericValue, so its methods are \c const even though they may mutate the
+ referred-to value.
+*/
+template <bool Const, typename ValueT>
+class GenericArray {
+public:
+ typedef GenericArray<true, ValueT> ConstArray;
+ typedef GenericArray<false, ValueT> Array;
+ typedef ValueT PlainType;
+ typedef typename internal::MaybeAddConst<Const,PlainType>::Type ValueType;
+ typedef ValueType* ValueIterator; // This may be const or non-const iterator
+ typedef const ValueT* ConstValueIterator;
+ typedef typename ValueType::AllocatorType AllocatorType;
+ typedef typename ValueType::StringRefType StringRefType;
+
+ template <typename, typename>
+ friend class GenericValue;
+
+ GenericArray(const GenericArray& rhs) : value_(rhs.value_) {}
+ GenericArray& operator=(const GenericArray& rhs) { value_ = rhs.value_; return *this; }
+ ~GenericArray() {}
+
+ // All operations forward directly to the wrapped GenericValue's array API.
+ SizeType Size() const { return value_.Size(); }
+ SizeType Capacity() const { return value_.Capacity(); }
+ bool Empty() const { return value_.Empty(); }
+ void Clear() const { value_.Clear(); }
+ ValueType& operator[](SizeType index) const { return value_[index]; }
+ ValueIterator Begin() const { return value_.Begin(); }
+ ValueIterator End() const { return value_.End(); }
+ GenericArray Reserve(SizeType newCapacity, AllocatorType &allocator) const { value_.Reserve(newCapacity, allocator); return *this; }
+ GenericArray PushBack(ValueType& value, AllocatorType& allocator) const { value_.PushBack(value, allocator); return *this; }
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+ GenericArray PushBack(ValueType&& value, AllocatorType& allocator) const { value_.PushBack(value, allocator); return *this; }
+#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS
+ GenericArray PushBack(StringRefType value, AllocatorType& allocator) const { value_.PushBack(value, allocator); return *this; }
+ template <typename T> RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>, internal::IsGenericValue<T> >), (const GenericArray&)) PushBack(T value, AllocatorType& allocator) const { value_.PushBack(value, allocator); return *this; }
+ GenericArray PopBack() const { value_.PopBack(); return *this; }
+ ValueIterator Erase(ConstValueIterator pos) const { return value_.Erase(pos); }
+ ValueIterator Erase(ConstValueIterator first, ConstValueIterator last) const { return value_.Erase(first, last); }
+
+#if RAPIDJSON_HAS_CXX11_RANGE_FOR
+ // Lowercase begin()/end() enable C++11 range-based for loops.
+ ValueIterator begin() const { return value_.Begin(); }
+ ValueIterator end() const { return value_.End(); }
+#endif
+
+private:
+ GenericArray();
+ GenericArray(ValueType& value) : value_(value) {}
+ ValueType& value_;
+};
+
+//! Helper class for accessing Value of object type.
+/*!
+ Instance of this helper class is obtained by \c GenericValue::GetObject().
+ In addition to all APIs for array type, it provides range-based for loop if \c RAPIDJSON_HAS_CXX11_RANGE_FOR=1.
+*/
+template <bool Const, typename ValueT>
+class GenericObject {
+public:
+ typedef GenericObject<true, ValueT> ConstObject;
+ typedef GenericObject<false, ValueT> Object;
+ typedef ValueT PlainType;
+ typedef typename internal::MaybeAddConst<Const,PlainType>::Type ValueType;
+ typedef GenericMemberIterator<Const, typename ValueT::EncodingType, typename ValueT::AllocatorType> MemberIterator; // This may be const or non-const iterator
+ typedef GenericMemberIterator<true, typename ValueT::EncodingType, typename ValueT::AllocatorType> ConstMemberIterator;
+ typedef typename ValueType::AllocatorType AllocatorType;
+ typedef typename ValueType::StringRefType StringRefType;
+ typedef typename ValueType::EncodingType EncodingType;
+ typedef typename ValueType::Ch Ch;
+
+ template <typename, typename>
+ friend class GenericValue;
+
+ GenericObject(const GenericObject& rhs) : value_(rhs.value_) {}
+ GenericObject& operator=(const GenericObject& rhs) { value_ = rhs.value_; return *this; }
+ ~GenericObject() {}
+
+ SizeType MemberCount() const { return value_.MemberCount(); }
+ SizeType MemberCapacity() const { return value_.MemberCapacity(); }
+ bool ObjectEmpty() const { return value_.ObjectEmpty(); }
+ template <typename T> ValueType& operator[](T* name) const { return value_[name]; }
+ template <typename SourceAllocator> ValueType& operator[](const GenericValue<EncodingType, SourceAllocator>& name) const { return value_[name]; }
+#if RAPIDJSON_HAS_STDSTRING
+ ValueType& operator[](const std::basic_string<Ch>& name) const { return value_[name]; }
+#endif
+ MemberIterator MemberBegin() const { return value_.MemberBegin(); }
+ MemberIterator MemberEnd() const { return value_.MemberEnd(); }
+ GenericObject MemberReserve(SizeType newCapacity, AllocatorType &allocator) const { value_.MemberReserve(newCapacity, allocator); return *this; }
+ bool HasMember(const Ch* name) const { return value_.HasMember(name); }
+#if RAPIDJSON_HAS_STDSTRING
+ bool HasMember(const std::basic_string<Ch>& name) const { return value_.HasMember(name); }
+#endif
+ template <typename SourceAllocator> bool HasMember(const GenericValue<EncodingType, SourceAllocator>& name) const { return value_.HasMember(name); }
+ MemberIterator FindMember(const Ch* name) const { return value_.FindMember(name); }
+ template <typename SourceAllocator> MemberIterator FindMember(const GenericValue<EncodingType, SourceAllocator>& name) const { return value_.FindMember(name); }
+#if RAPIDJSON_HAS_STDSTRING
+ MemberIterator FindMember(const std::basic_string<Ch>& name) const { return value_.FindMember(name); }
+#endif
+ GenericObject AddMember(ValueType& name, ValueType& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; }
+ GenericObject AddMember(ValueType& name, StringRefType value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; }
+#if RAPIDJSON_HAS_STDSTRING
+ GenericObject AddMember(ValueType& name, std::basic_string<Ch>& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; }
+#endif
+ template <typename T> RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>, internal::IsGenericValue<T> >), (ValueType&)) AddMember(ValueType& name, T value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; }
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+ GenericObject AddMember(ValueType&& name, ValueType&& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; }
+ GenericObject AddMember(ValueType&& name, ValueType& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; }
+ GenericObject AddMember(ValueType& name, ValueType&& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; }
+ GenericObject AddMember(StringRefType name, ValueType&& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; }
+#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS
+ GenericObject AddMember(StringRefType name, ValueType& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; }
+ GenericObject AddMember(StringRefType name, StringRefType value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; }
+ template <typename T> RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>, internal::IsGenericValue<T> >), (GenericObject)) AddMember(StringRefType name, T value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; }
+ void RemoveAllMembers() { value_.RemoveAllMembers(); }
+ bool RemoveMember(const Ch* name) const { return value_.RemoveMember(name); }
+#if RAPIDJSON_HAS_STDSTRING
+ bool RemoveMember(const std::basic_string<Ch>& name) const { return value_.RemoveMember(name); }
+#endif
+ template <typename SourceAllocator> bool RemoveMember(const GenericValue<EncodingType, SourceAllocator>& name) const { return value_.RemoveMember(name); }
+ MemberIterator RemoveMember(MemberIterator m) const { return value_.RemoveMember(m); }
+ MemberIterator EraseMember(ConstMemberIterator pos) const { return value_.EraseMember(pos); }
+ MemberIterator EraseMember(ConstMemberIterator first, ConstMemberIterator last) const { return value_.EraseMember(first, last); }
+ bool EraseMember(const Ch* name) const { return value_.EraseMember(name); }
+#if RAPIDJSON_HAS_STDSTRING
+ bool EraseMember(const std::basic_string<Ch>& name) const { return EraseMember(ValueType(StringRef(name))); }
+#endif
+ template <typename SourceAllocator> bool EraseMember(const GenericValue<EncodingType, SourceAllocator>& name) const { return value_.EraseMember(name); }
+
+#if RAPIDJSON_HAS_CXX11_RANGE_FOR
+ MemberIterator begin() const { return value_.MemberBegin(); }
+ MemberIterator end() const { return value_.MemberEnd(); }
+#endif
+
+private:
+ GenericObject();
+ GenericObject(ValueType& value) : value_(value) {}
+ ValueType& value_;
+};
+
+RAPIDJSON_NAMESPACE_END
+RAPIDJSON_DIAG_POP
+
+#endif // RAPIDJSON_DOCUMENT_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_encodedstream.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_encodedstream.h
new file mode 100644
index 00000000..98dc96dc
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_encodedstream.h
@@ -0,0 +1,299 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_ENCODEDSTREAM_H_
+#define RAPIDJSON_ENCODEDSTREAM_H_
+
+#include "lottie_rapidjson_stream.h"
+#include "lottie_rapidjson_memorystream.h"
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(effc++)
+#endif
+
+#ifdef __clang__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(padded)
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+
+//! Input byte stream wrapper with a statically bound encoding.
+/*!
+ \tparam Encoding The interpretation of encoding of the stream. Either UTF8, UTF16LE, UTF16BE, UTF32LE, UTF32BE.
+ \tparam InputByteStream Type of input byte stream. For example, FileReadStream.
+*/
+template <typename Encoding, typename InputByteStream>
+class EncodedInputStream {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
+public:
+ typedef typename Encoding::Ch Ch;
+
+ EncodedInputStream(InputByteStream& is) : is_(is) {
+ current_ = Encoding::TakeBOM(is_);
+ }
+
+ Ch Peek() const { return current_; }
+ Ch Take() { Ch c = current_; current_ = Encoding::Take(is_); return c; }
+ size_t Tell() const { return is_.Tell(); }
+
+ // Not implemented
+ void Put(Ch) { RAPIDJSON_ASSERT(false); }
+ void Flush() { RAPIDJSON_ASSERT(false); }
+ Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
+ size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }
+
+private:
+ EncodedInputStream(const EncodedInputStream&);
+ EncodedInputStream& operator=(const EncodedInputStream&);
+
+ InputByteStream& is_;
+ Ch current_;
+};
+
+//! Specialized for UTF8 MemoryStream.
+template <>
+class EncodedInputStream<UTF8<>, MemoryStream> {
+public:
+ typedef UTF8<>::Ch Ch;
+
+ EncodedInputStream(MemoryStream& is) : is_(is) {
+ if (static_cast<unsigned char>(is_.Peek()) == 0xEFu) is_.Take();
+ if (static_cast<unsigned char>(is_.Peek()) == 0xBBu) is_.Take();
+ if (static_cast<unsigned char>(is_.Peek()) == 0xBFu) is_.Take();
+ }
+ Ch Peek() const { return is_.Peek(); }
+ Ch Take() { return is_.Take(); }
+ size_t Tell() const { return is_.Tell(); }
+
+ // Not implemented
+ void Put(Ch) {}
+ void Flush() {}
+ Ch* PutBegin() { return 0; }
+ size_t PutEnd(Ch*) { return 0; }
+
+ MemoryStream& is_;
+
+private:
+ EncodedInputStream(const EncodedInputStream&);
+ EncodedInputStream& operator=(const EncodedInputStream&);
+};
+
+//! Output byte stream wrapper with statically bound encoding.
+/*!
+ \tparam Encoding The interpretation of encoding of the stream. Either UTF8, UTF16LE, UTF16BE, UTF32LE, UTF32BE.
+ \tparam OutputByteStream Type of input byte stream. For example, FileWriteStream.
+*/
+template <typename Encoding, typename OutputByteStream>
+class EncodedOutputStream {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
+public:
+ typedef typename Encoding::Ch Ch;
+
+ EncodedOutputStream(OutputByteStream& os, bool putBOM = true) : os_(os) {
+ if (putBOM)
+ Encoding::PutBOM(os_);
+ }
+
+ void Put(Ch c) { Encoding::Put(os_, c); }
+ void Flush() { os_.Flush(); }
+
+ // Not implemented
+ Ch Peek() const { RAPIDJSON_ASSERT(false); return 0;}
+ Ch Take() { RAPIDJSON_ASSERT(false); return 0;}
+ size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; }
+ Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
+ size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }
+
+private:
+ EncodedOutputStream(const EncodedOutputStream&);
+ EncodedOutputStream& operator=(const EncodedOutputStream&);
+
+ OutputByteStream& os_;
+};
+
+#define RAPIDJSON_ENCODINGS_FUNC(x) UTF8<Ch>::x, UTF16LE<Ch>::x, UTF16BE<Ch>::x, UTF32LE<Ch>::x, UTF32BE<Ch>::x
+
+//! Input stream wrapper with dynamically bound encoding and automatic encoding detection.
+/*!
+ \tparam CharType Type of character for reading.
+ \tparam InputByteStream type of input byte stream to be wrapped.
+*/
+template <typename CharType, typename InputByteStream>
+class AutoUTFInputStream {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
+public:
+ typedef CharType Ch;
+
+ //! Constructor.
+ /*!
+ \param is input stream to be wrapped.
+ \param type UTF encoding type if it is not detected from the stream.
+ */
+ AutoUTFInputStream(InputByteStream& is, UTFType type = kUTF8) : is_(&is), type_(type), hasBOM_(false) {
+ RAPIDJSON_ASSERT(type >= kUTF8 && type <= kUTF32BE);
+ DetectType();
+ static const TakeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Take) };
+ takeFunc_ = f[type_];
+ current_ = takeFunc_(*is_);
+ }
+
+ UTFType GetType() const { return type_; }
+ bool HasBOM() const { return hasBOM_; }
+
+ Ch Peek() const { return current_; }
+ Ch Take() { Ch c = current_; current_ = takeFunc_(*is_); return c; }
+ size_t Tell() const { return is_->Tell(); }
+
+ // Not implemented
+ void Put(Ch) { RAPIDJSON_ASSERT(false); }
+ void Flush() { RAPIDJSON_ASSERT(false); }
+ Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
+ size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }
+
+private:
+ AutoUTFInputStream(const AutoUTFInputStream&);
+ AutoUTFInputStream& operator=(const AutoUTFInputStream&);
+
+ // Detect encoding type with BOM or RFC 4627
+ void DetectType() {
+ // BOM (Byte Order Mark):
+ // 00 00 FE FF UTF-32BE
+ // FF FE 00 00 UTF-32LE
+ // FE FF UTF-16BE
+ // FF FE UTF-16LE
+ // EF BB BF UTF-8
+
+ const unsigned char* c = reinterpret_cast<const unsigned char *>(is_->Peek4());
+ if (!c)
+ return;
+
+ unsigned bom = static_cast<unsigned>(c[0] | (c[1] << 8) | (c[2] << 16) | (c[3] << 24));
+ hasBOM_ = false;
+ if (bom == 0xFFFE0000) { type_ = kUTF32BE; hasBOM_ = true; is_->Take(); is_->Take(); is_->Take(); is_->Take(); }
+ else if (bom == 0x0000FEFF) { type_ = kUTF32LE; hasBOM_ = true; is_->Take(); is_->Take(); is_->Take(); is_->Take(); }
+ else if ((bom & 0xFFFF) == 0xFFFE) { type_ = kUTF16BE; hasBOM_ = true; is_->Take(); is_->Take(); }
+ else if ((bom & 0xFFFF) == 0xFEFF) { type_ = kUTF16LE; hasBOM_ = true; is_->Take(); is_->Take(); }
+ else if ((bom & 0xFFFFFF) == 0xBFBBEF) { type_ = kUTF8; hasBOM_ = true; is_->Take(); is_->Take(); is_->Take(); }
+
+ // RFC 4627: Section 3
+ // "Since the first two characters of a JSON text will always be ASCII
+ // characters [RFC0020], it is possible to determine whether an octet
+ // stream is UTF-8, UTF-16 (BE or LE), or UTF-32 (BE or LE) by looking
+ // at the pattern of nulls in the first four octets."
+ // 00 00 00 xx UTF-32BE
+ // 00 xx 00 xx UTF-16BE
+ // xx 00 00 00 UTF-32LE
+ // xx 00 xx 00 UTF-16LE
+ // xx xx xx xx UTF-8
+
+ if (!hasBOM_) {
+ int pattern = (c[0] ? 1 : 0) | (c[1] ? 2 : 0) | (c[2] ? 4 : 0) | (c[3] ? 8 : 0);
+ switch (pattern) {
+ case 0x08: type_ = kUTF32BE; break;
+ case 0x0A: type_ = kUTF16BE; break;
+ case 0x01: type_ = kUTF32LE; break;
+ case 0x05: type_ = kUTF16LE; break;
+ case 0x0F: type_ = kUTF8; break;
+ default: break; // Use type defined by user.
+ }
+ }
+
+ // Runtime check whether the size of character type is sufficient. It only perform checks with assertion.
+ if (type_ == kUTF16LE || type_ == kUTF16BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 2);
+ if (type_ == kUTF32LE || type_ == kUTF32BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 4);
+ }
+
+ typedef Ch (*TakeFunc)(InputByteStream& is);
+ InputByteStream* is_;
+ UTFType type_;
+ Ch current_;
+ TakeFunc takeFunc_;
+ bool hasBOM_;
+};
+
+//! Output stream wrapper with dynamically bound encoding and automatic encoding detection.
+/*!
+ \tparam CharType Type of character for writing.
+ \tparam OutputByteStream type of output byte stream to be wrapped.
+*/
+template <typename CharType, typename OutputByteStream>
+class AutoUTFOutputStream {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
+public:
+ typedef CharType Ch;
+
+ //! Constructor.
+ /*!
+ \param os output stream to be wrapped.
+ \param type UTF encoding type.
+ \param putBOM Whether to write BOM at the beginning of the stream.
+ */
+ AutoUTFOutputStream(OutputByteStream& os, UTFType type, bool putBOM) : os_(&os), type_(type) {
+ RAPIDJSON_ASSERT(type >= kUTF8 && type <= kUTF32BE);
+
+ // Runtime check whether the size of character type is sufficient. It only perform checks with assertion.
+ if (type_ == kUTF16LE || type_ == kUTF16BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 2);
+ if (type_ == kUTF32LE || type_ == kUTF32BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 4);
+
+ static const PutFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Put) };
+ putFunc_ = f[type_];
+
+ if (putBOM)
+ PutBOM();
+ }
+
+ UTFType GetType() const { return type_; }
+
+ void Put(Ch c) { putFunc_(*os_, c); }
+ void Flush() { os_->Flush(); }
+
+ // Not implemented
+ Ch Peek() const { RAPIDJSON_ASSERT(false); return 0;}
+ Ch Take() { RAPIDJSON_ASSERT(false); return 0;}
+ size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; }
+ Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
+ size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }
+
+private:
+ AutoUTFOutputStream(const AutoUTFOutputStream&);
+ AutoUTFOutputStream& operator=(const AutoUTFOutputStream&);
+
+ void PutBOM() {
+ typedef void (*PutBOMFunc)(OutputByteStream&);
+ static const PutBOMFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(PutBOM) };
+ f[type_](*os_);
+ }
+
+ typedef void (*PutFunc)(OutputByteStream&, Ch);
+
+ OutputByteStream* os_;
+ UTFType type_;
+ PutFunc putFunc_;
+};
+
+#undef RAPIDJSON_ENCODINGS_FUNC
+
+RAPIDJSON_NAMESPACE_END
+
+#ifdef __clang__
+RAPIDJSON_DIAG_POP
+#endif
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_POP
+#endif
+
+#endif // RAPIDJSON_FILESTREAM_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_encodings.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_encodings.h
new file mode 100644
index 00000000..52089823
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_encodings.h
@@ -0,0 +1,716 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_ENCODINGS_H_
+#define RAPIDJSON_ENCODINGS_H_
+
+#include "lottie_rapidjson_rapidjson.h"
+
+#if defined(_MSC_VER) && !defined(__clang__)
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(4244) // conversion from 'type1' to 'type2', possible loss of data
+RAPIDJSON_DIAG_OFF(4702) // unreachable code
+#elif defined(__GNUC__)
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(effc++)
+RAPIDJSON_DIAG_OFF(overflow)
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+
+///////////////////////////////////////////////////////////////////////////////
+// Encoding
+
+/*! \class rapidjson::Encoding
+ \brief Concept for encoding of Unicode characters.
+
+\code
+concept Encoding {
+ typename Ch; //! Type of character. A "character" is actually a code unit in unicode's definition.
+
+ enum { supportUnicode = 1 }; // or 0 if not supporting unicode
+
+ //! \brief Encode a Unicode codepoint to an output stream.
+ //! \param os Output stream.
+ //! \param codepoint An unicode codepoint, ranging from 0x0 to 0x10FFFF inclusively.
+ template<typename OutputStream>
+ static void Encode(OutputStream& os, unsigned codepoint);
+
+ //! \brief Decode a Unicode codepoint from an input stream.
+ //! \param is Input stream.
+ //! \param codepoint Output of the unicode codepoint.
+ //! \return true if a valid codepoint can be decoded from the stream.
+ template <typename InputStream>
+ static bool Decode(InputStream& is, unsigned* codepoint);
+
+ //! \brief Validate one Unicode codepoint from an encoded stream.
+ //! \param is Input stream to obtain codepoint.
+ //! \param os Output for copying one codepoint.
+ //! \return true if it is valid.
+ //! \note This function just validating and copying the codepoint without actually decode it.
+ template <typename InputStream, typename OutputStream>
+ static bool Validate(InputStream& is, OutputStream& os);
+
+ // The following functions are deal with byte streams.
+
+ //! Take a character from input byte stream, skip BOM if exist.
+ template <typename InputByteStream>
+ static CharType TakeBOM(InputByteStream& is);
+
+ //! Take a character from input byte stream.
+ template <typename InputByteStream>
+ static Ch Take(InputByteStream& is);
+
+ //! Put BOM to output byte stream.
+ template <typename OutputByteStream>
+ static void PutBOM(OutputByteStream& os);
+
+ //! Put a character to output byte stream.
+ template <typename OutputByteStream>
+ static void Put(OutputByteStream& os, Ch c);
+};
+\endcode
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// UTF8
+
+//! UTF-8 encoding.
+/*! http://en.wikipedia.org/wiki/UTF-8
+ http://tools.ietf.org/html/rfc3629
+ \tparam CharType Code unit for storing 8-bit UTF-8 data. Default is char.
+ \note implements Encoding concept
+*/
+template<typename CharType = char>
+struct UTF8 {
+ typedef CharType Ch;
+
+ enum { supportUnicode = 1 };
+
+ template<typename OutputStream>
+ static void Encode(OutputStream& os, unsigned codepoint) {
+ if (codepoint <= 0x7F)
+ os.Put(static_cast<Ch>(codepoint & 0xFF));
+ else if (codepoint <= 0x7FF) {
+ os.Put(static_cast<Ch>(0xC0 | ((codepoint >> 6) & 0xFF)));
+ os.Put(static_cast<Ch>(0x80 | ((codepoint & 0x3F))));
+ }
+ else if (codepoint <= 0xFFFF) {
+ os.Put(static_cast<Ch>(0xE0 | ((codepoint >> 12) & 0xFF)));
+ os.Put(static_cast<Ch>(0x80 | ((codepoint >> 6) & 0x3F)));
+ os.Put(static_cast<Ch>(0x80 | (codepoint & 0x3F)));
+ }
+ else {
+ RAPIDJSON_ASSERT(codepoint <= 0x10FFFF);
+ os.Put(static_cast<Ch>(0xF0 | ((codepoint >> 18) & 0xFF)));
+ os.Put(static_cast<Ch>(0x80 | ((codepoint >> 12) & 0x3F)));
+ os.Put(static_cast<Ch>(0x80 | ((codepoint >> 6) & 0x3F)));
+ os.Put(static_cast<Ch>(0x80 | (codepoint & 0x3F)));
+ }
+ }
+
+ template<typename OutputStream>
+ static void EncodeUnsafe(OutputStream& os, unsigned codepoint) {
+ if (codepoint <= 0x7F)
+ PutUnsafe(os, static_cast<Ch>(codepoint & 0xFF));
+ else if (codepoint <= 0x7FF) {
+ PutUnsafe(os, static_cast<Ch>(0xC0 | ((codepoint >> 6) & 0xFF)));
+ PutUnsafe(os, static_cast<Ch>(0x80 | ((codepoint & 0x3F))));
+ }
+ else if (codepoint <= 0xFFFF) {
+ PutUnsafe(os, static_cast<Ch>(0xE0 | ((codepoint >> 12) & 0xFF)));
+ PutUnsafe(os, static_cast<Ch>(0x80 | ((codepoint >> 6) & 0x3F)));
+ PutUnsafe(os, static_cast<Ch>(0x80 | (codepoint & 0x3F)));
+ }
+ else {
+ RAPIDJSON_ASSERT(codepoint <= 0x10FFFF);
+ PutUnsafe(os, static_cast<Ch>(0xF0 | ((codepoint >> 18) & 0xFF)));
+ PutUnsafe(os, static_cast<Ch>(0x80 | ((codepoint >> 12) & 0x3F)));
+ PutUnsafe(os, static_cast<Ch>(0x80 | ((codepoint >> 6) & 0x3F)));
+ PutUnsafe(os, static_cast<Ch>(0x80 | (codepoint & 0x3F)));
+ }
+ }
+
+ template <typename InputStream>
+ static bool Decode(InputStream& is, unsigned* codepoint) {
+#define RAPIDJSON_COPY() c = is.Take(); *codepoint = (*codepoint << 6) | (static_cast<unsigned char>(c) & 0x3Fu)
+#define RAPIDJSON_TRANS(mask) result &= ((GetRange(static_cast<unsigned char>(c)) & mask) != 0)
+#define RAPIDJSON_TAIL() RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x70)
+ typename InputStream::Ch c = is.Take();
+ if (!(c & 0x80)) {
+ *codepoint = static_cast<unsigned char>(c);
+ return true;
+ }
+
+ unsigned char type = GetRange(static_cast<unsigned char>(c));
+ if (type >= 32) {
+ *codepoint = 0;
+ } else {
+ *codepoint = (0xFFu >> type) & static_cast<unsigned char>(c);
+ }
+ bool result = true;
+ switch (type) {
+ case 2: RAPIDJSON_TAIL(); return result;
+ case 3: RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result;
+ case 4: RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x50); RAPIDJSON_TAIL(); return result;
+ case 5: RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x10); RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result;
+ case 6: RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result;
+ case 10: RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x20); RAPIDJSON_TAIL(); return result;
+ case 11: RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x60); RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result;
+ default: return false;
+ }
+#undef RAPIDJSON_COPY
+#undef RAPIDJSON_TRANS
+#undef RAPIDJSON_TAIL
+ }
+
+ template <typename InputStream, typename OutputStream>
+ static bool Validate(InputStream& is, OutputStream& os) {
+#define RAPIDJSON_COPY() os.Put(c = is.Take())
+#define RAPIDJSON_TRANS(mask) result &= ((GetRange(static_cast<unsigned char>(c)) & mask) != 0)
+#define RAPIDJSON_TAIL() RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x70)
+ Ch c;
+ RAPIDJSON_COPY();
+ if (!(c & 0x80))
+ return true;
+
+ bool result = true;
+ switch (GetRange(static_cast<unsigned char>(c))) {
+ case 2: RAPIDJSON_TAIL(); return result;
+ case 3: RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result;
+ case 4: RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x50); RAPIDJSON_TAIL(); return result;
+ case 5: RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x10); RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result;
+ case 6: RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result;
+ case 10: RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x20); RAPIDJSON_TAIL(); return result;
+ case 11: RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x60); RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result;
+ default: return false;
+ }
+#undef RAPIDJSON_COPY
+#undef RAPIDJSON_TRANS
+#undef RAPIDJSON_TAIL
+ }
+
+ static unsigned char GetRange(unsigned char c) {
+ // Referring to DFA of http://bjoern.hoehrmann.de/utf-8/decoder/dfa/
+ // With new mapping 1 -> 0x10, 7 -> 0x20, 9 -> 0x40, such that AND operation can test multiple types.
+ static const unsigned char type[] = {
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
+ 0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,
+ 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
+ 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
+ 8,8,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
+ 10,3,3,3,3,3,3,3,3,3,3,3,3,4,3,3, 11,6,6,6,5,8,8,8,8,8,8,8,8,8,8,8,
+ };
+ return type[c];
+ }
+
+ template <typename InputByteStream>
+ static CharType TakeBOM(InputByteStream& is) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
+ typename InputByteStream::Ch c = Take(is);
+ if (static_cast<unsigned char>(c) != 0xEFu) return c;
+ c = is.Take();
+ if (static_cast<unsigned char>(c) != 0xBBu) return c;
+ c = is.Take();
+ if (static_cast<unsigned char>(c) != 0xBFu) return c;
+ c = is.Take();
+ return c;
+ }
+
+ template <typename InputByteStream>
+ static Ch Take(InputByteStream& is) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
+ return static_cast<Ch>(is.Take());
+ }
+
+ template <typename OutputByteStream>
+ static void PutBOM(OutputByteStream& os) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
+ os.Put(static_cast<typename OutputByteStream::Ch>(0xEFu));
+ os.Put(static_cast<typename OutputByteStream::Ch>(0xBBu));
+ os.Put(static_cast<typename OutputByteStream::Ch>(0xBFu));
+ }
+
+ template <typename OutputByteStream>
+ static void Put(OutputByteStream& os, Ch c) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
+ os.Put(static_cast<typename OutputByteStream::Ch>(c));
+ }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// UTF16
+
+//! UTF-16 encoding.
+/*! http://en.wikipedia.org/wiki/UTF-16
+ http://tools.ietf.org/html/rfc2781
+ \tparam CharType Type for storing 16-bit UTF-16 data. Default is wchar_t. C++11 may use char16_t instead.
+ \note implements Encoding concept
+
+ \note For in-memory access, no need to concern endianness. The code units and code points are represented by CPU's endianness.
+ For streaming, use UTF16LE and UTF16BE, which handle endianness.
+*/
+template<typename CharType = wchar_t>
+struct UTF16 {
+ typedef CharType Ch;
+ RAPIDJSON_STATIC_ASSERT(sizeof(Ch) >= 2);
+
+ enum { supportUnicode = 1 };
+
+ template<typename OutputStream>
+ static void Encode(OutputStream& os, unsigned codepoint) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 2);
+ if (codepoint <= 0xFFFF) {
+ RAPIDJSON_ASSERT(codepoint < 0xD800 || codepoint > 0xDFFF); // Code point itself cannot be surrogate pair
+ os.Put(static_cast<typename OutputStream::Ch>(codepoint));
+ }
+ else {
+ RAPIDJSON_ASSERT(codepoint <= 0x10FFFF);
+ unsigned v = codepoint - 0x10000;
+ os.Put(static_cast<typename OutputStream::Ch>((v >> 10) | 0xD800));
+ os.Put(static_cast<typename OutputStream::Ch>((v & 0x3FF) | 0xDC00));
+ }
+ }
+
+
+ template<typename OutputStream>
+ static void EncodeUnsafe(OutputStream& os, unsigned codepoint) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 2);
+ if (codepoint <= 0xFFFF) {
+ RAPIDJSON_ASSERT(codepoint < 0xD800 || codepoint > 0xDFFF); // Code point itself cannot be surrogate pair
+ PutUnsafe(os, static_cast<typename OutputStream::Ch>(codepoint));
+ }
+ else {
+ RAPIDJSON_ASSERT(codepoint <= 0x10FFFF);
+ unsigned v = codepoint - 0x10000;
+ PutUnsafe(os, static_cast<typename OutputStream::Ch>((v >> 10) | 0xD800));
+ PutUnsafe(os, static_cast<typename OutputStream::Ch>((v & 0x3FF) | 0xDC00));
+ }
+ }
+
+ template <typename InputStream>
+ static bool Decode(InputStream& is, unsigned* codepoint) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 2);
+ typename InputStream::Ch c = is.Take();
+ if (c < 0xD800 || c > 0xDFFF) {
+ *codepoint = static_cast<unsigned>(c);
+ return true;
+ }
+ else if (c <= 0xDBFF) {
+ *codepoint = (static_cast<unsigned>(c) & 0x3FF) << 10;
+ c = is.Take();
+ *codepoint |= (static_cast<unsigned>(c) & 0x3FF);
+ *codepoint += 0x10000;
+ return c >= 0xDC00 && c <= 0xDFFF;
+ }
+ return false;
+ }
+
+ template <typename InputStream, typename OutputStream>
+ static bool Validate(InputStream& is, OutputStream& os) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 2);
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 2);
+ typename InputStream::Ch c;
+ os.Put(static_cast<typename OutputStream::Ch>(c = is.Take()));
+ if (c < 0xD800 || c > 0xDFFF)
+ return true;
+ else if (c <= 0xDBFF) {
+ os.Put(c = is.Take());
+ return c >= 0xDC00 && c <= 0xDFFF;
+ }
+ return false;
+ }
+};
+
+//! UTF-16 little endian encoding.
+template<typename CharType = wchar_t>
+struct UTF16LE : UTF16<CharType> {
+ template <typename InputByteStream>
+ static CharType TakeBOM(InputByteStream& is) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
+ CharType c = Take(is);
+ return static_cast<uint16_t>(c) == 0xFEFFu ? Take(is) : c;
+ }
+
+ template <typename InputByteStream>
+ static CharType Take(InputByteStream& is) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
+ unsigned c = static_cast<uint8_t>(is.Take());
+ c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 8;
+ return static_cast<CharType>(c);
+ }
+
+ template <typename OutputByteStream>
+ static void PutBOM(OutputByteStream& os) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
+ os.Put(static_cast<typename OutputByteStream::Ch>(0xFFu));
+ os.Put(static_cast<typename OutputByteStream::Ch>(0xFEu));
+ }
+
+ template <typename OutputByteStream>
+ static void Put(OutputByteStream& os, CharType c) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
+ os.Put(static_cast<typename OutputByteStream::Ch>(static_cast<unsigned>(c) & 0xFFu));
+ os.Put(static_cast<typename OutputByteStream::Ch>((static_cast<unsigned>(c) >> 8) & 0xFFu));
+ }
+};
+
+//! UTF-16 big endian encoding.
+template<typename CharType = wchar_t>
+struct UTF16BE : UTF16<CharType> {
+ template <typename InputByteStream>
+ static CharType TakeBOM(InputByteStream& is) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
+ CharType c = Take(is);
+ return static_cast<uint16_t>(c) == 0xFEFFu ? Take(is) : c;
+ }
+
+ template <typename InputByteStream>
+ static CharType Take(InputByteStream& is) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
+ unsigned c = static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 8;
+ c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take()));
+ return static_cast<CharType>(c);
+ }
+
+ template <typename OutputByteStream>
+ static void PutBOM(OutputByteStream& os) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
+ os.Put(static_cast<typename OutputByteStream::Ch>(0xFEu));
+ os.Put(static_cast<typename OutputByteStream::Ch>(0xFFu));
+ }
+
+ template <typename OutputByteStream>
+ static void Put(OutputByteStream& os, CharType c) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
+ os.Put(static_cast<typename OutputByteStream::Ch>((static_cast<unsigned>(c) >> 8) & 0xFFu));
+ os.Put(static_cast<typename OutputByteStream::Ch>(static_cast<unsigned>(c) & 0xFFu));
+ }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// UTF32
+
+//! UTF-32 encoding.
+/*! http://en.wikipedia.org/wiki/UTF-32
+ \tparam CharType Type for storing 32-bit UTF-32 data. Default is unsigned. C++11 may use char32_t instead.
+ \note implements Encoding concept
+
+ \note For in-memory access, no need to concern endianness. The code units and code points are represented by CPU's endianness.
+ For streaming, use UTF32LE and UTF32BE, which handle endianness.
+*/
+template<typename CharType = unsigned>
+struct UTF32 {
+ typedef CharType Ch;
+ RAPIDJSON_STATIC_ASSERT(sizeof(Ch) >= 4);
+
+ enum { supportUnicode = 1 };
+
+ template<typename OutputStream>
+ static void Encode(OutputStream& os, unsigned codepoint) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 4);
+ RAPIDJSON_ASSERT(codepoint <= 0x10FFFF);
+ os.Put(codepoint);
+ }
+
+ template<typename OutputStream>
+ static void EncodeUnsafe(OutputStream& os, unsigned codepoint) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 4);
+ RAPIDJSON_ASSERT(codepoint <= 0x10FFFF);
+ PutUnsafe(os, codepoint);
+ }
+
+ template <typename InputStream>
+ static bool Decode(InputStream& is, unsigned* codepoint) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 4);
+ Ch c = is.Take();
+ *codepoint = c;
+ return c <= 0x10FFFF;
+ }
+
+ template <typename InputStream, typename OutputStream>
+ static bool Validate(InputStream& is, OutputStream& os) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 4);
+ Ch c;
+ os.Put(c = is.Take());
+ return c <= 0x10FFFF;
+ }
+};
+
+//! UTF-32 little endian enocoding.
+template<typename CharType = unsigned>
+struct UTF32LE : UTF32<CharType> {
+ template <typename InputByteStream>
+ static CharType TakeBOM(InputByteStream& is) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
+ CharType c = Take(is);
+ return static_cast<uint32_t>(c) == 0x0000FEFFu ? Take(is) : c;
+ }
+
+ template <typename InputByteStream>
+ static CharType Take(InputByteStream& is) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
+ unsigned c = static_cast<uint8_t>(is.Take());
+ c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 8;
+ c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 16;
+ c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 24;
+ return static_cast<CharType>(c);
+ }
+
+ template <typename OutputByteStream>
+ static void PutBOM(OutputByteStream& os) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
+ os.Put(static_cast<typename OutputByteStream::Ch>(0xFFu));
+ os.Put(static_cast<typename OutputByteStream::Ch>(0xFEu));
+ os.Put(static_cast<typename OutputByteStream::Ch>(0x00u));
+ os.Put(static_cast<typename OutputByteStream::Ch>(0x00u));
+ }
+
+ template <typename OutputByteStream>
+ static void Put(OutputByteStream& os, CharType c) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
+ os.Put(static_cast<typename OutputByteStream::Ch>(c & 0xFFu));
+ os.Put(static_cast<typename OutputByteStream::Ch>((c >> 8) & 0xFFu));
+ os.Put(static_cast<typename OutputByteStream::Ch>((c >> 16) & 0xFFu));
+ os.Put(static_cast<typename OutputByteStream::Ch>((c >> 24) & 0xFFu));
+ }
+};
+
+//! UTF-32 big endian encoding.
+template<typename CharType = unsigned>
+struct UTF32BE : UTF32<CharType> {
+ template <typename InputByteStream>
+ static CharType TakeBOM(InputByteStream& is) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
+ CharType c = Take(is);
+ return static_cast<uint32_t>(c) == 0x0000FEFFu ? Take(is) : c;
+ }
+
+ template <typename InputByteStream>
+ static CharType Take(InputByteStream& is) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
+ unsigned c = static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 24;
+ c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 16;
+ c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 8;
+ c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take()));
+ return static_cast<CharType>(c);
+ }
+
+ template <typename OutputByteStream>
+ static void PutBOM(OutputByteStream& os) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
+ os.Put(static_cast<typename OutputByteStream::Ch>(0x00u));
+ os.Put(static_cast<typename OutputByteStream::Ch>(0x00u));
+ os.Put(static_cast<typename OutputByteStream::Ch>(0xFEu));
+ os.Put(static_cast<typename OutputByteStream::Ch>(0xFFu));
+ }
+
+ template <typename OutputByteStream>
+ static void Put(OutputByteStream& os, CharType c) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
+ os.Put(static_cast<typename OutputByteStream::Ch>((c >> 24) & 0xFFu));
+ os.Put(static_cast<typename OutputByteStream::Ch>((c >> 16) & 0xFFu));
+ os.Put(static_cast<typename OutputByteStream::Ch>((c >> 8) & 0xFFu));
+ os.Put(static_cast<typename OutputByteStream::Ch>(c & 0xFFu));
+ }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// ASCII
+
+//! ASCII encoding.
+/*! http://en.wikipedia.org/wiki/ASCII
+ \tparam CharType Code unit for storing 7-bit ASCII data. Default is char.
+ \note implements Encoding concept
+*/
+template<typename CharType = char>
+struct ASCII {
+ typedef CharType Ch;
+
+ enum { supportUnicode = 0 };
+
+ template<typename OutputStream>
+ static void Encode(OutputStream& os, unsigned codepoint) {
+ RAPIDJSON_ASSERT(codepoint <= 0x7F);
+ os.Put(static_cast<Ch>(codepoint & 0xFF));
+ }
+
+ template<typename OutputStream>
+ static void EncodeUnsafe(OutputStream& os, unsigned codepoint) {
+ RAPIDJSON_ASSERT(codepoint <= 0x7F);
+ PutUnsafe(os, static_cast<Ch>(codepoint & 0xFF));
+ }
+
+ template <typename InputStream>
+ static bool Decode(InputStream& is, unsigned* codepoint) {
+ uint8_t c = static_cast<uint8_t>(is.Take());
+ *codepoint = c;
+ return c <= 0X7F;
+ }
+
+ template <typename InputStream, typename OutputStream>
+ static bool Validate(InputStream& is, OutputStream& os) {
+ uint8_t c = static_cast<uint8_t>(is.Take());
+ os.Put(static_cast<typename OutputStream::Ch>(c));
+ return c <= 0x7F;
+ }
+
+ template <typename InputByteStream>
+ static CharType TakeBOM(InputByteStream& is) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
+ uint8_t c = static_cast<uint8_t>(Take(is));
+ return static_cast<Ch>(c);
+ }
+
+ template <typename InputByteStream>
+ static Ch Take(InputByteStream& is) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
+ return static_cast<Ch>(is.Take());
+ }
+
+ template <typename OutputByteStream>
+ static void PutBOM(OutputByteStream& os) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
+ (void)os;
+ }
+
+ template <typename OutputByteStream>
+ static void Put(OutputByteStream& os, Ch c) {
+ RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
+ os.Put(static_cast<typename OutputByteStream::Ch>(c));
+ }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// AutoUTF
+
+//! Runtime-specified UTF encoding type of a stream.
+enum UTFType {
+ kUTF8 = 0, //!< UTF-8.
+ kUTF16LE = 1, //!< UTF-16 little endian.
+ kUTF16BE = 2, //!< UTF-16 big endian.
+ kUTF32LE = 3, //!< UTF-32 little endian.
+ kUTF32BE = 4 //!< UTF-32 big endian.
+};
+
+//! Dynamically select encoding according to stream's runtime-specified UTF encoding type.
+/*! \note This class can be used with AutoUTFInputtStream and AutoUTFOutputStream, which provides GetType().
+*/
+template<typename CharType>
+struct AutoUTF {
+ typedef CharType Ch;
+
+ enum { supportUnicode = 1 };
+
+#define RAPIDJSON_ENCODINGS_FUNC(x) UTF8<Ch>::x, UTF16LE<Ch>::x, UTF16BE<Ch>::x, UTF32LE<Ch>::x, UTF32BE<Ch>::x
+
+ template<typename OutputStream>
+ static RAPIDJSON_FORCEINLINE void Encode(OutputStream& os, unsigned codepoint) {
+ typedef void (*EncodeFunc)(OutputStream&, unsigned);
+ static const EncodeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Encode) };
+ (*f[os.GetType()])(os, codepoint);
+ }
+
+ template<typename OutputStream>
+ static RAPIDJSON_FORCEINLINE void EncodeUnsafe(OutputStream& os, unsigned codepoint) {
+ typedef void (*EncodeFunc)(OutputStream&, unsigned);
+ static const EncodeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(EncodeUnsafe) };
+ (*f[os.GetType()])(os, codepoint);
+ }
+
+ template <typename InputStream>
+ static RAPIDJSON_FORCEINLINE bool Decode(InputStream& is, unsigned* codepoint) {
+ typedef bool (*DecodeFunc)(InputStream&, unsigned*);
+ static const DecodeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Decode) };
+ return (*f[is.GetType()])(is, codepoint);
+ }
+
+ template <typename InputStream, typename OutputStream>
+ static RAPIDJSON_FORCEINLINE bool Validate(InputStream& is, OutputStream& os) {
+ typedef bool (*ValidateFunc)(InputStream&, OutputStream&);
+ static const ValidateFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Validate) };
+ return (*f[is.GetType()])(is, os);
+ }
+
+#undef RAPIDJSON_ENCODINGS_FUNC
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// Transcoder
+
+//! Encoding conversion.
+template<typename SourceEncoding, typename TargetEncoding>
+struct Transcoder {
+ //! Take one Unicode codepoint from source encoding, convert it to target encoding and put it to the output stream.
+ template<typename InputStream, typename OutputStream>
+ static RAPIDJSON_FORCEINLINE bool Transcode(InputStream& is, OutputStream& os) {
+ unsigned codepoint;
+ if (!SourceEncoding::Decode(is, &codepoint))
+ return false;
+ TargetEncoding::Encode(os, codepoint);
+ return true;
+ }
+
+ template<typename InputStream, typename OutputStream>
+ static RAPIDJSON_FORCEINLINE bool TranscodeUnsafe(InputStream& is, OutputStream& os) {
+ unsigned codepoint;
+ if (!SourceEncoding::Decode(is, &codepoint))
+ return false;
+ TargetEncoding::EncodeUnsafe(os, codepoint);
+ return true;
+ }
+
+ //! Validate one Unicode codepoint from an encoded stream.
+ template<typename InputStream, typename OutputStream>
+ static RAPIDJSON_FORCEINLINE bool Validate(InputStream& is, OutputStream& os) {
+ return Transcode(is, os); // Since source/target encoding is different, must transcode.
+ }
+};
+
+// Forward declaration.
+template<typename Stream>
+inline void PutUnsafe(Stream& stream, typename Stream::Ch c);
+
+//! Specialization of Transcoder with same source and target encoding.
+template<typename Encoding>
+struct Transcoder<Encoding, Encoding> {
+ template<typename InputStream, typename OutputStream>
+ static RAPIDJSON_FORCEINLINE bool Transcode(InputStream& is, OutputStream& os) {
+ os.Put(is.Take()); // Just copy one code unit. This semantic is different from primary template class.
+ return true;
+ }
+
+ template<typename InputStream, typename OutputStream>
+ static RAPIDJSON_FORCEINLINE bool TranscodeUnsafe(InputStream& is, OutputStream& os) {
+ PutUnsafe(os, is.Take()); // Just copy one code unit. This semantic is different from primary template class.
+ return true;
+ }
+
+ template<typename InputStream, typename OutputStream>
+ static RAPIDJSON_FORCEINLINE bool Validate(InputStream& is, OutputStream& os) {
+ return Encoding::Validate(is, os); // source/target encoding are the same
+ }
+};
+
+RAPIDJSON_NAMESPACE_END
+
+#if defined(__GNUC__) || (defined(_MSC_VER) && !defined(__clang__))
+RAPIDJSON_DIAG_POP
+#endif
+
+#endif // RAPIDJSON_ENCODINGS_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_error_en.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_error_en.h
new file mode 100644
index 00000000..e81ed92d
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_error_en.h
@@ -0,0 +1,74 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_ERROR_EN_H_
+#define RAPIDJSON_ERROR_EN_H_
+
+#include "lottie_rapidjson_error_error.h"
+
+#ifdef __clang__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(switch-enum)
+RAPIDJSON_DIAG_OFF(covered-switch-default)
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+
+//! Maps error code of parsing into error message.
+/*!
+ \ingroup RAPIDJSON_ERRORS
+ \param parseErrorCode Error code obtained in parsing.
+ \return the error message.
+ \note User can make a copy of this function for localization.
+ Using switch-case is safer for future modification of error codes.
+*/
+inline const RAPIDJSON_ERROR_CHARTYPE* GetParseError_En(ParseErrorCode parseErrorCode) {
+ switch (parseErrorCode) {
+ case kParseErrorNone: return RAPIDJSON_ERROR_STRING("No error.");
+
+ case kParseErrorDocumentEmpty: return RAPIDJSON_ERROR_STRING("The document is empty.");
+ case kParseErrorDocumentRootNotSingular: return RAPIDJSON_ERROR_STRING("The document root must not be followed by other values.");
+
+ case kParseErrorValueInvalid: return RAPIDJSON_ERROR_STRING("Invalid value.");
+
+ case kParseErrorObjectMissName: return RAPIDJSON_ERROR_STRING("Missing a name for object member.");
+ case kParseErrorObjectMissColon: return RAPIDJSON_ERROR_STRING("Missing a colon after a name of object member.");
+ case kParseErrorObjectMissCommaOrCurlyBracket: return RAPIDJSON_ERROR_STRING("Missing a comma or '}' after an object member.");
+
+ case kParseErrorArrayMissCommaOrSquareBracket: return RAPIDJSON_ERROR_STRING("Missing a comma or ']' after an array element.");
+
+ case kParseErrorStringUnicodeEscapeInvalidHex: return RAPIDJSON_ERROR_STRING("Incorrect hex digit after \\u escape in string.");
+ case kParseErrorStringUnicodeSurrogateInvalid: return RAPIDJSON_ERROR_STRING("The surrogate pair in string is invalid.");
+ case kParseErrorStringEscapeInvalid: return RAPIDJSON_ERROR_STRING("Invalid escape character in string.");
+ case kParseErrorStringMissQuotationMark: return RAPIDJSON_ERROR_STRING("Missing a closing quotation mark in string.");
+ case kParseErrorStringInvalidEncoding: return RAPIDJSON_ERROR_STRING("Invalid encoding in string.");
+
+ case kParseErrorNumberTooBig: return RAPIDJSON_ERROR_STRING("Number too big to be stored in double.");
+ case kParseErrorNumberMissFraction: return RAPIDJSON_ERROR_STRING("Miss fraction part in number.");
+ case kParseErrorNumberMissExponent: return RAPIDJSON_ERROR_STRING("Miss exponent in number.");
+
+ case kParseErrorTermination: return RAPIDJSON_ERROR_STRING("Terminate parsing due to Handler error.");
+ case kParseErrorUnspecificSyntaxError: return RAPIDJSON_ERROR_STRING("Unspecific syntax error.");
+
+ default: return RAPIDJSON_ERROR_STRING("Unknown error.");
+ }
+}
+
+RAPIDJSON_NAMESPACE_END
+
+#ifdef __clang__
+RAPIDJSON_DIAG_POP
+#endif
+
+#endif // RAPIDJSON_ERROR_EN_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_error_error.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_error_error.h
new file mode 100644
index 00000000..034675e5
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_error_error.h
@@ -0,0 +1,161 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_ERROR_ERROR_H_
+#define RAPIDJSON_ERROR_ERROR_H_
+
+#include "lottie_rapidjson_rapidjson.h"
+
+#ifdef __clang__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(padded)
+#endif
+
+/*! \file error.h */
+
+/*! \defgroup RAPIDJSON_ERRORS RapidJSON error handling */
+
+///////////////////////////////////////////////////////////////////////////////
+// RAPIDJSON_ERROR_CHARTYPE
+
+//! Character type of error messages.
+/*! \ingroup RAPIDJSON_ERRORS
+ The default character type is \c char.
+ On Windows, user can define this macro as \c TCHAR for supporting both
+ unicode/non-unicode settings.
+*/
+#ifndef RAPIDJSON_ERROR_CHARTYPE
+#define RAPIDJSON_ERROR_CHARTYPE char
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// RAPIDJSON_ERROR_STRING
+
+//! Macro for converting string literial to \ref RAPIDJSON_ERROR_CHARTYPE[].
+/*! \ingroup RAPIDJSON_ERRORS
+ By default this conversion macro does nothing.
+ On Windows, user can define this macro as \c _T(x) for supporting both
+ unicode/non-unicode settings.
+*/
+#ifndef RAPIDJSON_ERROR_STRING
+#define RAPIDJSON_ERROR_STRING(x) x
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+
+///////////////////////////////////////////////////////////////////////////////
+// ParseErrorCode
+
+//! Error code of parsing.
+/*! \ingroup RAPIDJSON_ERRORS
+ \see GenericReader::Parse, GenericReader::GetParseErrorCode
+*/
+enum ParseErrorCode {
+ kParseErrorNone = 0, //!< No error.
+
+ kParseErrorDocumentEmpty, //!< The document is empty.
+ kParseErrorDocumentRootNotSingular, //!< The document root must not follow by other values.
+
+ kParseErrorValueInvalid, //!< Invalid value.
+
+ kParseErrorObjectMissName, //!< Missing a name for object member.
+ kParseErrorObjectMissColon, //!< Missing a colon after a name of object member.
+ kParseErrorObjectMissCommaOrCurlyBracket, //!< Missing a comma or '}' after an object member.
+
+ kParseErrorArrayMissCommaOrSquareBracket, //!< Missing a comma or ']' after an array element.
+
+ kParseErrorStringUnicodeEscapeInvalidHex, //!< Incorrect hex digit after \\u escape in string.
+ kParseErrorStringUnicodeSurrogateInvalid, //!< The surrogate pair in string is invalid.
+ kParseErrorStringEscapeInvalid, //!< Invalid escape character in string.
+ kParseErrorStringMissQuotationMark, //!< Missing a closing quotation mark in string.
+ kParseErrorStringInvalidEncoding, //!< Invalid encoding in string.
+
+ kParseErrorNumberTooBig, //!< Number too big to be stored in double.
+ kParseErrorNumberMissFraction, //!< Miss fraction part in number.
+ kParseErrorNumberMissExponent, //!< Miss exponent in number.
+
+ kParseErrorTermination, //!< Parsing was terminated.
+ kParseErrorUnspecificSyntaxError //!< Unspecific syntax error.
+};
+
+//! Result of parsing (wraps ParseErrorCode)
+/*!
+ \ingroup RAPIDJSON_ERRORS
+ \code
+ Document doc;
+ ParseResult ok = doc.Parse("[42]");
+ if (!ok) {
+ fprintf(stderr, "JSON parse error: %s (%u)",
+ GetParseError_En(ok.Code()), ok.Offset());
+ exit(EXIT_FAILURE);
+ }
+ \endcode
+ \see GenericReader::Parse, GenericDocument::Parse
+*/
+struct ParseResult {
+ //!! Unspecified boolean type
+ typedef bool (ParseResult::*BooleanType)() const;
+public:
+ //! Default constructor, no error.
+ ParseResult() : code_(kParseErrorNone), offset_(0) {}
+ //! Constructor to set an error.
+ ParseResult(ParseErrorCode code, size_t offset) : code_(code), offset_(offset) {}
+
+ //! Get the error code.
+ ParseErrorCode Code() const { return code_; }
+ //! Get the error offset, if \ref IsError(), 0 otherwise.
+ size_t Offset() const { return offset_; }
+
+ //! Explicit conversion to \c bool, returns \c true, iff !\ref IsError().
+ operator BooleanType() const { return !IsError() ? &ParseResult::IsError : NULL; }
+ //! Whether the result is an error.
+ bool IsError() const { return code_ != kParseErrorNone; }
+
+ bool operator==(const ParseResult& that) const { return code_ == that.code_; }
+ bool operator==(ParseErrorCode code) const { return code_ == code; }
+ friend bool operator==(ParseErrorCode code, const ParseResult & err) { return code == err.code_; }
+
+ bool operator!=(const ParseResult& that) const { return !(*this == that); }
+ bool operator!=(ParseErrorCode code) const { return !(*this == code); }
+ friend bool operator!=(ParseErrorCode code, const ParseResult & err) { return err != code; }
+
+ //! Reset error code.
+ void Clear() { Set(kParseErrorNone); }
+ //! Update error code and offset.
+ void Set(ParseErrorCode code, size_t offset = 0) { code_ = code; offset_ = offset; }
+
+private:
+ ParseErrorCode code_;
+ size_t offset_;
+};
+
+//! Function pointer type of GetParseError().
+/*! \ingroup RAPIDJSON_ERRORS
+
+ This is the prototype for \c GetParseError_X(), where \c X is a locale.
+ User can dynamically change locale in runtime, e.g.:
+\code
+ GetParseErrorFunc GetParseError = GetParseError_En; // or whatever
+ const RAPIDJSON_ERROR_CHARTYPE* s = GetParseError(document.GetParseErrorCode());
+\endcode
+*/
+typedef const RAPIDJSON_ERROR_CHARTYPE* (*GetParseErrorFunc)(ParseErrorCode);
+
+RAPIDJSON_NAMESPACE_END
+
+#ifdef __clang__
+RAPIDJSON_DIAG_POP
+#endif
+
+#endif // RAPIDJSON_ERROR_ERROR_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_filereadstream.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_filereadstream.h
new file mode 100644
index 00000000..f46a7545
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_filereadstream.h
@@ -0,0 +1,99 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_FILEREADSTREAM_H_
+#define RAPIDJSON_FILEREADSTREAM_H_
+
+#include "lottie_rapidjson_stream.h"
+#include <cstdio>
+
+#ifdef __clang__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(padded)
+RAPIDJSON_DIAG_OFF(unreachable-code)
+RAPIDJSON_DIAG_OFF(missing-noreturn)
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+
+//! File byte stream for input using fread().
+/*!
+ \note implements Stream concept
+*/
+class FileReadStream {
+public:
+ typedef char Ch; //!< Character type (byte).
+
+ //! Constructor.
+ /*!
+ \param fp File pointer opened for read.
+ \param buffer user-supplied buffer.
+ \param bufferSize size of buffer in bytes. Must >=4 bytes.
+ */
+ FileReadStream(std::FILE* fp, char* buffer, size_t bufferSize) : fp_(fp), buffer_(buffer), bufferSize_(bufferSize), bufferLast_(0), current_(buffer_), readCount_(0), count_(0), eof_(false) {
+ RAPIDJSON_ASSERT(fp_ != 0);
+ RAPIDJSON_ASSERT(bufferSize >= 4);
+ Read();
+ }
+
+ Ch Peek() const { return *current_; }
+ Ch Take() { Ch c = *current_; Read(); return c; }
+ size_t Tell() const { return count_ + static_cast<size_t>(current_ - buffer_); }
+
+ // Not implemented
+ void Put(Ch) { RAPIDJSON_ASSERT(false); }
+ void Flush() { RAPIDJSON_ASSERT(false); }
+ Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
+ size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }
+
+ // For encoding detection only.
+ const Ch* Peek4() const {
+ return (current_ + 4 - !eof_ <= bufferLast_) ? current_ : 0;
+ }
+
+private:
+ void Read() {
+ if (current_ < bufferLast_)
+ ++current_;
+ else if (!eof_) {
+ count_ += readCount_;
+ readCount_ = std::fread(buffer_, 1, bufferSize_, fp_);
+ bufferLast_ = buffer_ + readCount_ - 1;
+ current_ = buffer_;
+
+ if (readCount_ < bufferSize_) {
+ buffer_[readCount_] = '\0';
+ ++bufferLast_;
+ eof_ = true;
+ }
+ }
+ }
+
+ std::FILE* fp_;
+ Ch *buffer_;
+ size_t bufferSize_;
+ Ch *bufferLast_;
+ Ch *current_;
+ size_t readCount_;
+ size_t count_; //!< Number of characters read
+ bool eof_;
+};
+
+RAPIDJSON_NAMESPACE_END
+
+#ifdef __clang__
+RAPIDJSON_DIAG_POP
+#endif
+
+#endif // RAPIDJSON_FILESTREAM_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_filewritestream.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_filewritestream.h
new file mode 100644
index 00000000..0a3408ee
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_filewritestream.h
@@ -0,0 +1,104 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_FILEWRITESTREAM_H_
+#define RAPIDJSON_FILEWRITESTREAM_H_
+
+#include "lottie_rapidjson_stream.h"
+#include <cstdio>
+
+#ifdef __clang__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(unreachable-code)
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+
+//! Wrapper of C file stream for output using fwrite().
+/*!
+ \note implements Stream concept
+*/
+class FileWriteStream {
+public:
+ typedef char Ch; //!< Character type. Only support char.
+
+ FileWriteStream(std::FILE* fp, char* buffer, size_t bufferSize) : fp_(fp), buffer_(buffer), bufferEnd_(buffer + bufferSize), current_(buffer_) {
+ RAPIDJSON_ASSERT(fp_ != 0);
+ }
+
+ void Put(char c) {
+ if (current_ >= bufferEnd_)
+ Flush();
+
+ *current_++ = c;
+ }
+
+ void PutN(char c, size_t n) {
+ size_t avail = static_cast<size_t>(bufferEnd_ - current_);
+ while (n > avail) {
+ std::memset(current_, c, avail);
+ current_ += avail;
+ Flush();
+ n -= avail;
+ avail = static_cast<size_t>(bufferEnd_ - current_);
+ }
+
+ if (n > 0) {
+ std::memset(current_, c, n);
+ current_ += n;
+ }
+ }
+
+ void Flush() {
+ if (current_ != buffer_) {
+ size_t result = std::fwrite(buffer_, 1, static_cast<size_t>(current_ - buffer_), fp_);
+ if (result < static_cast<size_t>(current_ - buffer_)) {
+ // failure deliberately ignored at this time
+ // added to avoid warn_unused_result build errors
+ }
+ current_ = buffer_;
+ }
+ }
+
+ // Not implemented
+ char Peek() const { RAPIDJSON_ASSERT(false); return 0; }
+ char Take() { RAPIDJSON_ASSERT(false); return 0; }
+ size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; }
+ char* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
+ size_t PutEnd(char*) { RAPIDJSON_ASSERT(false); return 0; }
+
+private:
+ // Prohibit copy constructor & assignment operator.
+ FileWriteStream(const FileWriteStream&);
+ FileWriteStream& operator=(const FileWriteStream&);
+
+ std::FILE* fp_;
+ char *buffer_;
+ char *bufferEnd_;
+ char *current_;
+};
+
+//! Implement specialized version of PutN() with memset() for better performance.
+template<>
+inline void PutN(FileWriteStream& stream, char c, size_t n) {
+ stream.PutN(c, n);
+}
+
+RAPIDJSON_NAMESPACE_END
+
+#ifdef __clang__
+RAPIDJSON_DIAG_POP
+#endif
+
+#endif // RAPIDJSON_FILESTREAM_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_fwd.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_fwd.h
new file mode 100644
index 00000000..eee2ef8b
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_fwd.h
@@ -0,0 +1,151 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_FWD_H_
+#define RAPIDJSON_FWD_H_
+
+#include "lottie_rapidjson_rapidjson.h"
+
+RAPIDJSON_NAMESPACE_BEGIN
+
+// encodings.h
+
+template<typename CharType> struct UTF8;
+template<typename CharType> struct UTF16;
+template<typename CharType> struct UTF16BE;
+template<typename CharType> struct UTF16LE;
+template<typename CharType> struct UTF32;
+template<typename CharType> struct UTF32BE;
+template<typename CharType> struct UTF32LE;
+template<typename CharType> struct ASCII;
+template<typename CharType> struct AutoUTF;
+
+template<typename SourceEncoding, typename TargetEncoding>
+struct Transcoder;
+
+// allocators.h
+
+class CrtAllocator;
+
+template <typename BaseAllocator>
+class MemoryPoolAllocator;
+
+// stream.h
+
+template <typename Encoding>
+struct GenericStringStream;
+
+typedef GenericStringStream<UTF8<char> > StringStream;
+
+template <typename Encoding>
+struct GenericInsituStringStream;
+
+typedef GenericInsituStringStream<UTF8<char> > InsituStringStream;
+
+// stringbuffer.h
+
+template <typename Encoding, typename Allocator>
+class GenericStringBuffer;
+
+typedef GenericStringBuffer<UTF8<char>, CrtAllocator> StringBuffer;
+
+// filereadstream.h
+
+class FileReadStream;
+
+// filewritestream.h
+
+class FileWriteStream;
+
+// memorybuffer.h
+
+template <typename Allocator>
+struct GenericMemoryBuffer;
+
+typedef GenericMemoryBuffer<CrtAllocator> MemoryBuffer;
+
+// memorystream.h
+
+struct MemoryStream;
+
+// reader.h
+
+template<typename Encoding, typename Derived>
+struct BaseReaderHandler;
+
+template <typename SourceEncoding, typename TargetEncoding, typename StackAllocator>
+class GenericReader;
+
+typedef GenericReader<UTF8<char>, UTF8<char>, CrtAllocator> Reader;
+
+// writer.h
+
+template<typename OutputStream, typename SourceEncoding, typename TargetEncoding, typename StackAllocator, unsigned writeFlags>
+class Writer;
+
+// prettywriter.h
+
+template<typename OutputStream, typename SourceEncoding, typename TargetEncoding, typename StackAllocator, unsigned writeFlags>
+class PrettyWriter;
+
+// document.h
+
+template <typename Encoding, typename Allocator>
+class GenericMember;
+
+template <bool Const, typename Encoding, typename Allocator>
+class GenericMemberIterator;
+
+template<typename CharType>
+struct GenericStringRef;
+
+template <typename Encoding, typename Allocator>
+class GenericValue;
+
+typedef GenericValue<UTF8<char>, MemoryPoolAllocator<CrtAllocator> > Value;
+
+template <typename Encoding, typename Allocator, typename StackAllocator>
+class GenericDocument;
+
+typedef GenericDocument<UTF8<char>, MemoryPoolAllocator<CrtAllocator>, CrtAllocator> Document;
+
+// pointer.h
+
+template <typename ValueType, typename Allocator>
+class GenericPointer;
+
+typedef GenericPointer<Value, CrtAllocator> Pointer;
+
+// schema.h
+
+template <typename SchemaDocumentType>
+class IGenericRemoteSchemaDocumentProvider;
+
+template <typename ValueT, typename Allocator>
+class GenericSchemaDocument;
+
+typedef GenericSchemaDocument<Value, CrtAllocator> SchemaDocument;
+typedef IGenericRemoteSchemaDocumentProvider<SchemaDocument> IRemoteSchemaDocumentProvider;
+
+template <
+ typename SchemaDocumentType,
+ typename OutputHandler,
+ typename StateAllocator>
+class GenericSchemaValidator;
+
+typedef GenericSchemaValidator<SchemaDocument, BaseReaderHandler<UTF8<char>, void>, CrtAllocator> SchemaValidator;
+
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_RAPIDJSONFWD_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_biginteger.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_biginteger.h
new file mode 100644
index 00000000..1d5db026
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_biginteger.h
@@ -0,0 +1,290 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_BIGINTEGER_H_
+#define RAPIDJSON_BIGINTEGER_H_
+
+#include "lottie_rapidjson_rapidjson.h"
+
+#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) && defined(_M_AMD64)
+#include <intrin.h> // for _umul128
+#pragma intrinsic(_umul128)
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+class BigInteger {
+public:
+ typedef uint64_t Type;
+
+ BigInteger(const BigInteger& rhs) : count_(rhs.count_) {
+ std::memcpy(digits_, rhs.digits_, count_ * sizeof(Type));
+ }
+
+ explicit BigInteger(uint64_t u) : count_(1) {
+ digits_[0] = u;
+ }
+
+ BigInteger(const char* decimals, size_t length) : count_(1) {
+ RAPIDJSON_ASSERT(length > 0);
+ digits_[0] = 0;
+ size_t i = 0;
+ const size_t kMaxDigitPerIteration = 19; // 2^64 = 18446744073709551616 > 10^19
+ while (length >= kMaxDigitPerIteration) {
+ AppendDecimal64(decimals + i, decimals + i + kMaxDigitPerIteration);
+ length -= kMaxDigitPerIteration;
+ i += kMaxDigitPerIteration;
+ }
+
+ if (length > 0)
+ AppendDecimal64(decimals + i, decimals + i + length);
+ }
+
+ BigInteger& operator=(const BigInteger &rhs)
+ {
+ if (this != &rhs) {
+ count_ = rhs.count_;
+ std::memcpy(digits_, rhs.digits_, count_ * sizeof(Type));
+ }
+ return *this;
+ }
+
+ BigInteger& operator=(uint64_t u) {
+ digits_[0] = u;
+ count_ = 1;
+ return *this;
+ }
+
+ BigInteger& operator+=(uint64_t u) {
+ Type backup = digits_[0];
+ digits_[0] += u;
+ for (size_t i = 0; i < count_ - 1; i++) {
+ if (digits_[i] >= backup)
+ return *this; // no carry
+ backup = digits_[i + 1];
+ digits_[i + 1] += 1;
+ }
+
+ // Last carry
+ if (digits_[count_ - 1] < backup)
+ PushBack(1);
+
+ return *this;
+ }
+
+ BigInteger& operator*=(uint64_t u) {
+ if (u == 0) return *this = 0;
+ if (u == 1) return *this;
+ if (*this == 1) return *this = u;
+
+ uint64_t k = 0;
+ for (size_t i = 0; i < count_; i++) {
+ uint64_t hi;
+ digits_[i] = MulAdd64(digits_[i], u, k, &hi);
+ k = hi;
+ }
+
+ if (k > 0)
+ PushBack(k);
+
+ return *this;
+ }
+
+ BigInteger& operator*=(uint32_t u) {
+ if (u == 0) return *this = 0;
+ if (u == 1) return *this;
+ if (*this == 1) return *this = u;
+
+ uint64_t k = 0;
+ for (size_t i = 0; i < count_; i++) {
+ const uint64_t c = digits_[i] >> 32;
+ const uint64_t d = digits_[i] & 0xFFFFFFFF;
+ const uint64_t uc = u * c;
+ const uint64_t ud = u * d;
+ const uint64_t p0 = ud + k;
+ const uint64_t p1 = uc + (p0 >> 32);
+ digits_[i] = (p0 & 0xFFFFFFFF) | (p1 << 32);
+ k = p1 >> 32;
+ }
+
+ if (k > 0)
+ PushBack(k);
+
+ return *this;
+ }
+
+ BigInteger& operator<<=(size_t shift) {
+ if (IsZero() || shift == 0) return *this;
+
+ size_t offset = shift / kTypeBit;
+ size_t interShift = shift % kTypeBit;
+ RAPIDJSON_ASSERT(count_ + offset <= kCapacity);
+
+ if (interShift == 0) {
+ std::memmove(digits_ + offset, digits_, count_ * sizeof(Type));
+ count_ += offset;
+ }
+ else {
+ digits_[count_] = 0;
+ for (size_t i = count_; i > 0; i--)
+ digits_[i + offset] = (digits_[i] << interShift) | (digits_[i - 1] >> (kTypeBit - interShift));
+ digits_[offset] = digits_[0] << interShift;
+ count_ += offset;
+ if (digits_[count_])
+ count_++;
+ }
+
+ std::memset(digits_, 0, offset * sizeof(Type));
+
+ return *this;
+ }
+
+ bool operator==(const BigInteger& rhs) const {
+ return count_ == rhs.count_ && std::memcmp(digits_, rhs.digits_, count_ * sizeof(Type)) == 0;
+ }
+
+ bool operator==(const Type rhs) const {
+ return count_ == 1 && digits_[0] == rhs;
+ }
+
+ BigInteger& MultiplyPow5(unsigned exp) {
+ static const uint32_t kPow5[12] = {
+ 5,
+ 5 * 5,
+ 5 * 5 * 5,
+ 5 * 5 * 5 * 5,
+ 5 * 5 * 5 * 5 * 5,
+ 5 * 5 * 5 * 5 * 5 * 5,
+ 5 * 5 * 5 * 5 * 5 * 5 * 5,
+ 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5,
+ 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5,
+ 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5,
+ 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5,
+ 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5
+ };
+ if (exp == 0) return *this;
+ for (; exp >= 27; exp -= 27) *this *= RAPIDJSON_UINT64_C2(0X6765C793, 0XFA10079D); // 5^27
+ for (; exp >= 13; exp -= 13) *this *= static_cast<uint32_t>(1220703125u); // 5^13
+ if (exp > 0) *this *= kPow5[exp - 1];
+ return *this;
+ }
+
+ // Compute absolute difference of this and rhs.
+ // Assume this != rhs
+ bool Difference(const BigInteger& rhs, BigInteger* out) const {
+ int cmp = Compare(rhs);
+ RAPIDJSON_ASSERT(cmp != 0);
+ const BigInteger *a, *b; // Makes a > b
+ bool ret;
+ if (cmp < 0) { a = &rhs; b = this; ret = true; }
+ else { a = this; b = &rhs; ret = false; }
+
+ Type borrow = 0;
+ for (size_t i = 0; i < a->count_; i++) {
+ Type d = a->digits_[i] - borrow;
+ if (i < b->count_)
+ d -= b->digits_[i];
+ borrow = (d > a->digits_[i]) ? 1 : 0;
+ out->digits_[i] = d;
+ if (d != 0)
+ out->count_ = i + 1;
+ }
+
+ return ret;
+ }
+
+ int Compare(const BigInteger& rhs) const {
+ if (count_ != rhs.count_)
+ return count_ < rhs.count_ ? -1 : 1;
+
+ for (size_t i = count_; i-- > 0;)
+ if (digits_[i] != rhs.digits_[i])
+ return digits_[i] < rhs.digits_[i] ? -1 : 1;
+
+ return 0;
+ }
+
+ size_t GetCount() const { return count_; }
+ Type GetDigit(size_t index) const { RAPIDJSON_ASSERT(index < count_); return digits_[index]; }
+ bool IsZero() const { return count_ == 1 && digits_[0] == 0; }
+
+private:
+ void AppendDecimal64(const char* begin, const char* end) {
+ uint64_t u = ParseUint64(begin, end);
+ if (IsZero())
+ *this = u;
+ else {
+ unsigned exp = static_cast<unsigned>(end - begin);
+ (MultiplyPow5(exp) <<= exp) += u; // *this = *this * 10^exp + u
+ }
+ }
+
+ void PushBack(Type digit) {
+ RAPIDJSON_ASSERT(count_ < kCapacity);
+ digits_[count_++] = digit;
+ }
+
+ static uint64_t ParseUint64(const char* begin, const char* end) {
+ uint64_t r = 0;
+ for (const char* p = begin; p != end; ++p) {
+ RAPIDJSON_ASSERT(*p >= '0' && *p <= '9');
+ r = r * 10u + static_cast<unsigned>(*p - '0');
+ }
+ return r;
+ }
+
+ // Assume a * b + k < 2^128
+ static uint64_t MulAdd64(uint64_t a, uint64_t b, uint64_t k, uint64_t* outHigh) {
+#if defined(_MSC_VER) && defined(_M_AMD64)
+ uint64_t low = _umul128(a, b, outHigh) + k;
+ if (low < k)
+ (*outHigh)++;
+ return low;
+#elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__x86_64__)
+ __extension__ typedef unsigned __int128 uint128;
+ uint128 p = static_cast<uint128>(a) * static_cast<uint128>(b);
+ p += k;
+ *outHigh = static_cast<uint64_t>(p >> 64);
+ return static_cast<uint64_t>(p);
+#else
+ const uint64_t a0 = a & 0xFFFFFFFF, a1 = a >> 32, b0 = b & 0xFFFFFFFF, b1 = b >> 32;
+ uint64_t x0 = a0 * b0, x1 = a0 * b1, x2 = a1 * b0, x3 = a1 * b1;
+ x1 += (x0 >> 32); // can't give carry
+ x1 += x2;
+ if (x1 < x2)
+ x3 += (static_cast<uint64_t>(1) << 32);
+ uint64_t lo = (x1 << 32) + (x0 & 0xFFFFFFFF);
+ uint64_t hi = x3 + (x1 >> 32);
+
+ lo += k;
+ if (lo < k)
+ hi++;
+ *outHigh = hi;
+ return lo;
+#endif
+ }
+
+ static const size_t kBitCount = 3328; // 64bit * 54 > 10^1000
+ static const size_t kCapacity = kBitCount / sizeof(Type);
+ static const size_t kTypeBit = sizeof(Type) * 8;
+
+ Type digits_[kCapacity];
+ size_t count_;
+};
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_BIGINTEGER_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_clzll.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_clzll.h
new file mode 100644
index 00000000..b43ab88d
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_clzll.h
@@ -0,0 +1,71 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_CLZLL_H_
+#define RAPIDJSON_CLZLL_H_
+
+#include "lottie_rapidjson_rapidjson.h"
+
+#if defined(_MSC_VER) && !defined(UNDER_CE)
+#include <intrin.h>
+#if defined(_WIN64)
+#pragma intrinsic(_BitScanReverse64)
+#else
+#pragma intrinsic(_BitScanReverse)
+#endif
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+inline uint32_t clzll(uint64_t x) {
+ // Passing 0 to __builtin_clzll is UB in GCC and results in an
+ // infinite loop in the software implementation.
+ RAPIDJSON_ASSERT(x != 0);
+
+#if defined(_MSC_VER) && !defined(UNDER_CE)
+ unsigned long r = 0;
+#if defined(_WIN64)
+ _BitScanReverse64(&r, x);
+#else
+ // Scan the high 32 bits.
+ if (_BitScanReverse(&r, static_cast<uint32_t>(x >> 32)))
+ return 63 - (r + 32);
+
+ // Scan the low 32 bits.
+ _BitScanReverse(&r, static_cast<uint32_t>(x & 0xFFFFFFFF));
+#endif // _WIN64
+
+ return 63 - r;
+#elif (defined(__GNUC__) && __GNUC__ >= 4) || RAPIDJSON_HAS_BUILTIN(__builtin_clzll)
+ // __builtin_clzll wrapper
+ return static_cast<uint32_t>(__builtin_clzll(x));
+#else
+ // naive version
+ uint32_t r = 0;
+ while (!(x & (static_cast<uint64_t>(1) << 63))) {
+ x <<= 1;
+ ++r;
+ }
+
+ return r;
+#endif // _MSC_VER
+}
+
+#define RAPIDJSON_CLZLL RAPIDJSON_NAMESPACE::internal::clzll
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_CLZLL_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_diyfp.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_diyfp.h
new file mode 100644
index 00000000..349a1003
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_diyfp.h
@@ -0,0 +1,257 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+// This is a C++ header-only implementation of Grisu2 algorithm from the publication:
+// Loitsch, Florian. "Printing floating-point numbers quickly and accurately with
+// integers." ACM Sigplan Notices 45.6 (2010): 233-243.
+
+#ifndef RAPIDJSON_DIYFP_H_
+#define RAPIDJSON_DIYFP_H_
+
+#include "lottie_rapidjson_rapidjson.h"
+#include "lottie_rapidjson_internal_clzll.h"
+#include <limits>
+
+#if defined(_MSC_VER) && defined(_M_AMD64) && !defined(__INTEL_COMPILER)
+#include <intrin.h>
+#pragma intrinsic(_umul128)
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(effc++)
+#endif
+
+#ifdef __clang__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(padded)
+#endif
+
+struct DiyFp {
+ DiyFp() : f(), e() {}
+
+ DiyFp(uint64_t fp, int exp) : f(fp), e(exp) {}
+
+ explicit DiyFp(double d) {
+ union {
+ double d;
+ uint64_t u64;
+ } u = { d };
+
+ int biased_e = static_cast<int>((u.u64 & kDpExponentMask) >> kDpSignificandSize);
+ uint64_t significand = (u.u64 & kDpSignificandMask);
+ if (biased_e != 0) {
+ f = significand + kDpHiddenBit;
+ e = biased_e - kDpExponentBias;
+ }
+ else {
+ f = significand;
+ e = kDpMinExponent + 1;
+ }
+ }
+
+ DiyFp operator-(const DiyFp& rhs) const {
+ return DiyFp(f - rhs.f, e);
+ }
+
+ DiyFp operator*(const DiyFp& rhs) const {
+#if defined(_MSC_VER) && defined(_M_AMD64)
+ uint64_t h;
+ uint64_t l = _umul128(f, rhs.f, &h);
+ if (l & (uint64_t(1) << 63)) // rounding
+ h++;
+ return DiyFp(h, e + rhs.e + 64);
+#elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__x86_64__)
+ __extension__ typedef unsigned __int128 uint128;
+ uint128 p = static_cast<uint128>(f) * static_cast<uint128>(rhs.f);
+ uint64_t h = static_cast<uint64_t>(p >> 64);
+ uint64_t l = static_cast<uint64_t>(p);
+ if (l & (uint64_t(1) << 63)) // rounding
+ h++;
+ return DiyFp(h, e + rhs.e + 64);
+#else
+ const uint64_t M32 = 0xFFFFFFFF;
+ const uint64_t a = f >> 32;
+ const uint64_t b = f & M32;
+ const uint64_t c = rhs.f >> 32;
+ const uint64_t d = rhs.f & M32;
+ const uint64_t ac = a * c;
+ const uint64_t bc = b * c;
+ const uint64_t ad = a * d;
+ const uint64_t bd = b * d;
+ uint64_t tmp = (bd >> 32) + (ad & M32) + (bc & M32);
+ tmp += 1U << 31; /// mult_round
+ return DiyFp(ac + (ad >> 32) + (bc >> 32) + (tmp >> 32), e + rhs.e + 64);
+#endif
+ }
+
+ DiyFp Normalize() const {
+ int s = static_cast<int>(clzll(f));
+ return DiyFp(f << s, e - s);
+ }
+
+ DiyFp NormalizeBoundary() const {
+ DiyFp res = *this;
+ while (!(res.f & (kDpHiddenBit << 1))) {
+ res.f <<= 1;
+ res.e--;
+ }
+ res.f <<= (kDiySignificandSize - kDpSignificandSize - 2);
+ res.e = res.e - (kDiySignificandSize - kDpSignificandSize - 2);
+ return res;
+ }
+
+ void NormalizedBoundaries(DiyFp* minus, DiyFp* plus) const {
+ DiyFp pl = DiyFp((f << 1) + 1, e - 1).NormalizeBoundary();
+ DiyFp mi = (f == kDpHiddenBit) ? DiyFp((f << 2) - 1, e - 2) : DiyFp((f << 1) - 1, e - 1);
+ mi.f <<= mi.e - pl.e;
+ mi.e = pl.e;
+ *plus = pl;
+ *minus = mi;
+ }
+
+ double ToDouble() const {
+ union {
+ double d;
+ uint64_t u64;
+ }u;
+ RAPIDJSON_ASSERT(f <= kDpHiddenBit + kDpSignificandMask);
+ if (e < kDpDenormalExponent) {
+ // Underflow.
+ return 0.0;
+ }
+ if (e >= kDpMaxExponent) {
+ // Overflow.
+ return std::numeric_limits<double>::infinity();
+ }
+ const uint64_t be = (e == kDpDenormalExponent && (f & kDpHiddenBit) == 0) ? 0 :
+ static_cast<uint64_t>(e + kDpExponentBias);
+ u.u64 = (f & kDpSignificandMask) | (be << kDpSignificandSize);
+ return u.d;
+ }
+
+ static const int kDiySignificandSize = 64;
+ static const int kDpSignificandSize = 52;
+ static const int kDpExponentBias = 0x3FF + kDpSignificandSize;
+ static const int kDpMaxExponent = 0x7FF - kDpExponentBias;
+ static const int kDpMinExponent = -kDpExponentBias;
+ static const int kDpDenormalExponent = -kDpExponentBias + 1;
+ static const uint64_t kDpExponentMask = RAPIDJSON_UINT64_C2(0x7FF00000, 0x00000000);
+ static const uint64_t kDpSignificandMask = RAPIDJSON_UINT64_C2(0x000FFFFF, 0xFFFFFFFF);
+ static const uint64_t kDpHiddenBit = RAPIDJSON_UINT64_C2(0x00100000, 0x00000000);
+
+ uint64_t f;
+ int e;
+};
+
+inline DiyFp GetCachedPowerByIndex(size_t index) {
+ // 10^-348, 10^-340, ..., 10^340
+ static const uint64_t kCachedPowers_F[] = {
+ RAPIDJSON_UINT64_C2(0xfa8fd5a0, 0x081c0288), RAPIDJSON_UINT64_C2(0xbaaee17f, 0xa23ebf76),
+ RAPIDJSON_UINT64_C2(0x8b16fb20, 0x3055ac76), RAPIDJSON_UINT64_C2(0xcf42894a, 0x5dce35ea),
+ RAPIDJSON_UINT64_C2(0x9a6bb0aa, 0x55653b2d), RAPIDJSON_UINT64_C2(0xe61acf03, 0x3d1a45df),
+ RAPIDJSON_UINT64_C2(0xab70fe17, 0xc79ac6ca), RAPIDJSON_UINT64_C2(0xff77b1fc, 0xbebcdc4f),
+ RAPIDJSON_UINT64_C2(0xbe5691ef, 0x416bd60c), RAPIDJSON_UINT64_C2(0x8dd01fad, 0x907ffc3c),
+ RAPIDJSON_UINT64_C2(0xd3515c28, 0x31559a83), RAPIDJSON_UINT64_C2(0x9d71ac8f, 0xada6c9b5),
+ RAPIDJSON_UINT64_C2(0xea9c2277, 0x23ee8bcb), RAPIDJSON_UINT64_C2(0xaecc4991, 0x4078536d),
+ RAPIDJSON_UINT64_C2(0x823c1279, 0x5db6ce57), RAPIDJSON_UINT64_C2(0xc2109436, 0x4dfb5637),
+ RAPIDJSON_UINT64_C2(0x9096ea6f, 0x3848984f), RAPIDJSON_UINT64_C2(0xd77485cb, 0x25823ac7),
+ RAPIDJSON_UINT64_C2(0xa086cfcd, 0x97bf97f4), RAPIDJSON_UINT64_C2(0xef340a98, 0x172aace5),
+ RAPIDJSON_UINT64_C2(0xb23867fb, 0x2a35b28e), RAPIDJSON_UINT64_C2(0x84c8d4df, 0xd2c63f3b),
+ RAPIDJSON_UINT64_C2(0xc5dd4427, 0x1ad3cdba), RAPIDJSON_UINT64_C2(0x936b9fce, 0xbb25c996),
+ RAPIDJSON_UINT64_C2(0xdbac6c24, 0x7d62a584), RAPIDJSON_UINT64_C2(0xa3ab6658, 0x0d5fdaf6),
+ RAPIDJSON_UINT64_C2(0xf3e2f893, 0xdec3f126), RAPIDJSON_UINT64_C2(0xb5b5ada8, 0xaaff80b8),
+ RAPIDJSON_UINT64_C2(0x87625f05, 0x6c7c4a8b), RAPIDJSON_UINT64_C2(0xc9bcff60, 0x34c13053),
+ RAPIDJSON_UINT64_C2(0x964e858c, 0x91ba2655), RAPIDJSON_UINT64_C2(0xdff97724, 0x70297ebd),
+ RAPIDJSON_UINT64_C2(0xa6dfbd9f, 0xb8e5b88f), RAPIDJSON_UINT64_C2(0xf8a95fcf, 0x88747d94),
+ RAPIDJSON_UINT64_C2(0xb9447093, 0x8fa89bcf), RAPIDJSON_UINT64_C2(0x8a08f0f8, 0xbf0f156b),
+ RAPIDJSON_UINT64_C2(0xcdb02555, 0x653131b6), RAPIDJSON_UINT64_C2(0x993fe2c6, 0xd07b7fac),
+ RAPIDJSON_UINT64_C2(0xe45c10c4, 0x2a2b3b06), RAPIDJSON_UINT64_C2(0xaa242499, 0x697392d3),
+ RAPIDJSON_UINT64_C2(0xfd87b5f2, 0x8300ca0e), RAPIDJSON_UINT64_C2(0xbce50864, 0x92111aeb),
+ RAPIDJSON_UINT64_C2(0x8cbccc09, 0x6f5088cc), RAPIDJSON_UINT64_C2(0xd1b71758, 0xe219652c),
+ RAPIDJSON_UINT64_C2(0x9c400000, 0x00000000), RAPIDJSON_UINT64_C2(0xe8d4a510, 0x00000000),
+ RAPIDJSON_UINT64_C2(0xad78ebc5, 0xac620000), RAPIDJSON_UINT64_C2(0x813f3978, 0xf8940984),
+ RAPIDJSON_UINT64_C2(0xc097ce7b, 0xc90715b3), RAPIDJSON_UINT64_C2(0x8f7e32ce, 0x7bea5c70),
+ RAPIDJSON_UINT64_C2(0xd5d238a4, 0xabe98068), RAPIDJSON_UINT64_C2(0x9f4f2726, 0x179a2245),
+ RAPIDJSON_UINT64_C2(0xed63a231, 0xd4c4fb27), RAPIDJSON_UINT64_C2(0xb0de6538, 0x8cc8ada8),
+ RAPIDJSON_UINT64_C2(0x83c7088e, 0x1aab65db), RAPIDJSON_UINT64_C2(0xc45d1df9, 0x42711d9a),
+ RAPIDJSON_UINT64_C2(0x924d692c, 0xa61be758), RAPIDJSON_UINT64_C2(0xda01ee64, 0x1a708dea),
+ RAPIDJSON_UINT64_C2(0xa26da399, 0x9aef774a), RAPIDJSON_UINT64_C2(0xf209787b, 0xb47d6b85),
+ RAPIDJSON_UINT64_C2(0xb454e4a1, 0x79dd1877), RAPIDJSON_UINT64_C2(0x865b8692, 0x5b9bc5c2),
+ RAPIDJSON_UINT64_C2(0xc83553c5, 0xc8965d3d), RAPIDJSON_UINT64_C2(0x952ab45c, 0xfa97a0b3),
+ RAPIDJSON_UINT64_C2(0xde469fbd, 0x99a05fe3), RAPIDJSON_UINT64_C2(0xa59bc234, 0xdb398c25),
+ RAPIDJSON_UINT64_C2(0xf6c69a72, 0xa3989f5c), RAPIDJSON_UINT64_C2(0xb7dcbf53, 0x54e9bece),
+ RAPIDJSON_UINT64_C2(0x88fcf317, 0xf22241e2), RAPIDJSON_UINT64_C2(0xcc20ce9b, 0xd35c78a5),
+ RAPIDJSON_UINT64_C2(0x98165af3, 0x7b2153df), RAPIDJSON_UINT64_C2(0xe2a0b5dc, 0x971f303a),
+ RAPIDJSON_UINT64_C2(0xa8d9d153, 0x5ce3b396), RAPIDJSON_UINT64_C2(0xfb9b7cd9, 0xa4a7443c),
+ RAPIDJSON_UINT64_C2(0xbb764c4c, 0xa7a44410), RAPIDJSON_UINT64_C2(0x8bab8eef, 0xb6409c1a),
+ RAPIDJSON_UINT64_C2(0xd01fef10, 0xa657842c), RAPIDJSON_UINT64_C2(0x9b10a4e5, 0xe9913129),
+ RAPIDJSON_UINT64_C2(0xe7109bfb, 0xa19c0c9d), RAPIDJSON_UINT64_C2(0xac2820d9, 0x623bf429),
+ RAPIDJSON_UINT64_C2(0x80444b5e, 0x7aa7cf85), RAPIDJSON_UINT64_C2(0xbf21e440, 0x03acdd2d),
+ RAPIDJSON_UINT64_C2(0x8e679c2f, 0x5e44ff8f), RAPIDJSON_UINT64_C2(0xd433179d, 0x9c8cb841),
+ RAPIDJSON_UINT64_C2(0x9e19db92, 0xb4e31ba9), RAPIDJSON_UINT64_C2(0xeb96bf6e, 0xbadf77d9),
+ RAPIDJSON_UINT64_C2(0xaf87023b, 0x9bf0ee6b)
+ };
+ static const int16_t kCachedPowers_E[] = {
+ -1220, -1193, -1166, -1140, -1113, -1087, -1060, -1034, -1007, -980,
+ -954, -927, -901, -874, -847, -821, -794, -768, -741, -715,
+ -688, -661, -635, -608, -582, -555, -529, -502, -475, -449,
+ -422, -396, -369, -343, -316, -289, -263, -236, -210, -183,
+ -157, -130, -103, -77, -50, -24, 3, 30, 56, 83,
+ 109, 136, 162, 189, 216, 242, 269, 295, 322, 348,
+ 375, 402, 428, 455, 481, 508, 534, 561, 588, 614,
+ 641, 667, 694, 720, 747, 774, 800, 827, 853, 880,
+ 907, 933, 960, 986, 1013, 1039, 1066
+ };
+ RAPIDJSON_ASSERT(index < 87);
+ return DiyFp(kCachedPowers_F[index], kCachedPowers_E[index]);
+}
+
+inline DiyFp GetCachedPower(int e, int* K) {
+
+ //int k = static_cast<int>(ceil((-61 - e) * 0.30102999566398114)) + 374;
+ double dk = (-61 - e) * 0.30102999566398114 + 347; // dk must be positive, so can do ceiling in positive
+ int k = static_cast<int>(dk);
+ if (dk - k > 0.0)
+ k++;
+
+ unsigned index = static_cast<unsigned>((k >> 3) + 1);
+ *K = -(-348 + static_cast<int>(index << 3)); // decimal exponent no need lookup table
+
+ return GetCachedPowerByIndex(index);
+}
+
+inline DiyFp GetCachedPower10(int exp, int *outExp) {
+ RAPIDJSON_ASSERT(exp >= -348);
+ unsigned index = static_cast<unsigned>(exp + 348) / 8u;
+ *outExp = -348 + static_cast<int>(index) * 8;
+ return GetCachedPowerByIndex(index);
+}
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_POP
+#endif
+
+#ifdef __clang__
+RAPIDJSON_DIAG_POP
+RAPIDJSON_DIAG_OFF(padded)
+#endif
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_DIYFP_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_dtoa.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_dtoa.h
new file mode 100644
index 00000000..c3be702c
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_dtoa.h
@@ -0,0 +1,245 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+// This is a C++ header-only implementation of Grisu2 algorithm from the publication:
+// Loitsch, Florian. "Printing floating-point numbers quickly and accurately with
+// integers." ACM Sigplan Notices 45.6 (2010): 233-243.
+
+#ifndef RAPIDJSON_DTOA_
+#define RAPIDJSON_DTOA_
+
+#include "lottie_rapidjson_internal_itoa.h"
+#include "lottie_rapidjson_internal_diyfp.h"
+#include "lottie_rapidjson_internal_ieee754.h"
+
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(effc++)
+RAPIDJSON_DIAG_OFF(array-bounds) // some gcc versions generate wrong warnings https://gcc.gnu.org/bugzilla/show_bug.cgi?id=59124
+#endif
+
+inline void GrisuRound(char* buffer, int len, uint64_t delta, uint64_t rest, uint64_t ten_kappa, uint64_t wp_w) {
+ while (rest < wp_w && delta - rest >= ten_kappa &&
+ (rest + ten_kappa < wp_w || /// closer
+ wp_w - rest > rest + ten_kappa - wp_w)) {
+ buffer[len - 1]--;
+ rest += ten_kappa;
+ }
+}
+
+inline int CountDecimalDigit32(uint32_t n) {
+ // Simple pure C++ implementation was faster than __builtin_clz version in this situation.
+ if (n < 10) return 1;
+ if (n < 100) return 2;
+ if (n < 1000) return 3;
+ if (n < 10000) return 4;
+ if (n < 100000) return 5;
+ if (n < 1000000) return 6;
+ if (n < 10000000) return 7;
+ if (n < 100000000) return 8;
+ // Will not reach 10 digits in DigitGen()
+ //if (n < 1000000000) return 9;
+ //return 10;
+ return 9;
+}
+
+inline void DigitGen(const DiyFp& W, const DiyFp& Mp, uint64_t delta, char* buffer, int* len, int* K) {
+ static const uint32_t kPow10[] = { 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000 };
+ const DiyFp one(uint64_t(1) << -Mp.e, Mp.e);
+ const DiyFp wp_w = Mp - W;
+ uint32_t p1 = static_cast<uint32_t>(Mp.f >> -one.e);
+ uint64_t p2 = Mp.f & (one.f - 1);
+ int kappa = CountDecimalDigit32(p1); // kappa in [0, 9]
+ *len = 0;
+
+ while (kappa > 0) {
+ uint32_t d = 0;
+ switch (kappa) {
+ case 9: d = p1 / 100000000; p1 %= 100000000; break;
+ case 8: d = p1 / 10000000; p1 %= 10000000; break;
+ case 7: d = p1 / 1000000; p1 %= 1000000; break;
+ case 6: d = p1 / 100000; p1 %= 100000; break;
+ case 5: d = p1 / 10000; p1 %= 10000; break;
+ case 4: d = p1 / 1000; p1 %= 1000; break;
+ case 3: d = p1 / 100; p1 %= 100; break;
+ case 2: d = p1 / 10; p1 %= 10; break;
+ case 1: d = p1; p1 = 0; break;
+ default:;
+ }
+ if (d || *len)
+ buffer[(*len)++] = static_cast<char>('0' + static_cast<char>(d));
+ kappa--;
+ uint64_t tmp = (static_cast<uint64_t>(p1) << -one.e) + p2;
+ if (tmp <= delta) {
+ *K += kappa;
+ GrisuRound(buffer, *len, delta, tmp, static_cast<uint64_t>(kPow10[kappa]) << -one.e, wp_w.f);
+ return;
+ }
+ }
+
+ // kappa = 0
+ for (;;) {
+ p2 *= 10;
+ delta *= 10;
+ char d = static_cast<char>(p2 >> -one.e);
+ if (d || *len)
+ buffer[(*len)++] = static_cast<char>('0' + d);
+ p2 &= one.f - 1;
+ kappa--;
+ if (p2 < delta) {
+ *K += kappa;
+ int index = -kappa;
+ GrisuRound(buffer, *len, delta, p2, one.f, wp_w.f * (index < 9 ? kPow10[index] : 0));
+ return;
+ }
+ }
+}
+
+inline void Grisu2(double value, char* buffer, int* length, int* K) {
+ const DiyFp v(value);
+ DiyFp w_m, w_p;
+ v.NormalizedBoundaries(&w_m, &w_p);
+
+ const DiyFp c_mk = GetCachedPower(w_p.e, K);
+ const DiyFp W = v.Normalize() * c_mk;
+ DiyFp Wp = w_p * c_mk;
+ DiyFp Wm = w_m * c_mk;
+ Wm.f++;
+ Wp.f--;
+ DigitGen(W, Wp, Wp.f - Wm.f, buffer, length, K);
+}
+
+inline char* WriteExponent(int K, char* buffer) {
+ if (K < 0) {
+ *buffer++ = '-';
+ K = -K;
+ }
+
+ if (K >= 100) {
+ *buffer++ = static_cast<char>('0' + static_cast<char>(K / 100));
+ K %= 100;
+ const char* d = GetDigitsLut() + K * 2;
+ *buffer++ = d[0];
+ *buffer++ = d[1];
+ }
+ else if (K >= 10) {
+ const char* d = GetDigitsLut() + K * 2;
+ *buffer++ = d[0];
+ *buffer++ = d[1];
+ }
+ else
+ *buffer++ = static_cast<char>('0' + static_cast<char>(K));
+
+ return buffer;
+}
+
+inline char* Prettify(char* buffer, int length, int k, int maxDecimalPlaces) {
+ const int kk = length + k; // 10^(kk-1) <= v < 10^kk
+
+ if (0 <= k && kk <= 21) {
+ // 1234e7 -> 12340000000
+ for (int i = length; i < kk; i++)
+ buffer[i] = '0';
+ buffer[kk] = '.';
+ buffer[kk + 1] = '0';
+ return &buffer[kk + 2];
+ }
+ else if (0 < kk && kk <= 21) {
+ // 1234e-2 -> 12.34
+ std::memmove(&buffer[kk + 1], &buffer[kk], static_cast<size_t>(length - kk));
+ buffer[kk] = '.';
+ if (0 > k + maxDecimalPlaces) {
+ // When maxDecimalPlaces = 2, 1.2345 -> 1.23, 1.102 -> 1.1
+ // Remove extra trailing zeros (at least one) after truncation.
+ for (int i = kk + maxDecimalPlaces; i > kk + 1; i--)
+ if (buffer[i] != '0')
+ return &buffer[i + 1];
+ return &buffer[kk + 2]; // Reserve one zero
+ }
+ else
+ return &buffer[length + 1];
+ }
+ else if (-6 < kk && kk <= 0) {
+ // 1234e-6 -> 0.001234
+ const int offset = 2 - kk;
+ std::memmove(&buffer[offset], &buffer[0], static_cast<size_t>(length));
+ buffer[0] = '0';
+ buffer[1] = '.';
+ for (int i = 2; i < offset; i++)
+ buffer[i] = '0';
+ if (length - kk > maxDecimalPlaces) {
+ // When maxDecimalPlaces = 2, 0.123 -> 0.12, 0.102 -> 0.1
+ // Remove extra trailing zeros (at least one) after truncation.
+ for (int i = maxDecimalPlaces + 1; i > 2; i--)
+ if (buffer[i] != '0')
+ return &buffer[i + 1];
+ return &buffer[3]; // Reserve one zero
+ }
+ else
+ return &buffer[length + offset];
+ }
+ else if (kk < -maxDecimalPlaces) {
+ // Truncate to zero
+ buffer[0] = '0';
+ buffer[1] = '.';
+ buffer[2] = '0';
+ return &buffer[3];
+ }
+ else if (length == 1) {
+ // 1e30
+ buffer[1] = 'e';
+ return WriteExponent(kk - 1, &buffer[2]);
+ }
+ else {
+ // 1234e30 -> 1.234e33
+ std::memmove(&buffer[2], &buffer[1], static_cast<size_t>(length - 1));
+ buffer[1] = '.';
+ buffer[length + 1] = 'e';
+ return WriteExponent(kk - 1, &buffer[0 + length + 2]);
+ }
+}
+
+inline char* dtoa(double value, char* buffer, int maxDecimalPlaces = 324) {
+ RAPIDJSON_ASSERT(maxDecimalPlaces >= 1);
+ Double d(value);
+ if (d.IsZero()) {
+ if (d.Sign())
+ *buffer++ = '-'; // -0.0, Issue #289
+ buffer[0] = '0';
+ buffer[1] = '.';
+ buffer[2] = '0';
+ return &buffer[3];
+ }
+ else {
+ if (value < 0) {
+ *buffer++ = '-';
+ value = -value;
+ }
+ int length, K;
+ Grisu2(value, buffer, &length, &K);
+ return Prettify(buffer, length, K, maxDecimalPlaces);
+ }
+}
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_POP
+#endif
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_DTOA_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_ieee754.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_ieee754.h
new file mode 100644
index 00000000..bb004743
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_ieee754.h
@@ -0,0 +1,78 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_IEEE754_
+#define RAPIDJSON_IEEE754_
+
+#include "lottie_rapidjson_rapidjson.h"
+
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+class Double {
+public:
+ Double() {}
+ Double(double d) : d_(d) {}
+ Double(uint64_t u) : u_(u) {}
+
+ double Value() const { return d_; }
+ uint64_t Uint64Value() const { return u_; }
+
+ double NextPositiveDouble() const {
+ RAPIDJSON_ASSERT(!Sign());
+ return Double(u_ + 1).Value();
+ }
+
+ bool Sign() const { return (u_ & kSignMask) != 0; }
+ uint64_t Significand() const { return u_ & kSignificandMask; }
+ int Exponent() const { return static_cast<int>(((u_ & kExponentMask) >> kSignificandSize) - kExponentBias); }
+
+ bool IsNan() const { return (u_ & kExponentMask) == kExponentMask && Significand() != 0; }
+ bool IsInf() const { return (u_ & kExponentMask) == kExponentMask && Significand() == 0; }
+ bool IsNanOrInf() const { return (u_ & kExponentMask) == kExponentMask; }
+ bool IsNormal() const { return (u_ & kExponentMask) != 0 || Significand() == 0; }
+ bool IsZero() const { return (u_ & (kExponentMask | kSignificandMask)) == 0; }
+
+ uint64_t IntegerSignificand() const { return IsNormal() ? Significand() | kHiddenBit : Significand(); }
+ int IntegerExponent() const { return (IsNormal() ? Exponent() : kDenormalExponent) - kSignificandSize; }
+ uint64_t ToBias() const { return (u_ & kSignMask) ? ~u_ + 1 : u_ | kSignMask; }
+
+ static int EffectiveSignificandSize(int order) {
+ if (order >= -1021)
+ return 53;
+ else if (order <= -1074)
+ return 0;
+ else
+ return order + 1074;
+ }
+
+private:
+ static const int kSignificandSize = 52;
+ static const int kExponentBias = 0x3FF;
+ static const int kDenormalExponent = 1 - kExponentBias;
+ static const uint64_t kSignMask = RAPIDJSON_UINT64_C2(0x80000000, 0x00000000);
+ static const uint64_t kExponentMask = RAPIDJSON_UINT64_C2(0x7FF00000, 0x00000000);
+ static const uint64_t kSignificandMask = RAPIDJSON_UINT64_C2(0x000FFFFF, 0xFFFFFFFF);
+ static const uint64_t kHiddenBit = RAPIDJSON_UINT64_C2(0x00100000, 0x00000000);
+
+ union {
+ double d_;
+ uint64_t u_;
+ };
+};
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_IEEE754_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_itoa.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_itoa.h
new file mode 100644
index 00000000..e63b1308
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_itoa.h
@@ -0,0 +1,308 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_ITOA_
+#define RAPIDJSON_ITOA_
+
+#include "lottie_rapidjson_rapidjson.h"
+
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+inline const char* GetDigitsLut() {
+ static const char cDigitsLut[200] = {
+ '0','0','0','1','0','2','0','3','0','4','0','5','0','6','0','7','0','8','0','9',
+ '1','0','1','1','1','2','1','3','1','4','1','5','1','6','1','7','1','8','1','9',
+ '2','0','2','1','2','2','2','3','2','4','2','5','2','6','2','7','2','8','2','9',
+ '3','0','3','1','3','2','3','3','3','4','3','5','3','6','3','7','3','8','3','9',
+ '4','0','4','1','4','2','4','3','4','4','4','5','4','6','4','7','4','8','4','9',
+ '5','0','5','1','5','2','5','3','5','4','5','5','5','6','5','7','5','8','5','9',
+ '6','0','6','1','6','2','6','3','6','4','6','5','6','6','6','7','6','8','6','9',
+ '7','0','7','1','7','2','7','3','7','4','7','5','7','6','7','7','7','8','7','9',
+ '8','0','8','1','8','2','8','3','8','4','8','5','8','6','8','7','8','8','8','9',
+ '9','0','9','1','9','2','9','3','9','4','9','5','9','6','9','7','9','8','9','9'
+ };
+ return cDigitsLut;
+}
+
+inline char* u32toa(uint32_t value, char* buffer) {
+ RAPIDJSON_ASSERT(buffer != 0);
+
+ const char* cDigitsLut = GetDigitsLut();
+
+ if (value < 10000) {
+ const uint32_t d1 = (value / 100) << 1;
+ const uint32_t d2 = (value % 100) << 1;
+
+ if (value >= 1000)
+ *buffer++ = cDigitsLut[d1];
+ if (value >= 100)
+ *buffer++ = cDigitsLut[d1 + 1];
+ if (value >= 10)
+ *buffer++ = cDigitsLut[d2];
+ *buffer++ = cDigitsLut[d2 + 1];
+ }
+ else if (value < 100000000) {
+ // value = bbbbcccc
+ const uint32_t b = value / 10000;
+ const uint32_t c = value % 10000;
+
+ const uint32_t d1 = (b / 100) << 1;
+ const uint32_t d2 = (b % 100) << 1;
+
+ const uint32_t d3 = (c / 100) << 1;
+ const uint32_t d4 = (c % 100) << 1;
+
+ if (value >= 10000000)
+ *buffer++ = cDigitsLut[d1];
+ if (value >= 1000000)
+ *buffer++ = cDigitsLut[d1 + 1];
+ if (value >= 100000)
+ *buffer++ = cDigitsLut[d2];
+ *buffer++ = cDigitsLut[d2 + 1];
+
+ *buffer++ = cDigitsLut[d3];
+ *buffer++ = cDigitsLut[d3 + 1];
+ *buffer++ = cDigitsLut[d4];
+ *buffer++ = cDigitsLut[d4 + 1];
+ }
+ else {
+ // value = aabbbbcccc in decimal
+
+ const uint32_t a = value / 100000000; // 1 to 42
+ value %= 100000000;
+
+ if (a >= 10) {
+ const unsigned i = a << 1;
+ *buffer++ = cDigitsLut[i];
+ *buffer++ = cDigitsLut[i + 1];
+ }
+ else
+ *buffer++ = static_cast<char>('0' + static_cast<char>(a));
+
+ const uint32_t b = value / 10000; // 0 to 9999
+ const uint32_t c = value % 10000; // 0 to 9999
+
+ const uint32_t d1 = (b / 100) << 1;
+ const uint32_t d2 = (b % 100) << 1;
+
+ const uint32_t d3 = (c / 100) << 1;
+ const uint32_t d4 = (c % 100) << 1;
+
+ *buffer++ = cDigitsLut[d1];
+ *buffer++ = cDigitsLut[d1 + 1];
+ *buffer++ = cDigitsLut[d2];
+ *buffer++ = cDigitsLut[d2 + 1];
+ *buffer++ = cDigitsLut[d3];
+ *buffer++ = cDigitsLut[d3 + 1];
+ *buffer++ = cDigitsLut[d4];
+ *buffer++ = cDigitsLut[d4 + 1];
+ }
+ return buffer;
+}
+
+inline char* i32toa(int32_t value, char* buffer) {
+ RAPIDJSON_ASSERT(buffer != 0);
+ uint32_t u = static_cast<uint32_t>(value);
+ if (value < 0) {
+ *buffer++ = '-';
+ u = ~u + 1;
+ }
+
+ return u32toa(u, buffer);
+}
+
+inline char* u64toa(uint64_t value, char* buffer) {
+ RAPIDJSON_ASSERT(buffer != 0);
+ const char* cDigitsLut = GetDigitsLut();
+ const uint64_t kTen8 = 100000000;
+ const uint64_t kTen9 = kTen8 * 10;
+ const uint64_t kTen10 = kTen8 * 100;
+ const uint64_t kTen11 = kTen8 * 1000;
+ const uint64_t kTen12 = kTen8 * 10000;
+ const uint64_t kTen13 = kTen8 * 100000;
+ const uint64_t kTen14 = kTen8 * 1000000;
+ const uint64_t kTen15 = kTen8 * 10000000;
+ const uint64_t kTen16 = kTen8 * kTen8;
+
+ if (value < kTen8) {
+ uint32_t v = static_cast<uint32_t>(value);
+ if (v < 10000) {
+ const uint32_t d1 = (v / 100) << 1;
+ const uint32_t d2 = (v % 100) << 1;
+
+ if (v >= 1000)
+ *buffer++ = cDigitsLut[d1];
+ if (v >= 100)
+ *buffer++ = cDigitsLut[d1 + 1];
+ if (v >= 10)
+ *buffer++ = cDigitsLut[d2];
+ *buffer++ = cDigitsLut[d2 + 1];
+ }
+ else {
+ // value = bbbbcccc
+ const uint32_t b = v / 10000;
+ const uint32_t c = v % 10000;
+
+ const uint32_t d1 = (b / 100) << 1;
+ const uint32_t d2 = (b % 100) << 1;
+
+ const uint32_t d3 = (c / 100) << 1;
+ const uint32_t d4 = (c % 100) << 1;
+
+ if (value >= 10000000)
+ *buffer++ = cDigitsLut[d1];
+ if (value >= 1000000)
+ *buffer++ = cDigitsLut[d1 + 1];
+ if (value >= 100000)
+ *buffer++ = cDigitsLut[d2];
+ *buffer++ = cDigitsLut[d2 + 1];
+
+ *buffer++ = cDigitsLut[d3];
+ *buffer++ = cDigitsLut[d3 + 1];
+ *buffer++ = cDigitsLut[d4];
+ *buffer++ = cDigitsLut[d4 + 1];
+ }
+ }
+ else if (value < kTen16) {
+ const uint32_t v0 = static_cast<uint32_t>(value / kTen8);
+ const uint32_t v1 = static_cast<uint32_t>(value % kTen8);
+
+ const uint32_t b0 = v0 / 10000;
+ const uint32_t c0 = v0 % 10000;
+
+ const uint32_t d1 = (b0 / 100) << 1;
+ const uint32_t d2 = (b0 % 100) << 1;
+
+ const uint32_t d3 = (c0 / 100) << 1;
+ const uint32_t d4 = (c0 % 100) << 1;
+
+ const uint32_t b1 = v1 / 10000;
+ const uint32_t c1 = v1 % 10000;
+
+ const uint32_t d5 = (b1 / 100) << 1;
+ const uint32_t d6 = (b1 % 100) << 1;
+
+ const uint32_t d7 = (c1 / 100) << 1;
+ const uint32_t d8 = (c1 % 100) << 1;
+
+ if (value >= kTen15)
+ *buffer++ = cDigitsLut[d1];
+ if (value >= kTen14)
+ *buffer++ = cDigitsLut[d1 + 1];
+ if (value >= kTen13)
+ *buffer++ = cDigitsLut[d2];
+ if (value >= kTen12)
+ *buffer++ = cDigitsLut[d2 + 1];
+ if (value >= kTen11)
+ *buffer++ = cDigitsLut[d3];
+ if (value >= kTen10)
+ *buffer++ = cDigitsLut[d3 + 1];
+ if (value >= kTen9)
+ *buffer++ = cDigitsLut[d4];
+
+ *buffer++ = cDigitsLut[d4 + 1];
+ *buffer++ = cDigitsLut[d5];
+ *buffer++ = cDigitsLut[d5 + 1];
+ *buffer++ = cDigitsLut[d6];
+ *buffer++ = cDigitsLut[d6 + 1];
+ *buffer++ = cDigitsLut[d7];
+ *buffer++ = cDigitsLut[d7 + 1];
+ *buffer++ = cDigitsLut[d8];
+ *buffer++ = cDigitsLut[d8 + 1];
+ }
+ else {
+ const uint32_t a = static_cast<uint32_t>(value / kTen16); // 1 to 1844
+ value %= kTen16;
+
+ if (a < 10)
+ *buffer++ = static_cast<char>('0' + static_cast<char>(a));
+ else if (a < 100) {
+ const uint32_t i = a << 1;
+ *buffer++ = cDigitsLut[i];
+ *buffer++ = cDigitsLut[i + 1];
+ }
+ else if (a < 1000) {
+ *buffer++ = static_cast<char>('0' + static_cast<char>(a / 100));
+
+ const uint32_t i = (a % 100) << 1;
+ *buffer++ = cDigitsLut[i];
+ *buffer++ = cDigitsLut[i + 1];
+ }
+ else {
+ const uint32_t i = (a / 100) << 1;
+ const uint32_t j = (a % 100) << 1;
+ *buffer++ = cDigitsLut[i];
+ *buffer++ = cDigitsLut[i + 1];
+ *buffer++ = cDigitsLut[j];
+ *buffer++ = cDigitsLut[j + 1];
+ }
+
+ const uint32_t v0 = static_cast<uint32_t>(value / kTen8);
+ const uint32_t v1 = static_cast<uint32_t>(value % kTen8);
+
+ const uint32_t b0 = v0 / 10000;
+ const uint32_t c0 = v0 % 10000;
+
+ const uint32_t d1 = (b0 / 100) << 1;
+ const uint32_t d2 = (b0 % 100) << 1;
+
+ const uint32_t d3 = (c0 / 100) << 1;
+ const uint32_t d4 = (c0 % 100) << 1;
+
+ const uint32_t b1 = v1 / 10000;
+ const uint32_t c1 = v1 % 10000;
+
+ const uint32_t d5 = (b1 / 100) << 1;
+ const uint32_t d6 = (b1 % 100) << 1;
+
+ const uint32_t d7 = (c1 / 100) << 1;
+ const uint32_t d8 = (c1 % 100) << 1;
+
+ *buffer++ = cDigitsLut[d1];
+ *buffer++ = cDigitsLut[d1 + 1];
+ *buffer++ = cDigitsLut[d2];
+ *buffer++ = cDigitsLut[d2 + 1];
+ *buffer++ = cDigitsLut[d3];
+ *buffer++ = cDigitsLut[d3 + 1];
+ *buffer++ = cDigitsLut[d4];
+ *buffer++ = cDigitsLut[d4 + 1];
+ *buffer++ = cDigitsLut[d5];
+ *buffer++ = cDigitsLut[d5 + 1];
+ *buffer++ = cDigitsLut[d6];
+ *buffer++ = cDigitsLut[d6 + 1];
+ *buffer++ = cDigitsLut[d7];
+ *buffer++ = cDigitsLut[d7 + 1];
+ *buffer++ = cDigitsLut[d8];
+ *buffer++ = cDigitsLut[d8 + 1];
+ }
+
+ return buffer;
+}
+
+inline char* i64toa(int64_t value, char* buffer) {
+ RAPIDJSON_ASSERT(buffer != 0);
+ uint64_t u = static_cast<uint64_t>(value);
+ if (value < 0) {
+ *buffer++ = '-';
+ u = ~u + 1;
+ }
+
+ return u64toa(u, buffer);
+}
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_ITOA_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_meta.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_meta.h
new file mode 100644
index 00000000..dfc590e6
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_meta.h
@@ -0,0 +1,186 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_INTERNAL_META_H_
+#define RAPIDJSON_INTERNAL_META_H_
+
+#include "lottie_rapidjson_rapidjson.h"
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(effc++)
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(6334)
+#endif
+
+#if RAPIDJSON_HAS_CXX11_TYPETRAITS
+#include <type_traits>
+#endif
+
+//@cond RAPIDJSON_INTERNAL
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+// Helper to wrap/convert arbitrary types to void, useful for arbitrary type matching
+template <typename T> struct Void { typedef void Type; };
+
+///////////////////////////////////////////////////////////////////////////////
+// BoolType, TrueType, FalseType
+//
+template <bool Cond> struct BoolType {
+ static const bool Value = Cond;
+ typedef BoolType Type;
+};
+typedef BoolType<true> TrueType;
+typedef BoolType<false> FalseType;
+
+
+///////////////////////////////////////////////////////////////////////////////
+// SelectIf, BoolExpr, NotExpr, AndExpr, OrExpr
+//
+
+template <bool C> struct SelectIfImpl { template <typename T1, typename T2> struct Apply { typedef T1 Type; }; };
+template <> struct SelectIfImpl<false> { template <typename T1, typename T2> struct Apply { typedef T2 Type; }; };
+template <bool C, typename T1, typename T2> struct SelectIfCond : SelectIfImpl<C>::template Apply<T1,T2> {};
+template <typename C, typename T1, typename T2> struct SelectIf : SelectIfCond<C::Value, T1, T2> {};
+
+template <bool Cond1, bool Cond2> struct AndExprCond : FalseType {};
+template <> struct AndExprCond<true, true> : TrueType {};
+template <bool Cond1, bool Cond2> struct OrExprCond : TrueType {};
+template <> struct OrExprCond<false, false> : FalseType {};
+
+template <typename C> struct BoolExpr : SelectIf<C,TrueType,FalseType>::Type {};
+template <typename C> struct NotExpr : SelectIf<C,FalseType,TrueType>::Type {};
+template <typename C1, typename C2> struct AndExpr : AndExprCond<C1::Value, C2::Value>::Type {};
+template <typename C1, typename C2> struct OrExpr : OrExprCond<C1::Value, C2::Value>::Type {};
+
+
+///////////////////////////////////////////////////////////////////////////////
+// AddConst, MaybeAddConst, RemoveConst
+template <typename T> struct AddConst { typedef const T Type; };
+template <bool Constify, typename T> struct MaybeAddConst : SelectIfCond<Constify, const T, T> {};
+template <typename T> struct RemoveConst { typedef T Type; };
+template <typename T> struct RemoveConst<const T> { typedef T Type; };
+
+
+///////////////////////////////////////////////////////////////////////////////
+// IsSame, IsConst, IsMoreConst, IsPointer
+//
+template <typename T, typename U> struct IsSame : FalseType {};
+template <typename T> struct IsSame<T, T> : TrueType {};
+
+template <typename T> struct IsConst : FalseType {};
+template <typename T> struct IsConst<const T> : TrueType {};
+
+template <typename CT, typename T>
+struct IsMoreConst
+ : AndExpr<IsSame<typename RemoveConst<CT>::Type, typename RemoveConst<T>::Type>,
+ BoolType<IsConst<CT>::Value >= IsConst<T>::Value> >::Type {};
+
+template <typename T> struct IsPointer : FalseType {};
+template <typename T> struct IsPointer<T*> : TrueType {};
+
+///////////////////////////////////////////////////////////////////////////////
+// IsBaseOf
+//
+#if RAPIDJSON_HAS_CXX11_TYPETRAITS
+
+template <typename B, typename D> struct IsBaseOf
+ : BoolType< ::std::is_base_of<B,D>::value> {};
+
+#else // simplified version adopted from Boost
+
+template<typename B, typename D> struct IsBaseOfImpl {
+ RAPIDJSON_STATIC_ASSERT(sizeof(B) != 0);
+ RAPIDJSON_STATIC_ASSERT(sizeof(D) != 0);
+
+ typedef char (&Yes)[1];
+ typedef char (&No) [2];
+
+ template <typename T>
+ static Yes Check(const D*, T);
+ static No Check(const B*, int);
+
+ struct Host {
+ operator const B*() const;
+ operator const D*();
+ };
+
+ enum { Value = (sizeof(Check(Host(), 0)) == sizeof(Yes)) };
+};
+
+template <typename B, typename D> struct IsBaseOf
+ : OrExpr<IsSame<B, D>, BoolExpr<IsBaseOfImpl<B, D> > >::Type {};
+
+#endif // RAPIDJSON_HAS_CXX11_TYPETRAITS
+
+
+//////////////////////////////////////////////////////////////////////////
+// EnableIf / DisableIf
+//
+template <bool Condition, typename T = void> struct EnableIfCond { typedef T Type; };
+template <typename T> struct EnableIfCond<false, T> { /* empty */ };
+
+template <bool Condition, typename T = void> struct DisableIfCond { typedef T Type; };
+template <typename T> struct DisableIfCond<true, T> { /* empty */ };
+
+template <typename Condition, typename T = void>
+struct EnableIf : EnableIfCond<Condition::Value, T> {};
+
+template <typename Condition, typename T = void>
+struct DisableIf : DisableIfCond<Condition::Value, T> {};
+
+// SFINAE helpers
+struct SfinaeTag {};
+template <typename T> struct RemoveSfinaeTag;
+template <typename T> struct RemoveSfinaeTag<SfinaeTag&(*)(T)> { typedef T Type; };
+
+#define RAPIDJSON_REMOVEFPTR_(type) \
+ typename ::RAPIDJSON_NAMESPACE::internal::RemoveSfinaeTag \
+ < ::RAPIDJSON_NAMESPACE::internal::SfinaeTag&(*) type>::Type
+
+#define RAPIDJSON_ENABLEIF(cond) \
+ typename ::RAPIDJSON_NAMESPACE::internal::EnableIf \
+ <RAPIDJSON_REMOVEFPTR_(cond)>::Type * = NULL
+
+#define RAPIDJSON_DISABLEIF(cond) \
+ typename ::RAPIDJSON_NAMESPACE::internal::DisableIf \
+ <RAPIDJSON_REMOVEFPTR_(cond)>::Type * = NULL
+
+#define RAPIDJSON_ENABLEIF_RETURN(cond,returntype) \
+ typename ::RAPIDJSON_NAMESPACE::internal::EnableIf \
+ <RAPIDJSON_REMOVEFPTR_(cond), \
+ RAPIDJSON_REMOVEFPTR_(returntype)>::Type
+
+#define RAPIDJSON_DISABLEIF_RETURN(cond,returntype) \
+ typename ::RAPIDJSON_NAMESPACE::internal::DisableIf \
+ <RAPIDJSON_REMOVEFPTR_(cond), \
+ RAPIDJSON_REMOVEFPTR_(returntype)>::Type
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+//@endcond
+
+#if defined(_MSC_VER) && !defined(__clang__)
+RAPIDJSON_DIAG_POP
+#endif
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_POP
+#endif
+
+#endif // RAPIDJSON_INTERNAL_META_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_pow10.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_pow10.h
new file mode 100644
index 00000000..18fae6fb
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_pow10.h
@@ -0,0 +1,55 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_POW10_
+#define RAPIDJSON_POW10_
+
+#include "lottie_rapidjson_rapidjson.h"
+
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+//! Computes integer powers of 10 in double (10.0^n).
+/*! This function uses lookup table for fast and accurate results.
+ \param n non-negative exponent. Must <= 308.
+ \return 10.0^n
+*/
+inline double Pow10(int n) {
+ static const double e[] = { // 1e-0...1e308: 309 * 8 bytes = 2472 bytes
+ 1e+0,
+ 1e+1, 1e+2, 1e+3, 1e+4, 1e+5, 1e+6, 1e+7, 1e+8, 1e+9, 1e+10, 1e+11, 1e+12, 1e+13, 1e+14, 1e+15, 1e+16, 1e+17, 1e+18, 1e+19, 1e+20,
+ 1e+21, 1e+22, 1e+23, 1e+24, 1e+25, 1e+26, 1e+27, 1e+28, 1e+29, 1e+30, 1e+31, 1e+32, 1e+33, 1e+34, 1e+35, 1e+36, 1e+37, 1e+38, 1e+39, 1e+40,
+ 1e+41, 1e+42, 1e+43, 1e+44, 1e+45, 1e+46, 1e+47, 1e+48, 1e+49, 1e+50, 1e+51, 1e+52, 1e+53, 1e+54, 1e+55, 1e+56, 1e+57, 1e+58, 1e+59, 1e+60,
+ 1e+61, 1e+62, 1e+63, 1e+64, 1e+65, 1e+66, 1e+67, 1e+68, 1e+69, 1e+70, 1e+71, 1e+72, 1e+73, 1e+74, 1e+75, 1e+76, 1e+77, 1e+78, 1e+79, 1e+80,
+ 1e+81, 1e+82, 1e+83, 1e+84, 1e+85, 1e+86, 1e+87, 1e+88, 1e+89, 1e+90, 1e+91, 1e+92, 1e+93, 1e+94, 1e+95, 1e+96, 1e+97, 1e+98, 1e+99, 1e+100,
+ 1e+101,1e+102,1e+103,1e+104,1e+105,1e+106,1e+107,1e+108,1e+109,1e+110,1e+111,1e+112,1e+113,1e+114,1e+115,1e+116,1e+117,1e+118,1e+119,1e+120,
+ 1e+121,1e+122,1e+123,1e+124,1e+125,1e+126,1e+127,1e+128,1e+129,1e+130,1e+131,1e+132,1e+133,1e+134,1e+135,1e+136,1e+137,1e+138,1e+139,1e+140,
+ 1e+141,1e+142,1e+143,1e+144,1e+145,1e+146,1e+147,1e+148,1e+149,1e+150,1e+151,1e+152,1e+153,1e+154,1e+155,1e+156,1e+157,1e+158,1e+159,1e+160,
+ 1e+161,1e+162,1e+163,1e+164,1e+165,1e+166,1e+167,1e+168,1e+169,1e+170,1e+171,1e+172,1e+173,1e+174,1e+175,1e+176,1e+177,1e+178,1e+179,1e+180,
+ 1e+181,1e+182,1e+183,1e+184,1e+185,1e+186,1e+187,1e+188,1e+189,1e+190,1e+191,1e+192,1e+193,1e+194,1e+195,1e+196,1e+197,1e+198,1e+199,1e+200,
+ 1e+201,1e+202,1e+203,1e+204,1e+205,1e+206,1e+207,1e+208,1e+209,1e+210,1e+211,1e+212,1e+213,1e+214,1e+215,1e+216,1e+217,1e+218,1e+219,1e+220,
+ 1e+221,1e+222,1e+223,1e+224,1e+225,1e+226,1e+227,1e+228,1e+229,1e+230,1e+231,1e+232,1e+233,1e+234,1e+235,1e+236,1e+237,1e+238,1e+239,1e+240,
+ 1e+241,1e+242,1e+243,1e+244,1e+245,1e+246,1e+247,1e+248,1e+249,1e+250,1e+251,1e+252,1e+253,1e+254,1e+255,1e+256,1e+257,1e+258,1e+259,1e+260,
+ 1e+261,1e+262,1e+263,1e+264,1e+265,1e+266,1e+267,1e+268,1e+269,1e+270,1e+271,1e+272,1e+273,1e+274,1e+275,1e+276,1e+277,1e+278,1e+279,1e+280,
+ 1e+281,1e+282,1e+283,1e+284,1e+285,1e+286,1e+287,1e+288,1e+289,1e+290,1e+291,1e+292,1e+293,1e+294,1e+295,1e+296,1e+297,1e+298,1e+299,1e+300,
+ 1e+301,1e+302,1e+303,1e+304,1e+305,1e+306,1e+307,1e+308
+ };
+ RAPIDJSON_ASSERT(n >= 0 && n <= 308);
+ return e[n];
+}
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_POW10_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_regex.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_regex.h
new file mode 100644
index 00000000..422971b4
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_regex.h
@@ -0,0 +1,739 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_INTERNAL_REGEX_H_
+#define RAPIDJSON_INTERNAL_REGEX_H_
+
+#include "lottie_rapidjson_allocators.h"
+#include "lottie_rapidjson_stream.h"
+#include "lottie_rapidjson_internal_stack.h"
+
+#ifdef __clang__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(padded)
+RAPIDJSON_DIAG_OFF(switch-enum)
+#elif defined(_MSC_VER)
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated
+#endif
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(effc++)
+#endif
+
+#ifndef RAPIDJSON_REGEX_VERBOSE
+#define RAPIDJSON_REGEX_VERBOSE 0
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+///////////////////////////////////////////////////////////////////////////////
+// DecodedStream
+
+template <typename SourceStream, typename Encoding>
+class DecodedStream {
+public:
+ DecodedStream(SourceStream& ss) : ss_(ss), codepoint_() { Decode(); }
+ unsigned Peek() { return codepoint_; }
+ unsigned Take() {
+ unsigned c = codepoint_;
+ if (c) // No further decoding when '\0'
+ Decode();
+ return c;
+ }
+
+private:
+ void Decode() {
+ if (!Encoding::Decode(ss_, &codepoint_))
+ codepoint_ = 0;
+ }
+
+ SourceStream& ss_;
+ unsigned codepoint_;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// GenericRegex
+
+static const SizeType kRegexInvalidState = ~SizeType(0); //!< Represents an invalid index in GenericRegex::State::out, out1
+static const SizeType kRegexInvalidRange = ~SizeType(0);
+
+template <typename Encoding, typename Allocator>
+class GenericRegexSearch;
+
+//! Regular expression engine with subset of ECMAscript grammar.
+/*!
+ Supported regular expression syntax:
+ - \c ab Concatenation
+ - \c a|b Alternation
+ - \c a? Zero or one
+ - \c a* Zero or more
+ - \c a+ One or more
+ - \c a{3} Exactly 3 times
+ - \c a{3,} At least 3 times
+ - \c a{3,5} 3 to 5 times
+ - \c (ab) Grouping
+ - \c ^a At the beginning
+ - \c a$ At the end
+ - \c . Any character
+ - \c [abc] Character classes
+ - \c [a-c] Character class range
+ - \c [a-z0-9_] Character class combination
+ - \c [^abc] Negated character classes
+ - \c [^a-c] Negated character class range
+ - \c [\b] Backspace (U+0008)
+ - \c \\| \\\\ ... Escape characters
+ - \c \\f Form feed (U+000C)
+ - \c \\n Line feed (U+000A)
+ - \c \\r Carriage return (U+000D)
+ - \c \\t Tab (U+0009)
+ - \c \\v Vertical tab (U+000B)
+
+ \note This is a Thompson NFA engine, implemented with reference to
+ Cox, Russ. "Regular Expression Matching Can Be Simple And Fast (but is slow in Java, Perl, PHP, Python, Ruby,...).",
+ https://swtch.com/~rsc/regexp/regexp1.html
+*/
+template <typename Encoding, typename Allocator = CrtAllocator>
+class GenericRegex {
+public:
+ typedef Encoding EncodingType;
+ typedef typename Encoding::Ch Ch;
+ template <typename, typename> friend class GenericRegexSearch;
+
+ GenericRegex(const Ch* source, Allocator* allocator = 0) :
+ ownAllocator_(allocator ? 0 : RAPIDJSON_NEW(Allocator)()), allocator_(allocator ? allocator : ownAllocator_),
+ states_(allocator_, 256), ranges_(allocator_, 256), root_(kRegexInvalidState), stateCount_(), rangeCount_(),
+ anchorBegin_(), anchorEnd_()
+ {
+ GenericStringStream<Encoding> ss(source);
+ DecodedStream<GenericStringStream<Encoding>, Encoding> ds(ss);
+ Parse(ds);
+ }
+
+ ~GenericRegex()
+ {
+ RAPIDJSON_DELETE(ownAllocator_);
+ }
+
+ bool IsValid() const {
+ return root_ != kRegexInvalidState;
+ }
+
+private:
+ enum Operator {
+ kZeroOrOne,
+ kZeroOrMore,
+ kOneOrMore,
+ kConcatenation,
+ kAlternation,
+ kLeftParenthesis
+ };
+
+ static const unsigned kAnyCharacterClass = 0xFFFFFFFF; //!< For '.'
+ static const unsigned kRangeCharacterClass = 0xFFFFFFFE;
+ static const unsigned kRangeNegationFlag = 0x80000000;
+
+ struct Range {
+ unsigned start; //
+ unsigned end;
+ SizeType next;
+ };
+
+ struct State {
+ SizeType out; //!< Equals to kInvalid for matching state
+ SizeType out1; //!< Equals to non-kInvalid for split
+ SizeType rangeStart;
+ unsigned codepoint;
+ };
+
+ struct Frag {
+ Frag(SizeType s, SizeType o, SizeType m) : start(s), out(o), minIndex(m) {}
+ SizeType start;
+ SizeType out; //!< link-list of all output states
+ SizeType minIndex;
+ };
+
+ State& GetState(SizeType index) {
+ RAPIDJSON_ASSERT(index < stateCount_);
+ return states_.template Bottom<State>()[index];
+ }
+
+ const State& GetState(SizeType index) const {
+ RAPIDJSON_ASSERT(index < stateCount_);
+ return states_.template Bottom<State>()[index];
+ }
+
+ Range& GetRange(SizeType index) {
+ RAPIDJSON_ASSERT(index < rangeCount_);
+ return ranges_.template Bottom<Range>()[index];
+ }
+
+ const Range& GetRange(SizeType index) const {
+ RAPIDJSON_ASSERT(index < rangeCount_);
+ return ranges_.template Bottom<Range>()[index];
+ }
+
+ template <typename InputStream>
+ void Parse(DecodedStream<InputStream, Encoding>& ds) {
+ Stack<Allocator> operandStack(allocator_, 256); // Frag
+ Stack<Allocator> operatorStack(allocator_, 256); // Operator
+ Stack<Allocator> atomCountStack(allocator_, 256); // unsigned (Atom per parenthesis)
+
+ *atomCountStack.template Push<unsigned>() = 0;
+
+ unsigned codepoint;
+ while (ds.Peek() != 0) {
+ switch (codepoint = ds.Take()) {
+ case '^':
+ anchorBegin_ = true;
+ break;
+
+ case '$':
+ anchorEnd_ = true;
+ break;
+
+ case '|':
+ while (!operatorStack.Empty() && *operatorStack.template Top<Operator>() < kAlternation)
+ if (!Eval(operandStack, *operatorStack.template Pop<Operator>(1)))
+ return;
+ *operatorStack.template Push<Operator>() = kAlternation;
+ *atomCountStack.template Top<unsigned>() = 0;
+ break;
+
+ case '(':
+ *operatorStack.template Push<Operator>() = kLeftParenthesis;
+ *atomCountStack.template Push<unsigned>() = 0;
+ break;
+
+ case ')':
+ while (!operatorStack.Empty() && *operatorStack.template Top<Operator>() != kLeftParenthesis)
+ if (!Eval(operandStack, *operatorStack.template Pop<Operator>(1)))
+ return;
+ if (operatorStack.Empty())
+ return;
+ operatorStack.template Pop<Operator>(1);
+ atomCountStack.template Pop<unsigned>(1);
+ ImplicitConcatenation(atomCountStack, operatorStack);
+ break;
+
+ case '?':
+ if (!Eval(operandStack, kZeroOrOne))
+ return;
+ break;
+
+ case '*':
+ if (!Eval(operandStack, kZeroOrMore))
+ return;
+ break;
+
+ case '+':
+ if (!Eval(operandStack, kOneOrMore))
+ return;
+ break;
+
+ case '{':
+ {
+ unsigned n, m;
+ if (!ParseUnsigned(ds, &n))
+ return;
+
+ if (ds.Peek() == ',') {
+ ds.Take();
+ if (ds.Peek() == '}')
+ m = kInfinityQuantifier;
+ else if (!ParseUnsigned(ds, &m) || m < n)
+ return;
+ }
+ else
+ m = n;
+
+ if (!EvalQuantifier(operandStack, n, m) || ds.Peek() != '}')
+ return;
+ ds.Take();
+ }
+ break;
+
+ case '.':
+ PushOperand(operandStack, kAnyCharacterClass);
+ ImplicitConcatenation(atomCountStack, operatorStack);
+ break;
+
+ case '[':
+ {
+ SizeType range;
+ if (!ParseRange(ds, &range))
+ return;
+ SizeType s = NewState(kRegexInvalidState, kRegexInvalidState, kRangeCharacterClass);
+ GetState(s).rangeStart = range;
+ *operandStack.template Push<Frag>() = Frag(s, s, s);
+ }
+ ImplicitConcatenation(atomCountStack, operatorStack);
+ break;
+
+ case '\\': // Escape character
+ if (!CharacterEscape(ds, &codepoint))
+ return; // Unsupported escape character
+ // fall through to default
+ RAPIDJSON_DELIBERATE_FALLTHROUGH;
+
+ default: // Pattern character
+ PushOperand(operandStack, codepoint);
+ ImplicitConcatenation(atomCountStack, operatorStack);
+ }
+ }
+
+ while (!operatorStack.Empty())
+ if (!Eval(operandStack, *operatorStack.template Pop<Operator>(1)))
+ return;
+
+ // Link the operand to matching state.
+ if (operandStack.GetSize() == sizeof(Frag)) {
+ Frag* e = operandStack.template Pop<Frag>(1);
+ Patch(e->out, NewState(kRegexInvalidState, kRegexInvalidState, 0));
+ root_ = e->start;
+
+#if RAPIDJSON_REGEX_VERBOSE
+ printf("root: %d\n", root_);
+ for (SizeType i = 0; i < stateCount_ ; i++) {
+ State& s = GetState(i);
+ printf("[%2d] out: %2d out1: %2d c: '%c'\n", i, s.out, s.out1, (char)s.codepoint);
+ }
+ printf("\n");
+#endif
+ }
+ }
+
+ SizeType NewState(SizeType out, SizeType out1, unsigned codepoint) {
+ State* s = states_.template Push<State>();
+ s->out = out;
+ s->out1 = out1;
+ s->codepoint = codepoint;
+ s->rangeStart = kRegexInvalidRange;
+ return stateCount_++;
+ }
+
+ void PushOperand(Stack<Allocator>& operandStack, unsigned codepoint) {
+ SizeType s = NewState(kRegexInvalidState, kRegexInvalidState, codepoint);
+ *operandStack.template Push<Frag>() = Frag(s, s, s);
+ }
+
+ void ImplicitConcatenation(Stack<Allocator>& atomCountStack, Stack<Allocator>& operatorStack) {
+ if (*atomCountStack.template Top<unsigned>())
+ *operatorStack.template Push<Operator>() = kConcatenation;
+ (*atomCountStack.template Top<unsigned>())++;
+ }
+
+ SizeType Append(SizeType l1, SizeType l2) {
+ SizeType old = l1;
+ while (GetState(l1).out != kRegexInvalidState)
+ l1 = GetState(l1).out;
+ GetState(l1).out = l2;
+ return old;
+ }
+
+ void Patch(SizeType l, SizeType s) {
+ for (SizeType next; l != kRegexInvalidState; l = next) {
+ next = GetState(l).out;
+ GetState(l).out = s;
+ }
+ }
+
+ bool Eval(Stack<Allocator>& operandStack, Operator op) {
+ switch (op) {
+ case kConcatenation:
+ RAPIDJSON_ASSERT(operandStack.GetSize() >= sizeof(Frag) * 2);
+ {
+ Frag e2 = *operandStack.template Pop<Frag>(1);
+ Frag e1 = *operandStack.template Pop<Frag>(1);
+ Patch(e1.out, e2.start);
+ *operandStack.template Push<Frag>() = Frag(e1.start, e2.out, Min(e1.minIndex, e2.minIndex));
+ }
+ return true;
+
+ case kAlternation:
+ if (operandStack.GetSize() >= sizeof(Frag) * 2) {
+ Frag e2 = *operandStack.template Pop<Frag>(1);
+ Frag e1 = *operandStack.template Pop<Frag>(1);
+ SizeType s = NewState(e1.start, e2.start, 0);
+ *operandStack.template Push<Frag>() = Frag(s, Append(e1.out, e2.out), Min(e1.minIndex, e2.minIndex));
+ return true;
+ }
+ return false;
+
+ case kZeroOrOne:
+ if (operandStack.GetSize() >= sizeof(Frag)) {
+ Frag e = *operandStack.template Pop<Frag>(1);
+ SizeType s = NewState(kRegexInvalidState, e.start, 0);
+ *operandStack.template Push<Frag>() = Frag(s, Append(e.out, s), e.minIndex);
+ return true;
+ }
+ return false;
+
+ case kZeroOrMore:
+ if (operandStack.GetSize() >= sizeof(Frag)) {
+ Frag e = *operandStack.template Pop<Frag>(1);
+ SizeType s = NewState(kRegexInvalidState, e.start, 0);
+ Patch(e.out, s);
+ *operandStack.template Push<Frag>() = Frag(s, s, e.minIndex);
+ return true;
+ }
+ return false;
+
+ case kOneOrMore:
+ if (operandStack.GetSize() >= sizeof(Frag)) {
+ Frag e = *operandStack.template Pop<Frag>(1);
+ SizeType s = NewState(kRegexInvalidState, e.start, 0);
+ Patch(e.out, s);
+ *operandStack.template Push<Frag>() = Frag(e.start, s, e.minIndex);
+ return true;
+ }
+ return false;
+
+ default:
+ // syntax error (e.g. unclosed kLeftParenthesis)
+ return false;
+ }
+ }
+
+ bool EvalQuantifier(Stack<Allocator>& operandStack, unsigned n, unsigned m) {
+ RAPIDJSON_ASSERT(n <= m);
+ RAPIDJSON_ASSERT(operandStack.GetSize() >= sizeof(Frag));
+
+ if (n == 0) {
+ if (m == 0) // a{0} not support
+ return false;
+ else if (m == kInfinityQuantifier)
+ Eval(operandStack, kZeroOrMore); // a{0,} -> a*
+ else {
+ Eval(operandStack, kZeroOrOne); // a{0,5} -> a?
+ for (unsigned i = 0; i < m - 1; i++)
+ CloneTopOperand(operandStack); // a{0,5} -> a? a? a? a? a?
+ for (unsigned i = 0; i < m - 1; i++)
+ Eval(operandStack, kConcatenation); // a{0,5} -> a?a?a?a?a?
+ }
+ return true;
+ }
+
+ for (unsigned i = 0; i < n - 1; i++) // a{3} -> a a a
+ CloneTopOperand(operandStack);
+
+ if (m == kInfinityQuantifier)
+ Eval(operandStack, kOneOrMore); // a{3,} -> a a a+
+ else if (m > n) {
+ CloneTopOperand(operandStack); // a{3,5} -> a a a a
+ Eval(operandStack, kZeroOrOne); // a{3,5} -> a a a a?
+ for (unsigned i = n; i < m - 1; i++)
+ CloneTopOperand(operandStack); // a{3,5} -> a a a a? a?
+ for (unsigned i = n; i < m; i++)
+ Eval(operandStack, kConcatenation); // a{3,5} -> a a aa?a?
+ }
+
+ for (unsigned i = 0; i < n - 1; i++)
+ Eval(operandStack, kConcatenation); // a{3} -> aaa, a{3,} -> aaa+, a{3.5} -> aaaa?a?
+
+ return true;
+ }
+
+ static SizeType Min(SizeType a, SizeType b) { return a < b ? a : b; }
+
+ void CloneTopOperand(Stack<Allocator>& operandStack) {
+ const Frag src = *operandStack.template Top<Frag>(); // Copy constructor to prevent invalidation
+ SizeType count = stateCount_ - src.minIndex; // Assumes top operand contains states in [src->minIndex, stateCount_)
+ State* s = states_.template Push<State>(count);
+ memcpy(s, &GetState(src.minIndex), count * sizeof(State));
+ for (SizeType j = 0; j < count; j++) {
+ if (s[j].out != kRegexInvalidState)
+ s[j].out += count;
+ if (s[j].out1 != kRegexInvalidState)
+ s[j].out1 += count;
+ }
+ *operandStack.template Push<Frag>() = Frag(src.start + count, src.out + count, src.minIndex + count);
+ stateCount_ += count;
+ }
+
+ template <typename InputStream>
+ bool ParseUnsigned(DecodedStream<InputStream, Encoding>& ds, unsigned* u) {
+ unsigned r = 0;
+ if (ds.Peek() < '0' || ds.Peek() > '9')
+ return false;
+ while (ds.Peek() >= '0' && ds.Peek() <= '9') {
+ if (r >= 429496729 && ds.Peek() > '5') // 2^32 - 1 = 4294967295
+ return false; // overflow
+ r = r * 10 + (ds.Take() - '0');
+ }
+ *u = r;
+ return true;
+ }
+
+ template <typename InputStream>
+ bool ParseRange(DecodedStream<InputStream, Encoding>& ds, SizeType* range) {
+ bool isBegin = true;
+ bool negate = false;
+ int step = 0;
+ SizeType start = kRegexInvalidRange;
+ SizeType current = kRegexInvalidRange;
+ unsigned codepoint;
+ while ((codepoint = ds.Take()) != 0) {
+ if (isBegin) {
+ isBegin = false;
+ if (codepoint == '^') {
+ negate = true;
+ continue;
+ }
+ }
+
+ switch (codepoint) {
+ case ']':
+ if (start == kRegexInvalidRange)
+ return false; // Error: nothing inside []
+ if (step == 2) { // Add trailing '-'
+ SizeType r = NewRange('-');
+ RAPIDJSON_ASSERT(current != kRegexInvalidRange);
+ GetRange(current).next = r;
+ }
+ if (negate)
+ GetRange(start).start |= kRangeNegationFlag;
+ *range = start;
+ return true;
+
+ case '\\':
+ if (ds.Peek() == 'b') {
+ ds.Take();
+ codepoint = 0x0008; // Escape backspace character
+ }
+ else if (!CharacterEscape(ds, &codepoint))
+ return false;
+ // fall through to default
+ RAPIDJSON_DELIBERATE_FALLTHROUGH;
+
+ default:
+ switch (step) {
+ case 1:
+ if (codepoint == '-') {
+ step++;
+ break;
+ }
+ // fall through to step 0 for other characters
+ RAPIDJSON_DELIBERATE_FALLTHROUGH;
+
+ case 0:
+ {
+ SizeType r = NewRange(codepoint);
+ if (current != kRegexInvalidRange)
+ GetRange(current).next = r;
+ if (start == kRegexInvalidRange)
+ start = r;
+ current = r;
+ }
+ step = 1;
+ break;
+
+ default:
+ RAPIDJSON_ASSERT(step == 2);
+ GetRange(current).end = codepoint;
+ step = 0;
+ }
+ }
+ }
+ return false;
+ }
+
+ SizeType NewRange(unsigned codepoint) {
+ Range* r = ranges_.template Push<Range>();
+ r->start = r->end = codepoint;
+ r->next = kRegexInvalidRange;
+ return rangeCount_++;
+ }
+
+ template <typename InputStream>
+ bool CharacterEscape(DecodedStream<InputStream, Encoding>& ds, unsigned* escapedCodepoint) {
+ unsigned codepoint;
+ switch (codepoint = ds.Take()) {
+ case '^':
+ case '$':
+ case '|':
+ case '(':
+ case ')':
+ case '?':
+ case '*':
+ case '+':
+ case '.':
+ case '[':
+ case ']':
+ case '{':
+ case '}':
+ case '\\':
+ *escapedCodepoint = codepoint; return true;
+ case 'f': *escapedCodepoint = 0x000C; return true;
+ case 'n': *escapedCodepoint = 0x000A; return true;
+ case 'r': *escapedCodepoint = 0x000D; return true;
+ case 't': *escapedCodepoint = 0x0009; return true;
+ case 'v': *escapedCodepoint = 0x000B; return true;
+ default:
+ return false; // Unsupported escape character
+ }
+ }
+
+ Allocator* ownAllocator_;
+ Allocator* allocator_;
+ Stack<Allocator> states_;
+ Stack<Allocator> ranges_;
+ SizeType root_;
+ SizeType stateCount_;
+ SizeType rangeCount_;
+
+ static const unsigned kInfinityQuantifier = ~0u;
+
+ // For SearchWithAnchoring()
+ bool anchorBegin_;
+ bool anchorEnd_;
+};
+
+template <typename RegexType, typename Allocator = CrtAllocator>
+class GenericRegexSearch {
+public:
+ typedef typename RegexType::EncodingType Encoding;
+ typedef typename Encoding::Ch Ch;
+
+ GenericRegexSearch(const RegexType& regex, Allocator* allocator = 0) :
+ regex_(regex), allocator_(allocator), ownAllocator_(0),
+ state0_(allocator, 0), state1_(allocator, 0), stateSet_()
+ {
+ RAPIDJSON_ASSERT(regex_.IsValid());
+ if (!allocator_)
+ ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator)();
+ stateSet_ = static_cast<unsigned*>(allocator_->Malloc(GetStateSetSize()));
+ state0_.template Reserve<SizeType>(regex_.stateCount_);
+ state1_.template Reserve<SizeType>(regex_.stateCount_);
+ }
+
+ ~GenericRegexSearch() {
+ Allocator::Free(stateSet_);
+ RAPIDJSON_DELETE(ownAllocator_);
+ }
+
+ template <typename InputStream>
+ bool Match(InputStream& is) {
+ return SearchWithAnchoring(is, true, true);
+ }
+
+ bool Match(const Ch* s) {
+ GenericStringStream<Encoding> is(s);
+ return Match(is);
+ }
+
+ template <typename InputStream>
+ bool Search(InputStream& is) {
+ return SearchWithAnchoring(is, regex_.anchorBegin_, regex_.anchorEnd_);
+ }
+
+ bool Search(const Ch* s) {
+ GenericStringStream<Encoding> is(s);
+ return Search(is);
+ }
+
+private:
+ typedef typename RegexType::State State;
+ typedef typename RegexType::Range Range;
+
+ template <typename InputStream>
+ bool SearchWithAnchoring(InputStream& is, bool anchorBegin, bool anchorEnd) {
+ DecodedStream<InputStream, Encoding> ds(is);
+
+ state0_.Clear();
+ Stack<Allocator> *current = &state0_, *next = &state1_;
+ const size_t stateSetSize = GetStateSetSize();
+ std::memset(stateSet_, 0, stateSetSize);
+
+ bool matched = AddState(*current, regex_.root_);
+ unsigned codepoint;
+ while (!current->Empty() && (codepoint = ds.Take()) != 0) {
+ std::memset(stateSet_, 0, stateSetSize);
+ next->Clear();
+ matched = false;
+ for (const SizeType* s = current->template Bottom<SizeType>(); s != current->template End<SizeType>(); ++s) {
+ const State& sr = regex_.GetState(*s);
+ if (sr.codepoint == codepoint ||
+ sr.codepoint == RegexType::kAnyCharacterClass ||
+ (sr.codepoint == RegexType::kRangeCharacterClass && MatchRange(sr.rangeStart, codepoint)))
+ {
+ matched = AddState(*next, sr.out) || matched;
+ if (!anchorEnd && matched)
+ return true;
+ }
+ if (!anchorBegin)
+ AddState(*next, regex_.root_);
+ }
+ internal::Swap(current, next);
+ }
+
+ return matched;
+ }
+
+ size_t GetStateSetSize() const {
+ return (regex_.stateCount_ + 31) / 32 * 4;
+ }
+
+ // Return whether the added states is a match state
+ bool AddState(Stack<Allocator>& l, SizeType index) {
+ RAPIDJSON_ASSERT(index != kRegexInvalidState);
+
+ const State& s = regex_.GetState(index);
+ if (s.out1 != kRegexInvalidState) { // Split
+ bool matched = AddState(l, s.out);
+ return AddState(l, s.out1) || matched;
+ }
+ else if (!(stateSet_[index >> 5] & (1u << (index & 31)))) {
+ stateSet_[index >> 5] |= (1u << (index & 31));
+ *l.template PushUnsafe<SizeType>() = index;
+ }
+ return s.out == kRegexInvalidState; // by using PushUnsafe() above, we can ensure s is not validated due to reallocation.
+ }
+
+ bool MatchRange(SizeType rangeIndex, unsigned codepoint) const {
+ bool yes = (regex_.GetRange(rangeIndex).start & RegexType::kRangeNegationFlag) == 0;
+ while (rangeIndex != kRegexInvalidRange) {
+ const Range& r = regex_.GetRange(rangeIndex);
+ if (codepoint >= (r.start & ~RegexType::kRangeNegationFlag) && codepoint <= r.end)
+ return yes;
+ rangeIndex = r.next;
+ }
+ return !yes;
+ }
+
+ const RegexType& regex_;
+ Allocator* allocator_;
+ Allocator* ownAllocator_;
+ Stack<Allocator> state0_;
+ Stack<Allocator> state1_;
+ uint32_t* stateSet_;
+};
+
+typedef GenericRegex<UTF8<> > Regex;
+typedef GenericRegexSearch<Regex> RegexSearch;
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_POP
+#endif
+
+#if defined(__clang__) || defined(_MSC_VER)
+RAPIDJSON_DIAG_POP
+#endif
+
+#endif // RAPIDJSON_INTERNAL_REGEX_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_stack.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_stack.h
new file mode 100644
index 00000000..173e0fec
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_stack.h
@@ -0,0 +1,232 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_INTERNAL_STACK_H_
+#define RAPIDJSON_INTERNAL_STACK_H_
+
+#include "lottie_rapidjson_allocators.h"
+#include "lottie_rapidjson_internal_swap.h"
+#include <cstddef>
+
+#if defined(__clang__)
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(c++98-compat)
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+///////////////////////////////////////////////////////////////////////////////
+// Stack
+
+//! A type-unsafe stack for storing different types of data.
+/*! \tparam Allocator Allocator for allocating stack memory.
+*/
+template <typename Allocator>
+class Stack {
+public:
+ // Optimization note: Do not allocate memory for stack_ in constructor.
+ // Do it lazily when first Push() -> Expand() -> Resize().
+ Stack(Allocator* allocator, size_t stackCapacity) : allocator_(allocator), ownAllocator_(0), stack_(0), stackTop_(0), stackEnd_(0), initialCapacity_(stackCapacity) {
+ }
+
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+ Stack(Stack&& rhs)
+ : allocator_(rhs.allocator_),
+ ownAllocator_(rhs.ownAllocator_),
+ stack_(rhs.stack_),
+ stackTop_(rhs.stackTop_),
+ stackEnd_(rhs.stackEnd_),
+ initialCapacity_(rhs.initialCapacity_)
+ {
+ rhs.allocator_ = 0;
+ rhs.ownAllocator_ = 0;
+ rhs.stack_ = 0;
+ rhs.stackTop_ = 0;
+ rhs.stackEnd_ = 0;
+ rhs.initialCapacity_ = 0;
+ }
+#endif
+
+ ~Stack() {
+ Destroy();
+ }
+
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+ Stack& operator=(Stack&& rhs) {
+ if (&rhs != this)
+ {
+ Destroy();
+
+ allocator_ = rhs.allocator_;
+ ownAllocator_ = rhs.ownAllocator_;
+ stack_ = rhs.stack_;
+ stackTop_ = rhs.stackTop_;
+ stackEnd_ = rhs.stackEnd_;
+ initialCapacity_ = rhs.initialCapacity_;
+
+ rhs.allocator_ = 0;
+ rhs.ownAllocator_ = 0;
+ rhs.stack_ = 0;
+ rhs.stackTop_ = 0;
+ rhs.stackEnd_ = 0;
+ rhs.initialCapacity_ = 0;
+ }
+ return *this;
+ }
+#endif
+
+ void Swap(Stack& rhs) RAPIDJSON_NOEXCEPT {
+ internal::Swap(allocator_, rhs.allocator_);
+ internal::Swap(ownAllocator_, rhs.ownAllocator_);
+ internal::Swap(stack_, rhs.stack_);
+ internal::Swap(stackTop_, rhs.stackTop_);
+ internal::Swap(stackEnd_, rhs.stackEnd_);
+ internal::Swap(initialCapacity_, rhs.initialCapacity_);
+ }
+
+ void Clear() { stackTop_ = stack_; }
+
+ void ShrinkToFit() {
+ if (Empty()) {
+ // If the stack is empty, completely deallocate the memory.
+ Allocator::Free(stack_); // NOLINT (+clang-analyzer-unix.Malloc)
+ stack_ = 0;
+ stackTop_ = 0;
+ stackEnd_ = 0;
+ }
+ else
+ Resize(GetSize());
+ }
+
+ // Optimization note: try to minimize the size of this function for force inline.
+ // Expansion is run very infrequently, so it is moved to another (probably non-inline) function.
+ template<typename T>
+ RAPIDJSON_FORCEINLINE void Reserve(size_t count = 1) {
+ // Expand the stack if needed
+ if (RAPIDJSON_UNLIKELY(static_cast<std::ptrdiff_t>(sizeof(T) * count) > (stackEnd_ - stackTop_)))
+ Expand<T>(count);
+ }
+
+ template<typename T>
+ RAPIDJSON_FORCEINLINE T* Push(size_t count = 1) {
+ Reserve<T>(count);
+ return PushUnsafe<T>(count);
+ }
+
+ template<typename T>
+ RAPIDJSON_FORCEINLINE T* PushUnsafe(size_t count = 1) {
+ RAPIDJSON_ASSERT(stackTop_);
+ RAPIDJSON_ASSERT(static_cast<std::ptrdiff_t>(sizeof(T) * count) <= (stackEnd_ - stackTop_));
+ T* ret = reinterpret_cast<T*>(stackTop_);
+ stackTop_ += sizeof(T) * count;
+ return ret;
+ }
+
+ template<typename T>
+ T* Pop(size_t count) {
+ RAPIDJSON_ASSERT(GetSize() >= count * sizeof(T));
+ stackTop_ -= count * sizeof(T);
+ return reinterpret_cast<T*>(stackTop_);
+ }
+
+ template<typename T>
+ T* Top() {
+ RAPIDJSON_ASSERT(GetSize() >= sizeof(T));
+ return reinterpret_cast<T*>(stackTop_ - sizeof(T));
+ }
+
+ template<typename T>
+ const T* Top() const {
+ RAPIDJSON_ASSERT(GetSize() >= sizeof(T));
+ return reinterpret_cast<T*>(stackTop_ - sizeof(T));
+ }
+
+ template<typename T>
+ T* End() { return reinterpret_cast<T*>(stackTop_); }
+
+ template<typename T>
+ const T* End() const { return reinterpret_cast<T*>(stackTop_); }
+
+ template<typename T>
+ T* Bottom() { return reinterpret_cast<T*>(stack_); }
+
+ template<typename T>
+ const T* Bottom() const { return reinterpret_cast<T*>(stack_); }
+
+ bool HasAllocator() const {
+ return allocator_ != 0;
+ }
+
+ Allocator& GetAllocator() {
+ RAPIDJSON_ASSERT(allocator_);
+ return *allocator_;
+ }
+
+ bool Empty() const { return stackTop_ == stack_; }
+ size_t GetSize() const { return static_cast<size_t>(stackTop_ - stack_); }
+ size_t GetCapacity() const { return static_cast<size_t>(stackEnd_ - stack_); }
+
+private:
+ template<typename T>
+ void Expand(size_t count) {
+ // Only expand the capacity if the current stack exists. Otherwise just create a stack with initial capacity.
+ size_t newCapacity;
+ if (stack_ == 0) {
+ if (!allocator_)
+ ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator)();
+ newCapacity = initialCapacity_;
+ } else {
+ newCapacity = GetCapacity();
+ newCapacity += (newCapacity + 1) / 2;
+ }
+ size_t newSize = GetSize() + sizeof(T) * count;
+ if (newCapacity < newSize)
+ newCapacity = newSize;
+
+ Resize(newCapacity);
+ }
+
+ void Resize(size_t newCapacity) {
+ const size_t size = GetSize(); // Backup the current size
+ stack_ = static_cast<char*>(allocator_->Realloc(stack_, GetCapacity(), newCapacity));
+ stackTop_ = stack_ + size;
+ stackEnd_ = stack_ + newCapacity;
+ }
+
+ void Destroy() {
+ Allocator::Free(stack_);
+ RAPIDJSON_DELETE(ownAllocator_); // Only delete if it is owned by the stack
+ }
+
+ // Prohibit copy constructor & assignment operator.
+ Stack(const Stack&);
+ Stack& operator=(const Stack&);
+
+ Allocator* allocator_;
+ Allocator* ownAllocator_;
+ char *stack_;
+ char *stackTop_;
+ char *stackEnd_;
+ size_t initialCapacity_;
+};
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+
+#if defined(__clang__)
+RAPIDJSON_DIAG_POP
+#endif
+
+#endif // RAPIDJSON_STACK_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_strfunc.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_strfunc.h
new file mode 100644
index 00000000..7fa0d81e
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_strfunc.h
@@ -0,0 +1,69 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_INTERNAL_STRFUNC_H_
+#define RAPIDJSON_INTERNAL_STRFUNC_H_
+
+#include "lottie_rapidjson_stream.h"
+#include <cwchar>
+
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+//! Custom strlen() which works on different character types.
+/*! \tparam Ch Character type (e.g. char, wchar_t, short)
+ \param s Null-terminated input string.
+ \return Number of characters in the string.
+ \note This has the same semantics as strlen(), the return value is not number of Unicode codepoints.
+*/
+template <typename Ch>
+inline SizeType StrLen(const Ch* s) {
+ RAPIDJSON_ASSERT(s != 0);
+ const Ch* p = s;
+ while (*p) ++p;
+ return SizeType(p - s);
+}
+
+template <>
+inline SizeType StrLen(const char* s) {
+ return SizeType(std::strlen(s));
+}
+
+template <>
+inline SizeType StrLen(const wchar_t* s) {
+ return SizeType(std::wcslen(s));
+}
+
+//! Returns number of code points in a encoded string.
+template<typename Encoding>
+bool CountStringCodePoint(const typename Encoding::Ch* s, SizeType length, SizeType* outCount) {
+ RAPIDJSON_ASSERT(s != 0);
+ RAPIDJSON_ASSERT(outCount != 0);
+ GenericStringStream<Encoding> is(s);
+ const typename Encoding::Ch* end = s + length;
+ SizeType count = 0;
+ while (is.src_ < end) {
+ unsigned codepoint;
+ if (!Encoding::Decode(is, &codepoint))
+ return false;
+ count++;
+ }
+ *outCount = count;
+ return true;
+}
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_INTERNAL_STRFUNC_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_strtod.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_strtod.h
new file mode 100644
index 00000000..fc1bc5d6
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_strtod.h
@@ -0,0 +1,290 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_STRTOD_
+#define RAPIDJSON_STRTOD_
+
+#include "lottie_rapidjson_internal_ieee754.h"
+#include "lottie_rapidjson_internal_biginteger.h"
+#include "lottie_rapidjson_internal_diyfp.h"
+#include "lottie_rapidjson_internal_pow10.h"
+#include <climits>
+#include <limits>
+
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+inline double FastPath(double significand, int exp) {
+ if (exp < -308)
+ return 0.0;
+ else if (exp >= 0)
+ return significand * internal::Pow10(exp);
+ else
+ return significand / internal::Pow10(-exp);
+}
+
+inline double StrtodNormalPrecision(double d, int p) {
+ if (p < -308) {
+ // Prevent expSum < -308, making Pow10(p) = 0
+ d = FastPath(d, -308);
+ d = FastPath(d, p + 308);
+ }
+ else
+ d = FastPath(d, p);
+ return d;
+}
+
+template <typename T>
+inline T Min3(T a, T b, T c) {
+ T m = a;
+ if (m > b) m = b;
+ if (m > c) m = c;
+ return m;
+}
+
+inline int CheckWithinHalfULP(double b, const BigInteger& d, int dExp) {
+ const Double db(b);
+ const uint64_t bInt = db.IntegerSignificand();
+ const int bExp = db.IntegerExponent();
+ const int hExp = bExp - 1;
+
+ int dS_Exp2 = 0, dS_Exp5 = 0, bS_Exp2 = 0, bS_Exp5 = 0, hS_Exp2 = 0, hS_Exp5 = 0;
+
+ // Adjust for decimal exponent
+ if (dExp >= 0) {
+ dS_Exp2 += dExp;
+ dS_Exp5 += dExp;
+ }
+ else {
+ bS_Exp2 -= dExp;
+ bS_Exp5 -= dExp;
+ hS_Exp2 -= dExp;
+ hS_Exp5 -= dExp;
+ }
+
+ // Adjust for binary exponent
+ if (bExp >= 0)
+ bS_Exp2 += bExp;
+ else {
+ dS_Exp2 -= bExp;
+ hS_Exp2 -= bExp;
+ }
+
+ // Adjust for half ulp exponent
+ if (hExp >= 0)
+ hS_Exp2 += hExp;
+ else {
+ dS_Exp2 -= hExp;
+ bS_Exp2 -= hExp;
+ }
+
+ // Remove common power of two factor from all three scaled values
+ int common_Exp2 = Min3(dS_Exp2, bS_Exp2, hS_Exp2);
+ dS_Exp2 -= common_Exp2;
+ bS_Exp2 -= common_Exp2;
+ hS_Exp2 -= common_Exp2;
+
+ BigInteger dS = d;
+ dS.MultiplyPow5(static_cast<unsigned>(dS_Exp5)) <<= static_cast<unsigned>(dS_Exp2);
+
+ BigInteger bS(bInt);
+ bS.MultiplyPow5(static_cast<unsigned>(bS_Exp5)) <<= static_cast<unsigned>(bS_Exp2);
+
+ BigInteger hS(1);
+ hS.MultiplyPow5(static_cast<unsigned>(hS_Exp5)) <<= static_cast<unsigned>(hS_Exp2);
+
+ BigInteger delta(0);
+ dS.Difference(bS, &delta);
+
+ return delta.Compare(hS);
+}
+
+inline bool StrtodFast(double d, int p, double* result) {
+ // Use fast path for string-to-double conversion if possible
+ // see http://www.exploringbinary.com/fast-path-decimal-to-floating-point-conversion/
+ if (p > 22 && p < 22 + 16) {
+ // Fast Path Cases In Disguise
+ d *= internal::Pow10(p - 22);
+ p = 22;
+ }
+
+ if (p >= -22 && p <= 22 && d <= 9007199254740991.0) { // 2^53 - 1
+ *result = FastPath(d, p);
+ return true;
+ }
+ else
+ return false;
+}
+
+// Compute an approximation and see if it is within 1/2 ULP
+inline bool StrtodDiyFp(const char* decimals, int dLen, int dExp, double* result) {
+ uint64_t significand = 0;
+ int i = 0; // 2^64 - 1 = 18446744073709551615, 1844674407370955161 = 0x1999999999999999
+ for (; i < dLen; i++) {
+ if (significand > RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) ||
+ (significand == RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) && decimals[i] > '5'))
+ break;
+ significand = significand * 10u + static_cast<unsigned>(decimals[i] - '0');
+ }
+
+ if (i < dLen && decimals[i] >= '5') // Rounding
+ significand++;
+
+ int remaining = dLen - i;
+ const int kUlpShift = 3;
+ const int kUlp = 1 << kUlpShift;
+ int64_t error = (remaining == 0) ? 0 : kUlp / 2;
+
+ DiyFp v(significand, 0);
+ v = v.Normalize();
+ error <<= -v.e;
+
+ dExp += remaining;
+
+ int actualExp;
+ DiyFp cachedPower = GetCachedPower10(dExp, &actualExp);
+ if (actualExp != dExp) {
+ static const DiyFp kPow10[] = {
+ DiyFp(RAPIDJSON_UINT64_C2(0xa0000000, 0x00000000), -60), // 10^1
+ DiyFp(RAPIDJSON_UINT64_C2(0xc8000000, 0x00000000), -57), // 10^2
+ DiyFp(RAPIDJSON_UINT64_C2(0xfa000000, 0x00000000), -54), // 10^3
+ DiyFp(RAPIDJSON_UINT64_C2(0x9c400000, 0x00000000), -50), // 10^4
+ DiyFp(RAPIDJSON_UINT64_C2(0xc3500000, 0x00000000), -47), // 10^5
+ DiyFp(RAPIDJSON_UINT64_C2(0xf4240000, 0x00000000), -44), // 10^6
+ DiyFp(RAPIDJSON_UINT64_C2(0x98968000, 0x00000000), -40) // 10^7
+ };
+ int adjustment = dExp - actualExp;
+ RAPIDJSON_ASSERT(adjustment >= 1 && adjustment < 8);
+ v = v * kPow10[adjustment - 1];
+ if (dLen + adjustment > 19) // has more digits than decimal digits in 64-bit
+ error += kUlp / 2;
+ }
+
+ v = v * cachedPower;
+
+ error += kUlp + (error == 0 ? 0 : 1);
+
+ const int oldExp = v.e;
+ v = v.Normalize();
+ error <<= oldExp - v.e;
+
+ const int effectiveSignificandSize = Double::EffectiveSignificandSize(64 + v.e);
+ int precisionSize = 64 - effectiveSignificandSize;
+ if (precisionSize + kUlpShift >= 64) {
+ int scaleExp = (precisionSize + kUlpShift) - 63;
+ v.f >>= scaleExp;
+ v.e += scaleExp;
+ error = (error >> scaleExp) + 1 + kUlp;
+ precisionSize -= scaleExp;
+ }
+
+ DiyFp rounded(v.f >> precisionSize, v.e + precisionSize);
+ const uint64_t precisionBits = (v.f & ((uint64_t(1) << precisionSize) - 1)) * kUlp;
+ const uint64_t halfWay = (uint64_t(1) << (precisionSize - 1)) * kUlp;
+ if (precisionBits >= halfWay + static_cast<unsigned>(error)) {
+ rounded.f++;
+ if (rounded.f & (DiyFp::kDpHiddenBit << 1)) { // rounding overflows mantissa (issue #340)
+ rounded.f >>= 1;
+ rounded.e++;
+ }
+ }
+
+ *result = rounded.ToDouble();
+
+ return halfWay - static_cast<unsigned>(error) >= precisionBits || precisionBits >= halfWay + static_cast<unsigned>(error);
+}
+
+inline double StrtodBigInteger(double approx, const char* decimals, int dLen, int dExp) {
+ RAPIDJSON_ASSERT(dLen >= 0);
+ const BigInteger dInt(decimals, static_cast<unsigned>(dLen));
+ Double a(approx);
+ int cmp = CheckWithinHalfULP(a.Value(), dInt, dExp);
+ if (cmp < 0)
+ return a.Value(); // within half ULP
+ else if (cmp == 0) {
+ // Round towards even
+ if (a.Significand() & 1)
+ return a.NextPositiveDouble();
+ else
+ return a.Value();
+ }
+ else // adjustment
+ return a.NextPositiveDouble();
+}
+
+inline double StrtodFullPrecision(double d, int p, const char* decimals, size_t length, size_t decimalPosition, int exp) {
+ RAPIDJSON_ASSERT(d >= 0.0);
+ RAPIDJSON_ASSERT(length >= 1);
+
+ double result = 0.0;
+ if (StrtodFast(d, p, &result))
+ return result;
+
+ RAPIDJSON_ASSERT(length <= INT_MAX);
+ int dLen = static_cast<int>(length);
+
+ RAPIDJSON_ASSERT(length >= decimalPosition);
+ RAPIDJSON_ASSERT(length - decimalPosition <= INT_MAX);
+ int dExpAdjust = static_cast<int>(length - decimalPosition);
+
+ RAPIDJSON_ASSERT(exp >= INT_MIN + dExpAdjust);
+ int dExp = exp - dExpAdjust;
+
+ // Make sure length+dExp does not overflow
+ RAPIDJSON_ASSERT(dExp <= INT_MAX - dLen);
+
+ // Trim leading zeros
+ while (dLen > 0 && *decimals == '0') {
+ dLen--;
+ decimals++;
+ }
+
+ // Trim trailing zeros
+ while (dLen > 0 && decimals[dLen - 1] == '0') {
+ dLen--;
+ dExp++;
+ }
+
+ if (dLen == 0) { // Buffer only contains zeros.
+ return 0.0;
+ }
+
+ // Trim right-most digits
+ const int kMaxDecimalDigit = 767 + 1;
+ if (dLen > kMaxDecimalDigit) {
+ dExp += dLen - kMaxDecimalDigit;
+ dLen = kMaxDecimalDigit;
+ }
+
+ // If too small, underflow to zero.
+ // Any x <= 10^-324 is interpreted as zero.
+ if (dLen + dExp <= -324)
+ return 0.0;
+
+ // If too large, overflow to infinity.
+ // Any x >= 10^309 is interpreted as +infinity.
+ if (dLen + dExp > 309)
+ return std::numeric_limits<double>::infinity();
+
+ if (StrtodDiyFp(decimals, dLen, dExp, &result))
+ return result;
+
+ // Use approximation from StrtodDiyFp and make adjustment with BigInteger comparison
+ return StrtodBigInteger(result, decimals, dLen, dExp);
+}
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_STRTOD_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_swap.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_swap.h
new file mode 100644
index 00000000..816fe3cc
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_swap.h
@@ -0,0 +1,46 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_INTERNAL_SWAP_H_
+#define RAPIDJSON_INTERNAL_SWAP_H_
+
+#include "lottie_rapidjson_rapidjson.h"
+
+#if defined(__clang__)
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(c++98-compat)
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+//! Custom swap() to avoid dependency on C++ <algorithm> header
+/*! \tparam T Type of the arguments to swap, should be instantiated with primitive C++ types only.
+ \note This has the same semantics as std::swap().
+*/
+template <typename T>
+inline void Swap(T& a, T& b) RAPIDJSON_NOEXCEPT {
+ T tmp = a;
+ a = b;
+ b = tmp;
+}
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+
+#if defined(__clang__)
+RAPIDJSON_DIAG_POP
+#endif
+
+#endif // RAPIDJSON_INTERNAL_SWAP_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_istreamwrapper.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_istreamwrapper.h
new file mode 100644
index 00000000..47b279f9
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_istreamwrapper.h
@@ -0,0 +1,128 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_ISTREAMWRAPPER_H_
+#define RAPIDJSON_ISTREAMWRAPPER_H_
+
+#include "lottie_rapidjson_stream.h"
+#include <iosfwd>
+#include <ios>
+
+#ifdef __clang__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(padded)
+#elif defined(_MSC_VER)
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(4351) // new behavior: elements of array 'array' will be default initialized
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+
+//! Wrapper of \c std::basic_istream into RapidJSON's Stream concept.
+/*!
+ The classes can be wrapped including but not limited to:
+
+ - \c std::istringstream
+ - \c std::stringstream
+ - \c std::wistringstream
+ - \c std::wstringstream
+ - \c std::ifstream
+ - \c std::fstream
+ - \c std::wifstream
+ - \c std::wfstream
+
+ \tparam StreamType Class derived from \c std::basic_istream.
+*/
+
+template <typename StreamType>
+class BasicIStreamWrapper {
+public:
+ typedef typename StreamType::char_type Ch;
+
+ //! Constructor.
+ /*!
+ \param stream stream opened for read.
+ */
+ BasicIStreamWrapper(StreamType &stream) : stream_(stream), buffer_(peekBuffer_), bufferSize_(4), bufferLast_(0), current_(buffer_), readCount_(0), count_(0), eof_(false) {
+ Read();
+ }
+
+ //! Constructor.
+ /*!
+ \param stream stream opened for read.
+ \param buffer user-supplied buffer.
+ \param bufferSize size of buffer in bytes. Must >=4 bytes.
+ */
+ BasicIStreamWrapper(StreamType &stream, char* buffer, size_t bufferSize) : stream_(stream), buffer_(buffer), bufferSize_(bufferSize), bufferLast_(0), current_(buffer_), readCount_(0), count_(0), eof_(false) {
+ RAPIDJSON_ASSERT(bufferSize >= 4);
+ Read();
+ }
+
+ Ch Peek() const { return *current_; }
+ Ch Take() { Ch c = *current_; Read(); return c; }
+ size_t Tell() const { return count_ + static_cast<size_t>(current_ - buffer_); }
+
+ // Not implemented
+ void Put(Ch) { RAPIDJSON_ASSERT(false); }
+ void Flush() { RAPIDJSON_ASSERT(false); }
+ Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
+ size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }
+
+ // For encoding detection only.
+ const Ch* Peek4() const {
+ return (current_ + 4 - !eof_ <= bufferLast_) ? current_ : 0;
+ }
+
+private:
+ BasicIStreamWrapper();
+ BasicIStreamWrapper(const BasicIStreamWrapper&);
+ BasicIStreamWrapper& operator=(const BasicIStreamWrapper&);
+
+ void Read() {
+ if (current_ < bufferLast_)
+ ++current_;
+ else if (!eof_) {
+ count_ += readCount_;
+ readCount_ = bufferSize_;
+ bufferLast_ = buffer_ + readCount_ - 1;
+ current_ = buffer_;
+
+ if (!stream_.read(buffer_, static_cast<std::streamsize>(bufferSize_))) {
+ readCount_ = static_cast<size_t>(stream_.gcount());
+ *(bufferLast_ = buffer_ + readCount_) = '\0';
+ eof_ = true;
+ }
+ }
+ }
+
+ StreamType &stream_;
+ Ch peekBuffer_[4], *buffer_;
+ size_t bufferSize_;
+ Ch *bufferLast_;
+ Ch *current_;
+ size_t readCount_;
+ size_t count_; //!< Number of characters read
+ bool eof_;
+};
+
+typedef BasicIStreamWrapper<std::istream> IStreamWrapper;
+typedef BasicIStreamWrapper<std::wistream> WIStreamWrapper;
+
+#if defined(__clang__) || defined(_MSC_VER)
+RAPIDJSON_DIAG_POP
+#endif
+
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_ISTREAMWRAPPER_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_memorybuffer.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_memorybuffer.h
new file mode 100644
index 00000000..100a46fd
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_memorybuffer.h
@@ -0,0 +1,70 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_MEMORYBUFFER_H_
+#define RAPIDJSON_MEMORYBUFFER_H_
+
+#include "lottie_rapidjson_stream.h"
+#include "lottie_rapidjson_internal_stack.h"
+
+RAPIDJSON_NAMESPACE_BEGIN
+
+//! Represents an in-memory output byte stream.
+/*!
+ This class is mainly for being wrapped by EncodedOutputStream or AutoUTFOutputStream.
+
+ It is similar to FileWriteBuffer but the destination is an in-memory buffer instead of a file.
+
+ Differences between MemoryBuffer and StringBuffer:
+ 1. StringBuffer has Encoding but MemoryBuffer is only a byte buffer.
+ 2. StringBuffer::GetString() returns a null-terminated string. MemoryBuffer::GetBuffer() returns a buffer without terminator.
+
+ \tparam Allocator type for allocating memory buffer.
+ \note implements Stream concept
+*/
+template <typename Allocator = CrtAllocator>
+struct GenericMemoryBuffer {
+ typedef char Ch; // byte
+
+ GenericMemoryBuffer(Allocator* allocator = 0, size_t capacity = kDefaultCapacity) : stack_(allocator, capacity) {}
+
+ void Put(Ch c) { *stack_.template Push<Ch>() = c; }
+ void Flush() {}
+
+ void Clear() { stack_.Clear(); }
+ void ShrinkToFit() { stack_.ShrinkToFit(); }
+ Ch* Push(size_t count) { return stack_.template Push<Ch>(count); }
+ void Pop(size_t count) { stack_.template Pop<Ch>(count); }
+
+ const Ch* GetBuffer() const {
+ return stack_.template Bottom<Ch>();
+ }
+
+ size_t GetSize() const { return stack_.GetSize(); }
+
+ static const size_t kDefaultCapacity = 256;
+ mutable internal::Stack<Allocator> stack_;
+};
+
+typedef GenericMemoryBuffer<> MemoryBuffer;
+
+//! Implement specialized version of PutN() with memset() for better performance.
+template<>
+inline void PutN(MemoryBuffer& memoryBuffer, char c, size_t n) {
+ std::memset(memoryBuffer.stack_.Push<char>(n), c, n * sizeof(c));
+}
+
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_MEMORYBUFFER_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_memorystream.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_memorystream.h
new file mode 100644
index 00000000..933319e8
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_memorystream.h
@@ -0,0 +1,71 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_MEMORYSTREAM_H_
+#define RAPIDJSON_MEMORYSTREAM_H_
+
+#include "lottie_rapidjson_stream.h"
+
+#ifdef __clang__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(unreachable-code)
+RAPIDJSON_DIAG_OFF(missing-noreturn)
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+
+//! Represents an in-memory input byte stream.
+/*!
+ This class is mainly for being wrapped by EncodedInputStream or AutoUTFInputStream.
+
+ It is similar to FileReadBuffer but the source is an in-memory buffer instead of a file.
+
+ Differences between MemoryStream and StringStream:
+ 1. StringStream has encoding but MemoryStream is a byte stream.
+ 2. MemoryStream needs size of the source buffer and the buffer don't need to be null terminated. StringStream assume null-terminated string as source.
+ 3. MemoryStream supports Peek4() for encoding detection. StringStream is specified with an encoding so it should not have Peek4().
+ \note implements Stream concept
+*/
+struct MemoryStream {
+ typedef char Ch; // byte
+
+ MemoryStream(const Ch *src, size_t size) : src_(src), begin_(src), end_(src + size), size_(size) {}
+
+ Ch Peek() const { return RAPIDJSON_UNLIKELY(src_ == end_) ? '\0' : *src_; }
+ Ch Take() { return RAPIDJSON_UNLIKELY(src_ == end_) ? '\0' : *src_++; }
+ size_t Tell() const { return static_cast<size_t>(src_ - begin_); }
+
+ Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
+ void Put(Ch) { RAPIDJSON_ASSERT(false); }
+ void Flush() { RAPIDJSON_ASSERT(false); }
+ size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }
+
+ // For encoding detection only.
+ const Ch* Peek4() const {
+ return Tell() + 4 <= size_ ? src_ : 0;
+ }
+
+ const Ch* src_; //!< Current read position.
+ const Ch* begin_; //!< Original head of the string.
+ const Ch* end_; //!< End of stream.
+ size_t size_; //!< Size of the stream.
+};
+
+RAPIDJSON_NAMESPACE_END
+
+#ifdef __clang__
+RAPIDJSON_DIAG_POP
+#endif
+
+#endif // RAPIDJSON_MEMORYSTREAM_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_msinttypes_inttypes.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_msinttypes_inttypes.h
new file mode 100644
index 00000000..8db2e8e7
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_msinttypes_inttypes.h
@@ -0,0 +1,316 @@
+// ISO C9x compliant inttypes.h for Microsoft Visual Studio
+// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
+//
+// Copyright (c) 2006-2013 Alexander Chemeris
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the product nor the names of its contributors may
+// be used to endorse or promote products derived from this software
+// without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+// The above software in this distribution may have been modified by
+// THL A29 Limited ("Tencent Modifications").
+// All Tencent Modifications are Copyright (C) 2015 THL A29 Limited.
+
+#ifndef _MSC_VER // [
+#error "Use this header only with Microsoft Visual C++ compilers!"
+#endif // _MSC_VER ]
+
+#ifndef _MSC_INTTYPES_H_ // [
+#define _MSC_INTTYPES_H_
+
+#if _MSC_VER > 1000
+#pragma once
+#endif
+
+#include "lottie_rapidjson_msinttypes_stdint.h"
+
+// miloyip: VC supports inttypes.h since VC2013
+#if _MSC_VER >= 1800
+#include <inttypes.h>
+#else
+
+// 7.8 Format conversion of integer types
+
+typedef struct {
+ intmax_t quot;
+ intmax_t rem;
+} imaxdiv_t;
+
+// 7.8.1 Macros for format specifiers
+
+#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) // [ See footnote 185 at page 198
+
+// The fprintf macros for signed integers are:
+#define PRId8 "d"
+#define PRIi8 "i"
+#define PRIdLEAST8 "d"
+#define PRIiLEAST8 "i"
+#define PRIdFAST8 "d"
+#define PRIiFAST8 "i"
+
+#define PRId16 "hd"
+#define PRIi16 "hi"
+#define PRIdLEAST16 "hd"
+#define PRIiLEAST16 "hi"
+#define PRIdFAST16 "hd"
+#define PRIiFAST16 "hi"
+
+#define PRId32 "I32d"
+#define PRIi32 "I32i"
+#define PRIdLEAST32 "I32d"
+#define PRIiLEAST32 "I32i"
+#define PRIdFAST32 "I32d"
+#define PRIiFAST32 "I32i"
+
+#define PRId64 "I64d"
+#define PRIi64 "I64i"
+#define PRIdLEAST64 "I64d"
+#define PRIiLEAST64 "I64i"
+#define PRIdFAST64 "I64d"
+#define PRIiFAST64 "I64i"
+
+#define PRIdMAX "I64d"
+#define PRIiMAX "I64i"
+
+#define PRIdPTR "Id"
+#define PRIiPTR "Ii"
+
+// The fprintf macros for unsigned integers are:
+#define PRIo8 "o"
+#define PRIu8 "u"
+#define PRIx8 "x"
+#define PRIX8 "X"
+#define PRIoLEAST8 "o"
+#define PRIuLEAST8 "u"
+#define PRIxLEAST8 "x"
+#define PRIXLEAST8 "X"
+#define PRIoFAST8 "o"
+#define PRIuFAST8 "u"
+#define PRIxFAST8 "x"
+#define PRIXFAST8 "X"
+
+#define PRIo16 "ho"
+#define PRIu16 "hu"
+#define PRIx16 "hx"
+#define PRIX16 "hX"
+#define PRIoLEAST16 "ho"
+#define PRIuLEAST16 "hu"
+#define PRIxLEAST16 "hx"
+#define PRIXLEAST16 "hX"
+#define PRIoFAST16 "ho"
+#define PRIuFAST16 "hu"
+#define PRIxFAST16 "hx"
+#define PRIXFAST16 "hX"
+
+#define PRIo32 "I32o"
+#define PRIu32 "I32u"
+#define PRIx32 "I32x"
+#define PRIX32 "I32X"
+#define PRIoLEAST32 "I32o"
+#define PRIuLEAST32 "I32u"
+#define PRIxLEAST32 "I32x"
+#define PRIXLEAST32 "I32X"
+#define PRIoFAST32 "I32o"
+#define PRIuFAST32 "I32u"
+#define PRIxFAST32 "I32x"
+#define PRIXFAST32 "I32X"
+
+#define PRIo64 "I64o"
+#define PRIu64 "I64u"
+#define PRIx64 "I64x"
+#define PRIX64 "I64X"
+#define PRIoLEAST64 "I64o"
+#define PRIuLEAST64 "I64u"
+#define PRIxLEAST64 "I64x"
+#define PRIXLEAST64 "I64X"
+#define PRIoFAST64 "I64o"
+#define PRIuFAST64 "I64u"
+#define PRIxFAST64 "I64x"
+#define PRIXFAST64 "I64X"
+
+#define PRIoMAX "I64o"
+#define PRIuMAX "I64u"
+#define PRIxMAX "I64x"
+#define PRIXMAX "I64X"
+
+#define PRIoPTR "Io"
+#define PRIuPTR "Iu"
+#define PRIxPTR "Ix"
+#define PRIXPTR "IX"
+
+// The fscanf macros for signed integers are:
+#define SCNd8 "d"
+#define SCNi8 "i"
+#define SCNdLEAST8 "d"
+#define SCNiLEAST8 "i"
+#define SCNdFAST8 "d"
+#define SCNiFAST8 "i"
+
+#define SCNd16 "hd"
+#define SCNi16 "hi"
+#define SCNdLEAST16 "hd"
+#define SCNiLEAST16 "hi"
+#define SCNdFAST16 "hd"
+#define SCNiFAST16 "hi"
+
+#define SCNd32 "ld"
+#define SCNi32 "li"
+#define SCNdLEAST32 "ld"
+#define SCNiLEAST32 "li"
+#define SCNdFAST32 "ld"
+#define SCNiFAST32 "li"
+
+#define SCNd64 "I64d"
+#define SCNi64 "I64i"
+#define SCNdLEAST64 "I64d"
+#define SCNiLEAST64 "I64i"
+#define SCNdFAST64 "I64d"
+#define SCNiFAST64 "I64i"
+
+#define SCNdMAX "I64d"
+#define SCNiMAX "I64i"
+
+#ifdef _WIN64 // [
+# define SCNdPTR "I64d"
+# define SCNiPTR "I64i"
+#else // _WIN64 ][
+# define SCNdPTR "ld"
+# define SCNiPTR "li"
+#endif // _WIN64 ]
+
+// The fscanf macros for unsigned integers are:
+#define SCNo8 "o"
+#define SCNu8 "u"
+#define SCNx8 "x"
+#define SCNX8 "X"
+#define SCNoLEAST8 "o"
+#define SCNuLEAST8 "u"
+#define SCNxLEAST8 "x"
+#define SCNXLEAST8 "X"
+#define SCNoFAST8 "o"
+#define SCNuFAST8 "u"
+#define SCNxFAST8 "x"
+#define SCNXFAST8 "X"
+
+#define SCNo16 "ho"
+#define SCNu16 "hu"
+#define SCNx16 "hx"
+#define SCNX16 "hX"
+#define SCNoLEAST16 "ho"
+#define SCNuLEAST16 "hu"
+#define SCNxLEAST16 "hx"
+#define SCNXLEAST16 "hX"
+#define SCNoFAST16 "ho"
+#define SCNuFAST16 "hu"
+#define SCNxFAST16 "hx"
+#define SCNXFAST16 "hX"
+
+#define SCNo32 "lo"
+#define SCNu32 "lu"
+#define SCNx32 "lx"
+#define SCNX32 "lX"
+#define SCNoLEAST32 "lo"
+#define SCNuLEAST32 "lu"
+#define SCNxLEAST32 "lx"
+#define SCNXLEAST32 "lX"
+#define SCNoFAST32 "lo"
+#define SCNuFAST32 "lu"
+#define SCNxFAST32 "lx"
+#define SCNXFAST32 "lX"
+
+#define SCNo64 "I64o"
+#define SCNu64 "I64u"
+#define SCNx64 "I64x"
+#define SCNX64 "I64X"
+#define SCNoLEAST64 "I64o"
+#define SCNuLEAST64 "I64u"
+#define SCNxLEAST64 "I64x"
+#define SCNXLEAST64 "I64X"
+#define SCNoFAST64 "I64o"
+#define SCNuFAST64 "I64u"
+#define SCNxFAST64 "I64x"
+#define SCNXFAST64 "I64X"
+
+#define SCNoMAX "I64o"
+#define SCNuMAX "I64u"
+#define SCNxMAX "I64x"
+#define SCNXMAX "I64X"
+
+#ifdef _WIN64 // [
+# define SCNoPTR "I64o"
+# define SCNuPTR "I64u"
+# define SCNxPTR "I64x"
+# define SCNXPTR "I64X"
+#else // _WIN64 ][
+# define SCNoPTR "lo"
+# define SCNuPTR "lu"
+# define SCNxPTR "lx"
+# define SCNXPTR "lX"
+#endif // _WIN64 ]
+
+#endif // __STDC_FORMAT_MACROS ]
+
+// 7.8.2 Functions for greatest-width integer types
+
+// 7.8.2.1 The imaxabs function
+#define imaxabs _abs64
+
+// 7.8.2.2 The imaxdiv function
+
+// This is modified version of div() function from Microsoft's div.c found
+// in %MSVC.NET%\crt\src\div.c
+#ifdef STATIC_IMAXDIV // [
+static
+#else // STATIC_IMAXDIV ][
+_inline
+#endif // STATIC_IMAXDIV ]
+imaxdiv_t __cdecl imaxdiv(intmax_t numer, intmax_t denom)
+{
+ imaxdiv_t result;
+
+ result.quot = numer / denom;
+ result.rem = numer % denom;
+
+ if (numer < 0 && result.rem > 0) {
+ // did division wrong; must fix up
+ ++result.quot;
+ result.rem -= denom;
+ }
+
+ return result;
+}
+
+// 7.8.2.3 The strtoimax and strtoumax functions
+#define strtoimax _strtoi64
+#define strtoumax _strtoui64
+
+// 7.8.2.4 The wcstoimax and wcstoumax functions
+#define wcstoimax _wcstoi64
+#define wcstoumax _wcstoui64
+
+#endif // _MSC_VER >= 1800
+
+#endif // _MSC_INTTYPES_H_ ]
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_msinttypes_stdint.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_msinttypes_stdint.h
new file mode 100644
index 00000000..3d4477b9
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_msinttypes_stdint.h
@@ -0,0 +1,300 @@
+// ISO C9x compliant stdint.h for Microsoft Visual Studio
+// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
+//
+// Copyright (c) 2006-2013 Alexander Chemeris
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the product nor the names of its contributors may
+// be used to endorse or promote products derived from this software
+// without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+// The above software in this distribution may have been modified by
+// THL A29 Limited ("Tencent Modifications").
+// All Tencent Modifications are Copyright (C) 2015 THL A29 Limited.
+
+#ifndef _MSC_VER // [
+#error "Use this header only with Microsoft Visual C++ compilers!"
+#endif // _MSC_VER ]
+
+#ifndef _MSC_STDINT_H_ // [
+#define _MSC_STDINT_H_
+
+#if _MSC_VER > 1000
+#pragma once
+#endif
+
+// miloyip: Originally Visual Studio 2010 uses its own stdint.h. However it generates warning with INT64_C(), so change to use this file for vs2010.
+#if _MSC_VER >= 1600 // [
+#include <stdint.h>
+
+#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260
+
+#undef INT8_C
+#undef INT16_C
+#undef INT32_C
+#undef INT64_C
+#undef UINT8_C
+#undef UINT16_C
+#undef UINT32_C
+#undef UINT64_C
+
+// 7.18.4.1 Macros for minimum-width integer constants
+
+#define INT8_C(val) val##i8
+#define INT16_C(val) val##i16
+#define INT32_C(val) val##i32
+#define INT64_C(val) val##i64
+
+#define UINT8_C(val) val##ui8
+#define UINT16_C(val) val##ui16
+#define UINT32_C(val) val##ui32
+#define UINT64_C(val) val##ui64
+
+// 7.18.4.2 Macros for greatest-width integer constants
+// These #ifndef's are needed to prevent collisions with <boost/cstdint.hpp>.
+// Check out Issue 9 for the details.
+#ifndef INTMAX_C // [
+# define INTMAX_C INT64_C
+#endif // INTMAX_C ]
+#ifndef UINTMAX_C // [
+# define UINTMAX_C UINT64_C
+#endif // UINTMAX_C ]
+
+#endif // __STDC_CONSTANT_MACROS ]
+
+#else // ] _MSC_VER >= 1700 [
+
+#include <limits.h>
+
+// For Visual Studio 6 in C++ mode and for many Visual Studio versions when
+// compiling for ARM we have to wrap <wchar.h> include with 'extern "C++" {}'
+// or compiler would give many errors like this:
+// error C2733: second C linkage of overloaded function 'wmemchr' not allowed
+#if defined(__cplusplus) && !defined(_M_ARM)
+extern "C" {
+#endif
+# include <wchar.h>
+#if defined(__cplusplus) && !defined(_M_ARM)
+}
+#endif
+
+// Define _W64 macros to mark types changing their size, like intptr_t.
+#ifndef _W64
+# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
+# define _W64 __w64
+# else
+# define _W64
+# endif
+#endif
+
+
+// 7.18.1 Integer types
+
+// 7.18.1.1 Exact-width integer types
+
+// Visual Studio 6 and Embedded Visual C++ 4 doesn't
+// realize that, e.g. char has the same size as __int8
+// so we give up on __intX for them.
+#if (_MSC_VER < 1300)
+ typedef signed char int8_t;
+ typedef signed short int16_t;
+ typedef signed int int32_t;
+ typedef unsigned char uint8_t;
+ typedef unsigned short uint16_t;
+ typedef unsigned int uint32_t;
+#else
+ typedef signed __int8 int8_t;
+ typedef signed __int16 int16_t;
+ typedef signed __int32 int32_t;
+ typedef unsigned __int8 uint8_t;
+ typedef unsigned __int16 uint16_t;
+ typedef unsigned __int32 uint32_t;
+#endif
+typedef signed __int64 int64_t;
+typedef unsigned __int64 uint64_t;
+
+
+// 7.18.1.2 Minimum-width integer types
+typedef int8_t int_least8_t;
+typedef int16_t int_least16_t;
+typedef int32_t int_least32_t;
+typedef int64_t int_least64_t;
+typedef uint8_t uint_least8_t;
+typedef uint16_t uint_least16_t;
+typedef uint32_t uint_least32_t;
+typedef uint64_t uint_least64_t;
+
+// 7.18.1.3 Fastest minimum-width integer types
+typedef int8_t int_fast8_t;
+typedef int16_t int_fast16_t;
+typedef int32_t int_fast32_t;
+typedef int64_t int_fast64_t;
+typedef uint8_t uint_fast8_t;
+typedef uint16_t uint_fast16_t;
+typedef uint32_t uint_fast32_t;
+typedef uint64_t uint_fast64_t;
+
+// 7.18.1.4 Integer types capable of holding object pointers
+#ifdef _WIN64 // [
+ typedef signed __int64 intptr_t;
+ typedef unsigned __int64 uintptr_t;
+#else // _WIN64 ][
+ typedef _W64 signed int intptr_t;
+ typedef _W64 unsigned int uintptr_t;
+#endif // _WIN64 ]
+
+// 7.18.1.5 Greatest-width integer types
+typedef int64_t intmax_t;
+typedef uint64_t uintmax_t;
+
+
+// 7.18.2 Limits of specified-width integer types
+
+#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259
+
+// 7.18.2.1 Limits of exact-width integer types
+#define INT8_MIN ((int8_t)_I8_MIN)
+#define INT8_MAX _I8_MAX
+#define INT16_MIN ((int16_t)_I16_MIN)
+#define INT16_MAX _I16_MAX
+#define INT32_MIN ((int32_t)_I32_MIN)
+#define INT32_MAX _I32_MAX
+#define INT64_MIN ((int64_t)_I64_MIN)
+#define INT64_MAX _I64_MAX
+#define UINT8_MAX _UI8_MAX
+#define UINT16_MAX _UI16_MAX
+#define UINT32_MAX _UI32_MAX
+#define UINT64_MAX _UI64_MAX
+
+// 7.18.2.2 Limits of minimum-width integer types
+#define INT_LEAST8_MIN INT8_MIN
+#define INT_LEAST8_MAX INT8_MAX
+#define INT_LEAST16_MIN INT16_MIN
+#define INT_LEAST16_MAX INT16_MAX
+#define INT_LEAST32_MIN INT32_MIN
+#define INT_LEAST32_MAX INT32_MAX
+#define INT_LEAST64_MIN INT64_MIN
+#define INT_LEAST64_MAX INT64_MAX
+#define UINT_LEAST8_MAX UINT8_MAX
+#define UINT_LEAST16_MAX UINT16_MAX
+#define UINT_LEAST32_MAX UINT32_MAX
+#define UINT_LEAST64_MAX UINT64_MAX
+
+// 7.18.2.3 Limits of fastest minimum-width integer types
+#define INT_FAST8_MIN INT8_MIN
+#define INT_FAST8_MAX INT8_MAX
+#define INT_FAST16_MIN INT16_MIN
+#define INT_FAST16_MAX INT16_MAX
+#define INT_FAST32_MIN INT32_MIN
+#define INT_FAST32_MAX INT32_MAX
+#define INT_FAST64_MIN INT64_MIN
+#define INT_FAST64_MAX INT64_MAX
+#define UINT_FAST8_MAX UINT8_MAX
+#define UINT_FAST16_MAX UINT16_MAX
+#define UINT_FAST32_MAX UINT32_MAX
+#define UINT_FAST64_MAX UINT64_MAX
+
+// 7.18.2.4 Limits of integer types capable of holding object pointers
+#ifdef _WIN64 // [
+# define INTPTR_MIN INT64_MIN
+# define INTPTR_MAX INT64_MAX
+# define UINTPTR_MAX UINT64_MAX
+#else // _WIN64 ][
+# define INTPTR_MIN INT32_MIN
+# define INTPTR_MAX INT32_MAX
+# define UINTPTR_MAX UINT32_MAX
+#endif // _WIN64 ]
+
+// 7.18.2.5 Limits of greatest-width integer types
+#define INTMAX_MIN INT64_MIN
+#define INTMAX_MAX INT64_MAX
+#define UINTMAX_MAX UINT64_MAX
+
+// 7.18.3 Limits of other integer types
+
+#ifdef _WIN64 // [
+# define PTRDIFF_MIN _I64_MIN
+# define PTRDIFF_MAX _I64_MAX
+#else // _WIN64 ][
+# define PTRDIFF_MIN _I32_MIN
+# define PTRDIFF_MAX _I32_MAX
+#endif // _WIN64 ]
+
+#define SIG_ATOMIC_MIN INT_MIN
+#define SIG_ATOMIC_MAX INT_MAX
+
+#ifndef SIZE_MAX // [
+# ifdef _WIN64 // [
+# define SIZE_MAX _UI64_MAX
+# else // _WIN64 ][
+# define SIZE_MAX _UI32_MAX
+# endif // _WIN64 ]
+#endif // SIZE_MAX ]
+
+// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
+#ifndef WCHAR_MIN // [
+# define WCHAR_MIN 0
+#endif // WCHAR_MIN ]
+#ifndef WCHAR_MAX // [
+# define WCHAR_MAX _UI16_MAX
+#endif // WCHAR_MAX ]
+
+#define WINT_MIN 0
+#define WINT_MAX _UI16_MAX
+
+#endif // __STDC_LIMIT_MACROS ]
+
+
+// 7.18.4 Limits of other integer types
+
+#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260
+
+// 7.18.4.1 Macros for minimum-width integer constants
+
+#define INT8_C(val) val##i8
+#define INT16_C(val) val##i16
+#define INT32_C(val) val##i32
+#define INT64_C(val) val##i64
+
+#define UINT8_C(val) val##ui8
+#define UINT16_C(val) val##ui16
+#define UINT32_C(val) val##ui32
+#define UINT64_C(val) val##ui64
+
+// 7.18.4.2 Macros for greatest-width integer constants
+// These #ifndef's are needed to prevent collisions with <boost/cstdint.hpp>.
+// Check out Issue 9 for the details.
+#ifndef INTMAX_C // [
+# define INTMAX_C INT64_C
+#endif // INTMAX_C ]
+#ifndef UINTMAX_C // [
+# define UINTMAX_C UINT64_C
+#endif // UINTMAX_C ]
+
+#endif // __STDC_CONSTANT_MACROS ]
+
+#endif // _MSC_VER >= 1600 ]
+
+#endif // _MSC_STDINT_H_ ]
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_ostreamwrapper.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_ostreamwrapper.h
new file mode 100644
index 00000000..1a606077
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_ostreamwrapper.h
@@ -0,0 +1,81 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_OSTREAMWRAPPER_H_
+#define RAPIDJSON_OSTREAMWRAPPER_H_
+
+#include "lottie_rapidjson_stream.h"
+#include <iosfwd>
+
+#ifdef __clang__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(padded)
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+
+//! Wrapper of \c std::basic_ostream into RapidJSON's Stream concept.
+/*!
+ The classes can be wrapped including but not limited to:
+
+ - \c std::ostringstream
+ - \c std::stringstream
+ - \c std::wostringstream
+ - \c std::wstringstream
+ - \c std::ofstream
+ - \c std::fstream
+ - \c std::wofstream
+ - \c std::wfstream
+
+ \tparam StreamType Class derived from \c std::basic_ostream.
+*/
+
+template <typename StreamType>
+class BasicOStreamWrapper {
+public:
+ typedef typename StreamType::char_type Ch;
+ BasicOStreamWrapper(StreamType& stream) : stream_(stream) {}
+
+ void Put(Ch c) {
+ stream_.put(c);
+ }
+
+ void Flush() {
+ stream_.flush();
+ }
+
+ // Not implemented
+ char Peek() const { RAPIDJSON_ASSERT(false); return 0; }
+ char Take() { RAPIDJSON_ASSERT(false); return 0; }
+ size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; }
+ char* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
+ size_t PutEnd(char*) { RAPIDJSON_ASSERT(false); return 0; }
+
+private:
+ BasicOStreamWrapper(const BasicOStreamWrapper&);
+ BasicOStreamWrapper& operator=(const BasicOStreamWrapper&);
+
+ StreamType& stream_;
+};
+
+typedef BasicOStreamWrapper<std::ostream> OStreamWrapper;
+typedef BasicOStreamWrapper<std::wostream> WOStreamWrapper;
+
+#ifdef __clang__
+RAPIDJSON_DIAG_POP
+#endif
+
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_OSTREAMWRAPPER_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_pointer.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_pointer.h
new file mode 100644
index 00000000..82b25eb9
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_pointer.h
@@ -0,0 +1,1415 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_POINTER_H_
+#define RAPIDJSON_POINTER_H_
+
+#include "lottie_rapidjson_document.h"
+#include "lottie_rapidjson_internal_itoa.h"
+
+#ifdef __clang__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(switch-enum)
+#elif defined(_MSC_VER)
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+
+static const SizeType kPointerInvalidIndex = ~SizeType(0); //!< Represents an invalid index in GenericPointer::Token
+
+//! Error code of parsing.
+/*! \ingroup RAPIDJSON_ERRORS
+    \see GenericPointer::GenericPointer, GenericPointer::GetParseErrorCode
+*/
+enum PointerParseErrorCode {
+    kPointerParseErrorNone = 0,                     //!< The parse is successful
+
+    kPointerParseErrorTokenMustBeginWithSolidus,    //!< A token must begin with a '/'
+    kPointerParseErrorInvalidEscape,                //!< Invalid escape
+    kPointerParseErrorInvalidPercentEncoding,       //!< Invalid percent encoding in URI fragment
+    kPointerParseErrorCharacterMustPercentEncode    //!< A character must be percent encoded in URI fragment
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// GenericPointer
+
+//! Represents a JSON Pointer. Use Pointer for UTF8 encoding and default allocator.
+/*!
+ This class implements RFC 6901 "JavaScript Object Notation (JSON) Pointer"
+ (https://tools.ietf.org/html/rfc6901).
+
+ A JSON pointer is for identifying a specific value in a JSON document
+ (GenericDocument). It can simplify coding of DOM tree manipulation, because it
+ can access multiple-level depth of DOM tree with single API call.
+
+ After it parses a string representation (e.g. "/foo/0" or URI fragment
+ representation (e.g. "#/foo/0") into its internal representation (tokens),
+ it can be used to resolve a specific value in multiple documents, or sub-tree
+ of documents.
+
+ Contrary to GenericValue, Pointer can be copy constructed and copy assigned.
+ Apart from assignment, a Pointer cannot be modified after construction.
+
+    Although Pointer is very convenient, please be aware that constructing Pointer
+ involves parsing and dynamic memory allocation. A special constructor with user-
+ supplied tokens eliminates these.
+
+ GenericPointer depends on GenericDocument and GenericValue.
+
+ \tparam ValueType The value type of the DOM tree. E.g. GenericValue<UTF8<> >
+ \tparam Allocator The allocator type for allocating memory for internal representation.
+
+ \note GenericPointer uses same encoding of ValueType.
+ However, Allocator of GenericPointer is independent of Allocator of Value.
+*/
+template <typename ValueType, typename Allocator = CrtAllocator>
+class GenericPointer {
+public:
+ typedef typename ValueType::EncodingType EncodingType; //!< Encoding type from Value
+ typedef typename ValueType::Ch Ch; //!< Character type from Value
+
+ //! A token is the basic units of internal representation.
+ /*!
+ A JSON pointer string representation "/foo/123" is parsed to two tokens:
+ "foo" and 123. 123 will be represented in both numeric form and string form.
+ They are resolved according to the actual value type (object or array).
+
+ For token that are not numbers, or the numeric value is out of bound
+ (greater than limits of SizeType), they are only treated as string form
+ (i.e. the token's index will be equal to kPointerInvalidIndex).
+
+ This struct is public so that user can create a Pointer without parsing and
+ allocation, using a special constructor.
+ */
+ struct Token {
+ const Ch* name; //!< Name of the token. It has null character at the end but it can contain null character.
+ SizeType length; //!< Length of the name.
+ SizeType index; //!< A valid array index, if it is not equal to kPointerInvalidIndex.
+ };
+
+ //!@name Constructors and destructor.
+ //@{
+
+ //! Default constructor.
+ GenericPointer(Allocator* allocator = 0) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) {}
+
+ //! Constructor that parses a string or URI fragment representation.
+ /*!
+ \param source A null-terminated, string or URI fragment representation of JSON pointer.
+ \param allocator User supplied allocator for this pointer. If no allocator is provided, it creates a self-owned one.
+ */
+ explicit GenericPointer(const Ch* source, Allocator* allocator = 0) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) {
+ Parse(source, internal::StrLen(source));
+ }
+
+#if RAPIDJSON_HAS_STDSTRING
+ //! Constructor that parses a string or URI fragment representation.
+ /*!
+ \param source A string or URI fragment representation of JSON pointer.
+ \param allocator User supplied allocator for this pointer. If no allocator is provided, it creates a self-owned one.
+ \note Requires the definition of the preprocessor symbol \ref RAPIDJSON_HAS_STDSTRING.
+ */
+ explicit GenericPointer(const std::basic_string<Ch>& source, Allocator* allocator = 0) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) {
+ Parse(source.c_str(), source.size());
+ }
+#endif
+
+ //! Constructor that parses a string or URI fragment representation, with length of the source string.
+ /*!
+ \param source A string or URI fragment representation of JSON pointer.
+ \param length Length of source.
+ \param allocator User supplied allocator for this pointer. If no allocator is provided, it creates a self-owned one.
+ \note Slightly faster than the overload without length.
+ */
+ GenericPointer(const Ch* source, size_t length, Allocator* allocator = 0) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) {
+ Parse(source, length);
+ }
+
+ //! Constructor with user-supplied tokens.
+ /*!
+ This constructor let user supplies const array of tokens.
+ This prevents the parsing process and eliminates allocation.
+ This is preferred for memory constrained environments.
+
+ \param tokens An constant array of tokens representing the JSON pointer.
+ \param tokenCount Number of tokens.
+
+ \b Example
+ \code
+ #define NAME(s) { s, sizeof(s) / sizeof(s[0]) - 1, kPointerInvalidIndex }
+ #define INDEX(i) { #i, sizeof(#i) - 1, i }
+
+ static const Pointer::Token kTokens[] = { NAME("foo"), INDEX(123) };
+ static const Pointer p(kTokens, sizeof(kTokens) / sizeof(kTokens[0]));
+ // Equivalent to static const Pointer p("/foo/123");
+
+ #undef NAME
+ #undef INDEX
+ \endcode
+ */
+ GenericPointer(const Token* tokens, size_t tokenCount) : allocator_(), ownAllocator_(), nameBuffer_(), tokens_(const_cast<Token*>(tokens)), tokenCount_(tokenCount), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) {}
+
+ //! Copy constructor.
+ GenericPointer(const GenericPointer& rhs) : allocator_(rhs.allocator_), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) {
+ *this = rhs;
+ }
+
+ //! Copy constructor.
+ GenericPointer(const GenericPointer& rhs, Allocator* allocator) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) {
+ *this = rhs;
+ }
+
+ //! Destructor.
+ ~GenericPointer() {
+ if (nameBuffer_) // If user-supplied tokens constructor is used, nameBuffer_ is nullptr and tokens_ are not deallocated.
+ Allocator::Free(tokens_);
+ RAPIDJSON_DELETE(ownAllocator_);
+ }
+
+    //! Assignment operator.
+    GenericPointer& operator=(const GenericPointer& rhs) {
+        if (this != &rhs) { // self-assignment guard
+            // Do not delete ownAllocator_; it is kept and reused for the copy.
+            if (nameBuffer_) // non-null nameBuffer_ means tokens_ was allocated by us and must be freed
+                Allocator::Free(tokens_);
+
+            tokenCount_ = rhs.tokenCount_;
+            parseErrorOffset_ = rhs.parseErrorOffset_;
+            parseErrorCode_ = rhs.parseErrorCode_;
+
+            if (rhs.nameBuffer_)
+                CopyFromRaw(rhs); // Normally parsed tokens: deep-copy tokens and names.
+            else {
+                tokens_ = rhs.tokens_; // User supplied const tokens: shared, not owned.
+                nameBuffer_ = 0;       // Null nameBuffer_ tells the destructor not to free tokens_.
+            }
+        }
+        return *this;
+    }
+
+ //! Swap the content of this pointer with an other.
+ /*!
+ \param other The pointer to swap with.
+ \note Constant complexity.
+ */
+ GenericPointer& Swap(GenericPointer& other) RAPIDJSON_NOEXCEPT {
+ internal::Swap(allocator_, other.allocator_);
+ internal::Swap(ownAllocator_, other.ownAllocator_);
+ internal::Swap(nameBuffer_, other.nameBuffer_);
+ internal::Swap(tokens_, other.tokens_);
+ internal::Swap(tokenCount_, other.tokenCount_);
+ internal::Swap(parseErrorOffset_, other.parseErrorOffset_);
+ internal::Swap(parseErrorCode_, other.parseErrorCode_);
+ return *this;
+ }
+
+ //! free-standing swap function helper
+ /*!
+ Helper function to enable support for common swap implementation pattern based on \c std::swap:
+ \code
+ void swap(MyClass& a, MyClass& b) {
+ using std::swap;
+ swap(a.pointer, b.pointer);
+ // ...
+ }
+ \endcode
+ \see Swap()
+ */
+ friend inline void swap(GenericPointer& a, GenericPointer& b) RAPIDJSON_NOEXCEPT { a.Swap(b); }
+
+ //@}
+
+ //!@name Append token
+ //@{
+
+ //! Append a token and return a new Pointer
+ /*!
+ \param token Token to be appended.
+ \param allocator Allocator for the newly return Pointer.
+ \return A new Pointer with appended token.
+ */
+ GenericPointer Append(const Token& token, Allocator* allocator = 0) const {
+ GenericPointer r;
+ r.allocator_ = allocator;
+ Ch *p = r.CopyFromRaw(*this, 1, token.length + 1);
+ std::memcpy(p, token.name, (token.length + 1) * sizeof(Ch));
+ r.tokens_[tokenCount_].name = p;
+ r.tokens_[tokenCount_].length = token.length;
+ r.tokens_[tokenCount_].index = token.index;
+ return r;
+ }
+
+ //! Append a name token with length, and return a new Pointer
+ /*!
+ \param name Name to be appended.
+ \param length Length of name.
+ \param allocator Allocator for the newly return Pointer.
+ \return A new Pointer with appended token.
+ */
+ GenericPointer Append(const Ch* name, SizeType length, Allocator* allocator = 0) const {
+ Token token = { name, length, kPointerInvalidIndex };
+ return Append(token, allocator);
+ }
+
+ //! Append a name token without length, and return a new Pointer
+ /*!
+ \param name Name (const Ch*) to be appended.
+ \param allocator Allocator for the newly return Pointer.
+ \return A new Pointer with appended token.
+ */
+ template <typename T>
+ RAPIDJSON_DISABLEIF_RETURN((internal::NotExpr<internal::IsSame<typename internal::RemoveConst<T>::Type, Ch> >), (GenericPointer))
+ Append(T* name, Allocator* allocator = 0) const {
+ return Append(name, internal::StrLen(name), allocator);
+ }
+
+#if RAPIDJSON_HAS_STDSTRING
+ //! Append a name token, and return a new Pointer
+ /*!
+ \param name Name to be appended.
+ \param allocator Allocator for the newly return Pointer.
+ \return A new Pointer with appended token.
+ */
+ GenericPointer Append(const std::basic_string<Ch>& name, Allocator* allocator = 0) const {
+ return Append(name.c_str(), static_cast<SizeType>(name.size()), allocator);
+ }
+#endif
+
+    //! Append an index token, and return a new Pointer
+    /*!
+        \param index Index to be appended.
+        \param allocator Allocator for the newly returned Pointer.
+        \return A new Pointer with appended token.
+    */
+    GenericPointer Append(SizeType index, Allocator* allocator = 0) const {
+        char buffer[21]; // Large enough for a 64-bit unsigned decimal (20 digits) plus '\0'.
+        char* end = sizeof(SizeType) == 4 ? internal::u32toa(index, buffer) : internal::u64toa(index, buffer);
+        SizeType length = static_cast<SizeType>(end - buffer);
+        buffer[length] = '\0';
+
+        if (sizeof(Ch) == 1) {
+            // Byte-sized Ch: reuse the char buffer directly. Pointing the Token at a
+            // stack buffer is safe because Append(Token) copies the name into the new Pointer.
+            Token token = { reinterpret_cast<Ch*>(buffer), length, index };
+            return Append(token, allocator);
+        }
+        else {
+            // Wide Ch: widen each digit; "i <= length" also copies the terminating '\0'.
+            Ch name[21];
+            for (size_t i = 0; i <= length; i++)
+                name[i] = static_cast<Ch>(buffer[i]);
+            Token token = { name, length, index };
+            return Append(token, allocator);
+        }
+    }
+
+ //! Append a token by value, and return a new Pointer
+ /*!
+ \param token token to be appended.
+ \param allocator Allocator for the newly return Pointer.
+ \return A new Pointer with appended token.
+ */
+ GenericPointer Append(const ValueType& token, Allocator* allocator = 0) const {
+ if (token.IsString())
+ return Append(token.GetString(), token.GetStringLength(), allocator);
+ else {
+ RAPIDJSON_ASSERT(token.IsUint64());
+ RAPIDJSON_ASSERT(token.GetUint64() <= SizeType(~0));
+ return Append(static_cast<SizeType>(token.GetUint64()), allocator);
+ }
+ }
+
+ //!@name Handling Parse Error
+ //@{
+
+ //! Check whether this is a valid pointer.
+ bool IsValid() const { return parseErrorCode_ == kPointerParseErrorNone; }
+
+ //! Get the parsing error offset in code unit.
+ size_t GetParseErrorOffset() const { return parseErrorOffset_; }
+
+ //! Get the parsing error code.
+ PointerParseErrorCode GetParseErrorCode() const { return parseErrorCode_; }
+
+ //@}
+
+ //! Get the allocator of this pointer.
+ Allocator& GetAllocator() { return *allocator_; }
+
+ //!@name Tokens
+ //@{
+
+ //! Get the token array (const version only).
+ const Token* GetTokens() const { return tokens_; }
+
+ //! Get the number of tokens.
+ size_t GetTokenCount() const { return tokenCount_; }
+
+ //@}
+
+ //!@name Equality/inequality operators
+ //@{
+
+ //! Equality operator.
+ /*!
+ \note When any pointers are invalid, always returns false.
+ */
+ bool operator==(const GenericPointer& rhs) const {
+ if (!IsValid() || !rhs.IsValid() || tokenCount_ != rhs.tokenCount_)
+ return false;
+
+ for (size_t i = 0; i < tokenCount_; i++) {
+ if (tokens_[i].index != rhs.tokens_[i].index ||
+ tokens_[i].length != rhs.tokens_[i].length ||
+ (tokens_[i].length != 0 && std::memcmp(tokens_[i].name, rhs.tokens_[i].name, sizeof(Ch)* tokens_[i].length) != 0))
+ {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ //! Inequality operator.
+ /*!
+ \note When any pointers are invalid, always returns true.
+ */
+ bool operator!=(const GenericPointer& rhs) const { return !(*this == rhs); }
+
+ //! Less than operator.
+ /*!
+ \note Invalid pointers are always greater than valid ones.
+ */
+ bool operator<(const GenericPointer& rhs) const {
+ if (!IsValid())
+ return false;
+ if (!rhs.IsValid())
+ return true;
+
+ if (tokenCount_ != rhs.tokenCount_)
+ return tokenCount_ < rhs.tokenCount_;
+
+ for (size_t i = 0; i < tokenCount_; i++) {
+ if (tokens_[i].index != rhs.tokens_[i].index)
+ return tokens_[i].index < rhs.tokens_[i].index;
+
+ if (tokens_[i].length != rhs.tokens_[i].length)
+ return tokens_[i].length < rhs.tokens_[i].length;
+
+ if (int cmp = std::memcmp(tokens_[i].name, rhs.tokens_[i].name, sizeof(Ch) * tokens_[i].length))
+ return cmp < 0;
+ }
+
+ return false;
+ }
+
+ //@}
+
+ //!@name Stringify
+ //@{
+
+ //! Stringify the pointer into string representation.
+ /*!
+ \tparam OutputStream Type of output stream.
+ \param os The output stream.
+ */
+ template<typename OutputStream>
+ bool Stringify(OutputStream& os) const {
+ return Stringify<false, OutputStream>(os);
+ }
+
+ //! Stringify the pointer into URI fragment representation.
+ /*!
+ \tparam OutputStream Type of output stream.
+ \param os The output stream.
+ */
+ template<typename OutputStream>
+ bool StringifyUriFragment(OutputStream& os) const {
+ return Stringify<true, OutputStream>(os);
+ }
+
+ //@}
+
+ //!@name Create value
+ //@{
+
+    //! Create a value in a subtree.
+    /*!
+        If the value does not exist, it creates all parent values and a JSON Null value.
+        So it always succeeds and returns the newly created or existing value.
+
+        Bear in mind that it may change types of parents according to tokens, so it
+        potentially removes previously stored values. For example, if a document
+        was an array, and "/foo" is used to create a value, then the document
+        will be changed to an object, and all existing array elements are lost.
+
+        \param root Root value of a DOM subtree to be resolved. It can be any value other than document root.
+        \param allocator Allocator for creating the values if the specified value or its parents do not exist.
+        \param alreadyExist If non-null, it stores whether the resolved value already exists.
+        \return The resolved newly created (a JSON Null value), or already existing value.
+    */
+    ValueType& Create(ValueType& root, typename ValueType::AllocatorType& allocator, bool* alreadyExist = 0) const {
+        RAPIDJSON_ASSERT(IsValid());
+        ValueType* v = &root;
+        bool exist = true;
+        for (const Token *t = tokens_; t != tokens_ + tokenCount_; ++t) {
+            // RFC 6901: the token "-" on an array refers to the (nonexistent)
+            // element after the last one, i.e. append a new element.
+            if (v->IsArray() && t->name[0] == '-' && t->length == 1) {
+                v->PushBack(ValueType().Move(), allocator);
+                v = &((*v)[v->Size() - 1]);
+                exist = false;
+            }
+            else {
+                if (t->index == kPointerInvalidIndex) { // must be object name
+                    if (!v->IsObject())
+                        v->SetObject(); // Change to Object
+                }
+                else { // object name or array index
+                    if (!v->IsArray() && !v->IsObject())
+                        v->SetArray(); // Change to Array
+                }
+
+                if (v->IsArray()) {
+                    if (t->index >= v->Size()) {
+                        v->Reserve(t->index + 1, allocator);
+                        while (t->index >= v->Size()) // pad with JSON Nulls up to the requested index
+                            v->PushBack(ValueType().Move(), allocator);
+                        exist = false;
+                    }
+                    v = &((*v)[t->index]);
+                }
+                else {
+                    typename ValueType::MemberIterator m = v->FindMember(GenericValue<EncodingType>(GenericStringRef<Ch>(t->name, t->length)));
+                    if (m == v->MemberEnd()) {
+                        v->AddMember(ValueType(t->name, t->length, allocator).Move(), ValueType().Move(), allocator);
+                        m = v->MemberEnd();
+                        v = &(--m)->value; // Assumes AddMember() appends at the end
+                        exist = false;
+                    }
+                    else
+                        v = &m->value;
+                }
+            }
+        }
+
+        if (alreadyExist)
+            *alreadyExist = exist;
+
+        return *v;
+    }
+
+ //! Creates a value in a document.
+ /*!
+ \param document A document to be resolved.
+ \param alreadyExist If non-null, it stores whether the resolved value is already exist.
+ \return The resolved newly created, or already exists value.
+ */
+ template <typename stackAllocator>
+ ValueType& Create(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, bool* alreadyExist = 0) const {
+ return Create(document, document.GetAllocator(), alreadyExist);
+ }
+
+ //@}
+
+ //!@name Query value
+ //@{
+
+ //! Query a value in a subtree.
+ /*!
+ \param root Root value of a DOM sub-tree to be resolved. It can be any value other than document root.
+ \param unresolvedTokenIndex If the pointer cannot resolve a token in the pointer, this parameter can obtain the index of unresolved token.
+ \return Pointer to the value if it can be resolved. Otherwise null.
+
+ \note
+ There are only 3 situations when a value cannot be resolved:
+ 1. A value in the path is not an array nor object.
+ 2. An object value does not contain the token.
+ 3. A token is out of range of an array value.
+
+ Use unresolvedTokenIndex to retrieve the token index.
+ */
+ ValueType* Get(ValueType& root, size_t* unresolvedTokenIndex = 0) const {
+ RAPIDJSON_ASSERT(IsValid());
+ ValueType* v = &root;
+ for (const Token *t = tokens_; t != tokens_ + tokenCount_; ++t) {
+ switch (v->GetType()) {
+ case kObjectType:
+ {
+ typename ValueType::MemberIterator m = v->FindMember(GenericValue<EncodingType>(GenericStringRef<Ch>(t->name, t->length)));
+ if (m == v->MemberEnd())
+ break;
+ v = &m->value;
+ }
+ continue;
+ case kArrayType:
+ if (t->index == kPointerInvalidIndex || t->index >= v->Size())
+ break;
+ v = &((*v)[t->index]);
+ continue;
+ default:
+ break;
+ }
+
+ // Error: unresolved token
+ if (unresolvedTokenIndex)
+ *unresolvedTokenIndex = static_cast<size_t>(t - tokens_);
+ return 0;
+ }
+ return v;
+ }
+
+ //! Query a const value in a const subtree.
+ /*!
+ \param root Root value of a DOM sub-tree to be resolved. It can be any value other than document root.
+ \return Pointer to the value if it can be resolved. Otherwise null.
+ */
+ const ValueType* Get(const ValueType& root, size_t* unresolvedTokenIndex = 0) const {
+ return Get(const_cast<ValueType&>(root), unresolvedTokenIndex);
+ }
+
+ //@}
+
+ //!@name Query a value with default
+ //@{
+
+ //! Query a value in a subtree with default value.
+ /*!
+ Similar to Get(), but if the specified value do not exists, it creates all parents and clone the default value.
+ So that this function always succeed.
+
+ \param root Root value of a DOM sub-tree to be resolved. It can be any value other than document root.
+ \param defaultValue Default value to be cloned if the value was not exists.
+ \param allocator Allocator for creating the values if the specified value or its parents are not exist.
+ \see Create()
+ */
+ ValueType& GetWithDefault(ValueType& root, const ValueType& defaultValue, typename ValueType::AllocatorType& allocator) const {
+ bool alreadyExist;
+ ValueType& v = Create(root, allocator, &alreadyExist);
+ return alreadyExist ? v : v.CopyFrom(defaultValue, allocator);
+ }
+
+ //! Query a value in a subtree with default null-terminated string.
+ ValueType& GetWithDefault(ValueType& root, const Ch* defaultValue, typename ValueType::AllocatorType& allocator) const {
+ bool alreadyExist;
+ ValueType& v = Create(root, allocator, &alreadyExist);
+ return alreadyExist ? v : v.SetString(defaultValue, allocator);
+ }
+
+#if RAPIDJSON_HAS_STDSTRING
+ //! Query a value in a subtree with default std::basic_string.
+ ValueType& GetWithDefault(ValueType& root, const std::basic_string<Ch>& defaultValue, typename ValueType::AllocatorType& allocator) const {
+ bool alreadyExist;
+ ValueType& v = Create(root, allocator, &alreadyExist);
+ return alreadyExist ? v : v.SetString(defaultValue, allocator);
+ }
+#endif
+
+ //! Query a value in a subtree with default primitive value.
+ /*!
+ \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t, \c bool
+ */
+ template <typename T>
+ RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>, internal::IsGenericValue<T> >), (ValueType&))
+ GetWithDefault(ValueType& root, T defaultValue, typename ValueType::AllocatorType& allocator) const {
+ return GetWithDefault(root, ValueType(defaultValue).Move(), allocator);
+ }
+
+ //! Query a value in a document with default value.
+ template <typename stackAllocator>
+ ValueType& GetWithDefault(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, const ValueType& defaultValue) const {
+ return GetWithDefault(document, defaultValue, document.GetAllocator());
+ }
+
+ //! Query a value in a document with default null-terminated string.
+ template <typename stackAllocator>
+ ValueType& GetWithDefault(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, const Ch* defaultValue) const {
+ return GetWithDefault(document, defaultValue, document.GetAllocator());
+ }
+
+#if RAPIDJSON_HAS_STDSTRING
+ //! Query a value in a document with default std::basic_string.
+ template <typename stackAllocator>
+ ValueType& GetWithDefault(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, const std::basic_string<Ch>& defaultValue) const {
+ return GetWithDefault(document, defaultValue, document.GetAllocator());
+ }
+#endif
+
+ //! Query a value in a document with default primitive value.
+ /*!
+ \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t, \c bool
+ */
+ template <typename T, typename stackAllocator>
+ RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>, internal::IsGenericValue<T> >), (ValueType&))
+ GetWithDefault(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, T defaultValue) const {
+ return GetWithDefault(document, defaultValue, document.GetAllocator());
+ }
+
+ //@}
+
+ //!@name Set a value
+ //@{
+
+ //! Set a value in a subtree, with move semantics.
+ /*!
+ It creates all parents if they are not exist or types are different to the tokens.
+ So this function always succeeds but potentially remove existing values.
+
+ \param root Root value of a DOM sub-tree to be resolved. It can be any value other than document root.
+ \param value Value to be set.
+ \param allocator Allocator for creating the values if the specified value or its parents are not exist.
+ \see Create()
+ */
+ ValueType& Set(ValueType& root, ValueType& value, typename ValueType::AllocatorType& allocator) const {
+ return Create(root, allocator) = value;
+ }
+
+ //! Set a value in a subtree, with copy semantics.
+ ValueType& Set(ValueType& root, const ValueType& value, typename ValueType::AllocatorType& allocator) const {
+ return Create(root, allocator).CopyFrom(value, allocator);
+ }
+
+ //! Set a null-terminated string in a subtree.
+ ValueType& Set(ValueType& root, const Ch* value, typename ValueType::AllocatorType& allocator) const {
+ return Create(root, allocator) = ValueType(value, allocator).Move();
+ }
+
+#if RAPIDJSON_HAS_STDSTRING
+ //! Set a std::basic_string in a subtree.
+ ValueType& Set(ValueType& root, const std::basic_string<Ch>& value, typename ValueType::AllocatorType& allocator) const {
+ return Create(root, allocator) = ValueType(value, allocator).Move();
+ }
+#endif
+
+ //! Set a primitive value in a subtree.
+ /*!
+ \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t, \c bool
+ */
+ template <typename T>
+ RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>, internal::IsGenericValue<T> >), (ValueType&))
+ Set(ValueType& root, T value, typename ValueType::AllocatorType& allocator) const {
+ return Create(root, allocator) = ValueType(value).Move();
+ }
+
+ //! Set a value in a document, with move semantics.
+ template <typename stackAllocator>
+ ValueType& Set(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, ValueType& value) const {
+ return Create(document) = value;
+ }
+
+ //! Set a value in a document, with copy semantics.
+ template <typename stackAllocator>
+ ValueType& Set(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, const ValueType& value) const {
+ return Create(document).CopyFrom(value, document.GetAllocator());
+ }
+
+ //! Set a null-terminated string in a document.
+ template <typename stackAllocator>
+ ValueType& Set(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, const Ch* value) const {
+ return Create(document) = ValueType(value, document.GetAllocator()).Move();
+ }
+
+#if RAPIDJSON_HAS_STDSTRING
+ //! Sets a std::basic_string in a document.
+ template <typename stackAllocator>
+ ValueType& Set(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, const std::basic_string<Ch>& value) const {
+ return Create(document) = ValueType(value, document.GetAllocator()).Move();
+ }
+#endif
+
+ //! Set a primitive value in a document.
+ /*!
+ \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t, \c bool
+ */
+ template <typename T, typename stackAllocator>
+ RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>, internal::IsGenericValue<T> >), (ValueType&))
+ Set(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, T value) const {
+ return Create(document) = value;
+ }
+
+ //@}
+
+ //!@name Swap a value
+ //@{
+
+ //! Swap a value with a value in a subtree.
+ /*!
+ It creates all parents if they are not exist or types are different to the tokens.
+ So this function always succeeds but potentially remove existing values.
+
+ \param root Root value of a DOM sub-tree to be resolved. It can be any value other than document root.
+ \param value Value to be swapped.
+ \param allocator Allocator for creating the values if the specified value or its parents are not exist.
+ \see Create()
+ */
+ ValueType& Swap(ValueType& root, ValueType& value, typename ValueType::AllocatorType& allocator) const {
+ return Create(root, allocator).Swap(value);
+ }
+
+ //! Swap a value with a value in a document.
+ template <typename stackAllocator>
+ ValueType& Swap(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, ValueType& value) const {
+ return Create(document).Swap(value);
+ }
+
+ //@}
+
+    //! Erase a value in a subtree.
+    /*!
+        \param root Root value of a DOM sub-tree to be resolved. It can be any value other than document root.
+        \return Whether the resolved value is found and erased.
+
+        \note Erasing with an empty pointer \c Pointer(""), i.e. the root, always fails and returns false.
+    */
+    bool Erase(ValueType& root) const {
+        RAPIDJSON_ASSERT(IsValid());
+        if (tokenCount_ == 0) // Cannot erase the root
+            return false;
+
+        // Walk down to the parent of the value addressed by the last token.
+        ValueType* v = &root;
+        const Token* last = tokens_ + (tokenCount_ - 1);
+        for (const Token *t = tokens_; t != last; ++t) {
+            switch (v->GetType()) {
+            case kObjectType:
+                {
+                    typename ValueType::MemberIterator m = v->FindMember(GenericValue<EncodingType>(GenericStringRef<Ch>(t->name, t->length)));
+                    if (m == v->MemberEnd())
+                        return false; // intermediate member missing: nothing to erase
+                    v = &m->value;
+                }
+                break;
+            case kArrayType:
+                if (t->index == kPointerInvalidIndex || t->index >= v->Size())
+                    return false; // non-numeric token or out-of-range index
+                v = &((*v)[t->index]);
+                break;
+            default:
+                return false; // scalar in the middle of the path: cannot descend
+            }
+        }
+
+        // Erase the value addressed by the last token from its object/array parent.
+        switch (v->GetType()) {
+        case kObjectType:
+            return v->EraseMember(GenericStringRef<Ch>(last->name, last->length));
+        case kArrayType:
+            if (last->index == kPointerInvalidIndex || last->index >= v->Size())
+                return false;
+            v->Erase(v->Begin() + last->index);
+            return true;
+        default:
+            return false;
+        }
+    }
+
+private:
+    //! Clone the content from rhs to this.
+    /*!
+        \param rhs Source pointer.
+        \param extraToken Extra tokens to be allocated.
+        \param extraNameBufferSize Extra name buffer size (in number of Ch) to be allocated.
+        \return Start of non-occupied name buffer, for storing extra names.
+    */
+    Ch* CopyFromRaw(const GenericPointer& rhs, size_t extraToken = 0, size_t extraNameBufferSize = 0) {
+        if (!allocator_) // allocator is independently owned.
+            ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator)();
+
+        size_t nameBufferSize = rhs.tokenCount_; // null terminators for tokens
+        for (Token *t = rhs.tokens_; t != rhs.tokens_ + rhs.tokenCount_; ++t)
+            nameBufferSize += t->length;
+
+        tokenCount_ = rhs.tokenCount_ + extraToken;
+        // A single allocation holds the Token array followed immediately by the name buffer.
+        tokens_ = static_cast<Token *>(allocator_->Malloc(tokenCount_ * sizeof(Token) + (nameBufferSize + extraNameBufferSize) * sizeof(Ch)));
+        nameBuffer_ = reinterpret_cast<Ch *>(tokens_ + tokenCount_);
+        if (rhs.tokenCount_ > 0) {
+            std::memcpy(tokens_, rhs.tokens_, rhs.tokenCount_ * sizeof(Token));
+        }
+        if (nameBufferSize > 0) {
+            std::memcpy(nameBuffer_, rhs.nameBuffer_, nameBufferSize * sizeof(Ch));
+        }
+
+        // Adjust pointers to name buffer: the memcpy'd Token::name fields still
+        // point into rhs's buffer, so rebase them by the buffer offset.
+        std::ptrdiff_t diff = nameBuffer_ - rhs.nameBuffer_;
+        for (Token *t = tokens_; t != tokens_ + rhs.tokenCount_; ++t)
+            t->name += diff;
+
+        return nameBuffer_ + nameBufferSize;
+    }
+
+ //! Check whether a character should be percent-encoded.
+ /*!
+ According to RFC 3986 2.3 Unreserved Characters.
+ \param c The character (code unit) to be tested.
+ */
+ bool NeedPercentEncode(Ch c) const {
+ return !((c >= '0' && c <= '9') || (c >= 'A' && c <='Z') || (c >= 'a' && c <= 'z') || c == '-' || c == '.' || c == '_' || c =='~');
+ }
+
+ //! Parse a JSON String or its URI fragment representation into tokens.
+#ifndef __clang__ // -Wdocumentation
+ /*!
+ \param source Either a JSON Pointer string, or its URI fragment representation. Not need to be null terminated.
+ \param length Length of the source string.
+ \note Source cannot be JSON String Representation of JSON Pointer, e.g. In "/\u0000", \u0000 will not be unescaped.
+ */
+#endif
+ void Parse(const Ch* source, size_t length) {
+ RAPIDJSON_ASSERT(source != NULL);
+ RAPIDJSON_ASSERT(nameBuffer_ == 0);
+ RAPIDJSON_ASSERT(tokens_ == 0);
+
+ // Create own allocator if user did not supply.
+ if (!allocator_)
+ ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator)();
+
+ // Count number of '/' as tokenCount
+ tokenCount_ = 0;
+ for (const Ch* s = source; s != source + length; s++)
+ if (*s == '/')
+ tokenCount_++;
+
+ Token* token = tokens_ = static_cast<Token *>(allocator_->Malloc(tokenCount_ * sizeof(Token) + length * sizeof(Ch)));
+ Ch* name = nameBuffer_ = reinterpret_cast<Ch *>(tokens_ + tokenCount_);
+ size_t i = 0;
+
+ // Detect if it is a URI fragment
+ bool uriFragment = false;
+ if (source[i] == '#') {
+ uriFragment = true;
+ i++;
+ }
+
+ if (i != length && source[i] != '/') {
+ parseErrorCode_ = kPointerParseErrorTokenMustBeginWithSolidus;
+ goto error;
+ }
+
+ while (i < length) {
+ RAPIDJSON_ASSERT(source[i] == '/');
+ i++; // consumes '/'
+
+ token->name = name;
+ bool isNumber = true;
+
+ while (i < length && source[i] != '/') {
+ Ch c = source[i];
+ if (uriFragment) {
+ // Decoding percent-encoding for URI fragment
+ if (c == '%') {
+ PercentDecodeStream is(&source[i], source + length);
+ GenericInsituStringStream<EncodingType> os(name);
+ Ch* begin = os.PutBegin();
+ if (!Transcoder<UTF8<>, EncodingType>().Validate(is, os) || !is.IsValid()) {
+ parseErrorCode_ = kPointerParseErrorInvalidPercentEncoding;
+ goto error;
+ }
+ size_t len = os.PutEnd(begin);
+ i += is.Tell() - 1;
+ if (len == 1)
+ c = *name;
+ else {
+ name += len;
+ isNumber = false;
+ i++;
+ continue;
+ }
+ }
+ else if (NeedPercentEncode(c)) {
+ parseErrorCode_ = kPointerParseErrorCharacterMustPercentEncode;
+ goto error;
+ }
+ }
+
+ i++;
+
+ // Escaping "~0" -> '~', "~1" -> '/'
+ if (c == '~') {
+ if (i < length) {
+ c = source[i];
+ if (c == '0') c = '~';
+ else if (c == '1') c = '/';
+ else {
+ parseErrorCode_ = kPointerParseErrorInvalidEscape;
+ goto error;
+ }
+ i++;
+ }
+ else {
+ parseErrorCode_ = kPointerParseErrorInvalidEscape;
+ goto error;
+ }
+ }
+
+ // First check for index: all of characters are digit
+ if (c < '0' || c > '9')
+ isNumber = false;
+
+ *name++ = c;
+ }
+ token->length = static_cast<SizeType>(name - token->name);
+ if (token->length == 0)
+ isNumber = false;
+ *name++ = '\0'; // Null terminator
+
+ // Second check for index: more than one digit cannot have leading zero
+ if (isNumber && token->length > 1 && token->name[0] == '0')
+ isNumber = false;
+
+ // String to SizeType conversion
+ SizeType n = 0;
+ if (isNumber) {
+ for (size_t j = 0; j < token->length; j++) {
+ SizeType m = n * 10 + static_cast<SizeType>(token->name[j] - '0');
+ if (m < n) { // overflow detection
+ isNumber = false;
+ break;
+ }
+ n = m;
+ }
+ }
+
+ token->index = isNumber ? n : kPointerInvalidIndex;
+ token++;
+ }
+
+ RAPIDJSON_ASSERT(name <= nameBuffer_ + length); // Should not overflow buffer
+ parseErrorCode_ = kPointerParseErrorNone;
+ return;
+
+ error:
+ Allocator::Free(tokens_);
+ nameBuffer_ = 0;
+ tokens_ = 0;
+ tokenCount_ = 0;
+ parseErrorOffset_ = i;
+ return;
+ }
+
+ //! Stringify to string or URI fragment representation.
+ /*!
+ \tparam uriFragment True for stringifying to URI fragment representation. False for string representation.
+ \tparam OutputStream type of output stream.
+ \param os The output stream.
+ */
+ template<bool uriFragment, typename OutputStream>
+ bool Stringify(OutputStream& os) const {
+ RAPIDJSON_ASSERT(IsValid());
+
+ if (uriFragment)
+ os.Put('#');
+
+ for (Token *t = tokens_; t != tokens_ + tokenCount_; ++t) {
+ os.Put('/');
+ for (size_t j = 0; j < t->length; j++) {
+ Ch c = t->name[j];
+ if (c == '~') {
+ os.Put('~');
+ os.Put('0');
+ }
+ else if (c == '/') {
+ os.Put('~');
+ os.Put('1');
+ }
+ else if (uriFragment && NeedPercentEncode(c)) {
+ // Transcode to UTF8 sequence
+ GenericStringStream<typename ValueType::EncodingType> source(&t->name[j]);
+ PercentEncodeStream<OutputStream> target(os);
+ if (!Transcoder<EncodingType, UTF8<> >().Validate(source, target))
+ return false;
+ j += source.Tell() - 1;
+ }
+ else
+ os.Put(c);
+ }
+ }
+ return true;
+ }
+
+ //! A helper stream for decoding a percent-encoded sequence into code unit.
+ /*!
+ This stream decodes %XY triplet into code unit (0-255).
+ If it encounters invalid characters, it sets output code unit as 0 and
+ mark invalid, and to be checked by IsValid().
+ */
+ class PercentDecodeStream {
+ public:
+ typedef typename ValueType::Ch Ch;
+
+ //! Constructor
+ /*!
+ \param source Start of the stream
+ \param end Past-the-end of the stream.
+ */
+ PercentDecodeStream(const Ch* source, const Ch* end) : src_(source), head_(source), end_(end), valid_(true) {}
+
+ Ch Take() {
+ if (*src_ != '%' || src_ + 3 > end_) { // %XY triplet
+ valid_ = false;
+ return 0;
+ }
+ src_++;
+ Ch c = 0;
+ for (int j = 0; j < 2; j++) {
+ c = static_cast<Ch>(c << 4);
+ Ch h = *src_;
+ if (h >= '0' && h <= '9') c = static_cast<Ch>(c + h - '0');
+ else if (h >= 'A' && h <= 'F') c = static_cast<Ch>(c + h - 'A' + 10);
+ else if (h >= 'a' && h <= 'f') c = static_cast<Ch>(c + h - 'a' + 10);
+ else {
+ valid_ = false;
+ return 0;
+ }
+ src_++;
+ }
+ return c;
+ }
+
+ size_t Tell() const { return static_cast<size_t>(src_ - head_); }
+ bool IsValid() const { return valid_; }
+
+ private:
+ const Ch* src_; //!< Current read position.
+ const Ch* head_; //!< Original head of the string.
+ const Ch* end_; //!< Past-the-end position.
+ bool valid_; //!< Whether the parsing is valid.
+ };
+
+ //! A helper stream to encode character (UTF-8 code unit) into percent-encoded sequence.
+ template <typename OutputStream>
+ class PercentEncodeStream {
+ public:
+ PercentEncodeStream(OutputStream& os) : os_(os) {}
+ void Put(char c) { // UTF-8 must be byte
+ unsigned char u = static_cast<unsigned char>(c);
+ static const char hexDigits[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' };
+ os_.Put('%');
+ os_.Put(static_cast<typename OutputStream::Ch>(hexDigits[u >> 4]));
+ os_.Put(static_cast<typename OutputStream::Ch>(hexDigits[u & 15]));
+ }
+ private:
+ OutputStream& os_;
+ };
+
+ Allocator* allocator_; //!< The current allocator. It is either user-supplied or equal to ownAllocator_.
+ Allocator* ownAllocator_; //!< Allocator owned by this Pointer.
+ Ch* nameBuffer_; //!< A buffer containing all names in tokens.
+ Token* tokens_; //!< A list of tokens.
+ size_t tokenCount_; //!< Number of tokens in tokens_.
+ size_t parseErrorOffset_; //!< Offset in code unit when parsing fail.
+ PointerParseErrorCode parseErrorCode_; //!< Parsing error code.
+};
+
+//! GenericPointer for Value (UTF-8, default allocator).
+typedef GenericPointer<Value> Pointer;
+
+//!@name Helper functions for GenericPointer
+//@{
+
+//////////////////////////////////////////////////////////////////////////////
+
+template <typename T>
+typename T::ValueType& CreateValueByPointer(T& root, const GenericPointer<typename T::ValueType>& pointer, typename T::AllocatorType& a) {
+ return pointer.Create(root, a);
+}
+
+template <typename T, typename CharType, size_t N>
+typename T::ValueType& CreateValueByPointer(T& root, const CharType(&source)[N], typename T::AllocatorType& a) {
+ return GenericPointer<typename T::ValueType>(source, N - 1).Create(root, a);
+}
+
+// No allocator parameter
+
+template <typename DocumentType>
+typename DocumentType::ValueType& CreateValueByPointer(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer) {
+ return pointer.Create(document);
+}
+
+template <typename DocumentType, typename CharType, size_t N>
+typename DocumentType::ValueType& CreateValueByPointer(DocumentType& document, const CharType(&source)[N]) {
+ return GenericPointer<typename DocumentType::ValueType>(source, N - 1).Create(document);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+template <typename T>
+typename T::ValueType* GetValueByPointer(T& root, const GenericPointer<typename T::ValueType>& pointer, size_t* unresolvedTokenIndex = 0) {
+ return pointer.Get(root, unresolvedTokenIndex);
+}
+
+template <typename T>
+const typename T::ValueType* GetValueByPointer(const T& root, const GenericPointer<typename T::ValueType>& pointer, size_t* unresolvedTokenIndex = 0) {
+ return pointer.Get(root, unresolvedTokenIndex);
+}
+
+template <typename T, typename CharType, size_t N>
+typename T::ValueType* GetValueByPointer(T& root, const CharType (&source)[N], size_t* unresolvedTokenIndex = 0) {
+ return GenericPointer<typename T::ValueType>(source, N - 1).Get(root, unresolvedTokenIndex);
+}
+
+template <typename T, typename CharType, size_t N>
+const typename T::ValueType* GetValueByPointer(const T& root, const CharType(&source)[N], size_t* unresolvedTokenIndex = 0) {
+ return GenericPointer<typename T::ValueType>(source, N - 1).Get(root, unresolvedTokenIndex);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+template <typename T>
+typename T::ValueType& GetValueByPointerWithDefault(T& root, const GenericPointer<typename T::ValueType>& pointer, const typename T::ValueType& defaultValue, typename T::AllocatorType& a) {
+ return pointer.GetWithDefault(root, defaultValue, a);
+}
+
+template <typename T>
+typename T::ValueType& GetValueByPointerWithDefault(T& root, const GenericPointer<typename T::ValueType>& pointer, const typename T::Ch* defaultValue, typename T::AllocatorType& a) {
+ return pointer.GetWithDefault(root, defaultValue, a);
+}
+
+#if RAPIDJSON_HAS_STDSTRING
+template <typename T>
+typename T::ValueType& GetValueByPointerWithDefault(T& root, const GenericPointer<typename T::ValueType>& pointer, const std::basic_string<typename T::Ch>& defaultValue, typename T::AllocatorType& a) {
+ return pointer.GetWithDefault(root, defaultValue, a);
+}
+#endif
+
+template <typename T, typename T2>
+RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T2>, internal::IsGenericValue<T2> >), (typename T::ValueType&))
+GetValueByPointerWithDefault(T& root, const GenericPointer<typename T::ValueType>& pointer, T2 defaultValue, typename T::AllocatorType& a) {
+ return pointer.GetWithDefault(root, defaultValue, a);
+}
+
+template <typename T, typename CharType, size_t N>
+typename T::ValueType& GetValueByPointerWithDefault(T& root, const CharType(&source)[N], const typename T::ValueType& defaultValue, typename T::AllocatorType& a) {
+ return GenericPointer<typename T::ValueType>(source, N - 1).GetWithDefault(root, defaultValue, a);
+}
+
+template <typename T, typename CharType, size_t N>
+typename T::ValueType& GetValueByPointerWithDefault(T& root, const CharType(&source)[N], const typename T::Ch* defaultValue, typename T::AllocatorType& a) {
+ return GenericPointer<typename T::ValueType>(source, N - 1).GetWithDefault(root, defaultValue, a);
+}
+
+#if RAPIDJSON_HAS_STDSTRING
+template <typename T, typename CharType, size_t N>
+typename T::ValueType& GetValueByPointerWithDefault(T& root, const CharType(&source)[N], const std::basic_string<typename T::Ch>& defaultValue, typename T::AllocatorType& a) {
+ return GenericPointer<typename T::ValueType>(source, N - 1).GetWithDefault(root, defaultValue, a);
+}
+#endif
+
+template <typename T, typename CharType, size_t N, typename T2>
+RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T2>, internal::IsGenericValue<T2> >), (typename T::ValueType&))
+GetValueByPointerWithDefault(T& root, const CharType(&source)[N], T2 defaultValue, typename T::AllocatorType& a) {
+ return GenericPointer<typename T::ValueType>(source, N - 1).GetWithDefault(root, defaultValue, a);
+}
+
+// No allocator parameter
+
+template <typename DocumentType>
+typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer, const typename DocumentType::ValueType& defaultValue) {
+ return pointer.GetWithDefault(document, defaultValue);
+}
+
+template <typename DocumentType>
+typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer, const typename DocumentType::Ch* defaultValue) {
+ return pointer.GetWithDefault(document, defaultValue);
+}
+
+#if RAPIDJSON_HAS_STDSTRING
+template <typename DocumentType>
+typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer, const std::basic_string<typename DocumentType::Ch>& defaultValue) {
+ return pointer.GetWithDefault(document, defaultValue);
+}
+#endif
+
+template <typename DocumentType, typename T2>
+RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T2>, internal::IsGenericValue<T2> >), (typename DocumentType::ValueType&))
+GetValueByPointerWithDefault(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer, T2 defaultValue) {
+ return pointer.GetWithDefault(document, defaultValue);
+}
+
+template <typename DocumentType, typename CharType, size_t N>
+typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const CharType(&source)[N], const typename DocumentType::ValueType& defaultValue) {
+ return GenericPointer<typename DocumentType::ValueType>(source, N - 1).GetWithDefault(document, defaultValue);
+}
+
+template <typename DocumentType, typename CharType, size_t N>
+typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const CharType(&source)[N], const typename DocumentType::Ch* defaultValue) {
+ return GenericPointer<typename DocumentType::ValueType>(source, N - 1).GetWithDefault(document, defaultValue);
+}
+
+#if RAPIDJSON_HAS_STDSTRING
+template <typename DocumentType, typename CharType, size_t N>
+typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const CharType(&source)[N], const std::basic_string<typename DocumentType::Ch>& defaultValue) {
+ return GenericPointer<typename DocumentType::ValueType>(source, N - 1).GetWithDefault(document, defaultValue);
+}
+#endif
+
+template <typename DocumentType, typename CharType, size_t N, typename T2>
+RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T2>, internal::IsGenericValue<T2> >), (typename DocumentType::ValueType&))
+GetValueByPointerWithDefault(DocumentType& document, const CharType(&source)[N], T2 defaultValue) {
+ return GenericPointer<typename DocumentType::ValueType>(source, N - 1).GetWithDefault(document, defaultValue);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+template <typename T>
+typename T::ValueType& SetValueByPointer(T& root, const GenericPointer<typename T::ValueType>& pointer, typename T::ValueType& value, typename T::AllocatorType& a) {
+ return pointer.Set(root, value, a);
+}
+
+template <typename T>
+typename T::ValueType& SetValueByPointer(T& root, const GenericPointer<typename T::ValueType>& pointer, const typename T::ValueType& value, typename T::AllocatorType& a) {
+ return pointer.Set(root, value, a);
+}
+
+template <typename T>
+typename T::ValueType& SetValueByPointer(T& root, const GenericPointer<typename T::ValueType>& pointer, const typename T::Ch* value, typename T::AllocatorType& a) {
+ return pointer.Set(root, value, a);
+}
+
+#if RAPIDJSON_HAS_STDSTRING
+template <typename T>
+typename T::ValueType& SetValueByPointer(T& root, const GenericPointer<typename T::ValueType>& pointer, const std::basic_string<typename T::Ch>& value, typename T::AllocatorType& a) {
+ return pointer.Set(root, value, a);
+}
+#endif
+
+template <typename T, typename T2>
+RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T2>, internal::IsGenericValue<T2> >), (typename T::ValueType&))
+SetValueByPointer(T& root, const GenericPointer<typename T::ValueType>& pointer, T2 value, typename T::AllocatorType& a) {
+ return pointer.Set(root, value, a);
+}
+
+template <typename T, typename CharType, size_t N>
+typename T::ValueType& SetValueByPointer(T& root, const CharType(&source)[N], typename T::ValueType& value, typename T::AllocatorType& a) {
+ return GenericPointer<typename T::ValueType>(source, N - 1).Set(root, value, a);
+}
+
+template <typename T, typename CharType, size_t N>
+typename T::ValueType& SetValueByPointer(T& root, const CharType(&source)[N], const typename T::ValueType& value, typename T::AllocatorType& a) {
+ return GenericPointer<typename T::ValueType>(source, N - 1).Set(root, value, a);
+}
+
+template <typename T, typename CharType, size_t N>
+typename T::ValueType& SetValueByPointer(T& root, const CharType(&source)[N], const typename T::Ch* value, typename T::AllocatorType& a) {
+ return GenericPointer<typename T::ValueType>(source, N - 1).Set(root, value, a);
+}
+
+#if RAPIDJSON_HAS_STDSTRING
+template <typename T, typename CharType, size_t N>
+typename T::ValueType& SetValueByPointer(T& root, const CharType(&source)[N], const std::basic_string<typename T::Ch>& value, typename T::AllocatorType& a) {
+ return GenericPointer<typename T::ValueType>(source, N - 1).Set(root, value, a);
+}
+#endif
+
+template <typename T, typename CharType, size_t N, typename T2>
+RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T2>, internal::IsGenericValue<T2> >), (typename T::ValueType&))
+SetValueByPointer(T& root, const CharType(&source)[N], T2 value, typename T::AllocatorType& a) {
+ return GenericPointer<typename T::ValueType>(source, N - 1).Set(root, value, a);
+}
+
+// No allocator parameter
+
+template <typename DocumentType>
+typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer, typename DocumentType::ValueType& value) {
+ return pointer.Set(document, value);
+}
+
+template <typename DocumentType>
+typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer, const typename DocumentType::ValueType& value) {
+ return pointer.Set(document, value);
+}
+
+template <typename DocumentType>
+typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer, const typename DocumentType::Ch* value) {
+ return pointer.Set(document, value);
+}
+
+#if RAPIDJSON_HAS_STDSTRING
+template <typename DocumentType>
+typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer, const std::basic_string<typename DocumentType::Ch>& value) {
+ return pointer.Set(document, value);
+}
+#endif
+
+template <typename DocumentType, typename T2>
+RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T2>, internal::IsGenericValue<T2> >), (typename DocumentType::ValueType&))
+SetValueByPointer(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer, T2 value) {
+ return pointer.Set(document, value);
+}
+
+template <typename DocumentType, typename CharType, size_t N>
+typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const CharType(&source)[N], typename DocumentType::ValueType& value) {
+ return GenericPointer<typename DocumentType::ValueType>(source, N - 1).Set(document, value);
+}
+
+template <typename DocumentType, typename CharType, size_t N>
+typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const CharType(&source)[N], const typename DocumentType::ValueType& value) {
+ return GenericPointer<typename DocumentType::ValueType>(source, N - 1).Set(document, value);
+}
+
+template <typename DocumentType, typename CharType, size_t N>
+typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const CharType(&source)[N], const typename DocumentType::Ch* value) {
+ return GenericPointer<typename DocumentType::ValueType>(source, N - 1).Set(document, value);
+}
+
+#if RAPIDJSON_HAS_STDSTRING
+template <typename DocumentType, typename CharType, size_t N>
+typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const CharType(&source)[N], const std::basic_string<typename DocumentType::Ch>& value) {
+ return GenericPointer<typename DocumentType::ValueType>(source, N - 1).Set(document, value);
+}
+#endif
+
+template <typename DocumentType, typename CharType, size_t N, typename T2>
+RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T2>, internal::IsGenericValue<T2> >), (typename DocumentType::ValueType&))
+SetValueByPointer(DocumentType& document, const CharType(&source)[N], T2 value) {
+ return GenericPointer<typename DocumentType::ValueType>(source, N - 1).Set(document, value);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+template <typename T>
+typename T::ValueType& SwapValueByPointer(T& root, const GenericPointer<typename T::ValueType>& pointer, typename T::ValueType& value, typename T::AllocatorType& a) {
+ return pointer.Swap(root, value, a);
+}
+
+template <typename T, typename CharType, size_t N>
+typename T::ValueType& SwapValueByPointer(T& root, const CharType(&source)[N], typename T::ValueType& value, typename T::AllocatorType& a) {
+ return GenericPointer<typename T::ValueType>(source, N - 1).Swap(root, value, a);
+}
+
+template <typename DocumentType>
+typename DocumentType::ValueType& SwapValueByPointer(DocumentType& document, const GenericPointer<typename DocumentType::ValueType>& pointer, typename DocumentType::ValueType& value) {
+ return pointer.Swap(document, value);
+}
+
+template <typename DocumentType, typename CharType, size_t N>
+typename DocumentType::ValueType& SwapValueByPointer(DocumentType& document, const CharType(&source)[N], typename DocumentType::ValueType& value) {
+ return GenericPointer<typename DocumentType::ValueType>(source, N - 1).Swap(document, value);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+template <typename T>
+bool EraseValueByPointer(T& root, const GenericPointer<typename T::ValueType>& pointer) {
+ return pointer.Erase(root);
+}
+
+template <typename T, typename CharType, size_t N>
+bool EraseValueByPointer(T& root, const CharType(&source)[N]) {
+ return GenericPointer<typename T::ValueType>(source, N - 1).Erase(root);
+}
+
+//@}
+
+RAPIDJSON_NAMESPACE_END
+
+#if defined(__clang__) || defined(_MSC_VER)
+RAPIDJSON_DIAG_POP
+#endif
+
+#endif // RAPIDJSON_POINTER_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_prettywriter.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_prettywriter.h
new file mode 100644
index 00000000..d353faa7
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_prettywriter.h
@@ -0,0 +1,277 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_PRETTYWRITER_H_
+#define RAPIDJSON_PRETTYWRITER_H_
+
+#include "lottie_rapidjson_writer.h"
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(effc++)
+#endif
+
+#if defined(__clang__)
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(c++98-compat)
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+
+//! Combination of PrettyWriter format flags.
+/*! \see PrettyWriter::SetFormatOptions
+ */
+enum PrettyFormatOptions {
+ kFormatDefault = 0, //!< Default pretty formatting.
+ kFormatSingleLineArray = 1 //!< Format arrays on a single line.
+};
+
+//! Writer with indentation and spacing.
+/*!
+ \tparam OutputStream Type of output os.
+ \tparam SourceEncoding Encoding of source string.
+ \tparam TargetEncoding Encoding of output stream.
+ \tparam StackAllocator Type of allocator for allocating memory of stack.
+*/
+template<typename OutputStream, typename SourceEncoding = UTF8<>, typename TargetEncoding = UTF8<>, typename StackAllocator = CrtAllocator, unsigned writeFlags = kWriteDefaultFlags>
+class PrettyWriter : public Writer<OutputStream, SourceEncoding, TargetEncoding, StackAllocator, writeFlags> {
+public:
+ typedef Writer<OutputStream, SourceEncoding, TargetEncoding, StackAllocator, writeFlags> Base;
+ typedef typename Base::Ch Ch;
+
+ //! Constructor
+ /*! \param os Output stream.
+ \param allocator User supplied allocator. If it is null, it will create a private one.
+ \param levelDepth Initial capacity of stack.
+ */
+ explicit PrettyWriter(OutputStream& os, StackAllocator* allocator = 0, size_t levelDepth = Base::kDefaultLevelDepth) :
+ Base(os, allocator, levelDepth), indentChar_(' '), indentCharCount_(4), formatOptions_(kFormatDefault) {}
+
+
+ explicit PrettyWriter(StackAllocator* allocator = 0, size_t levelDepth = Base::kDefaultLevelDepth) :
+ Base(allocator, levelDepth), indentChar_(' '), indentCharCount_(4), formatOptions_(kFormatDefault) {}
+
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+ PrettyWriter(PrettyWriter&& rhs) :
+ Base(std::forward<PrettyWriter>(rhs)), indentChar_(rhs.indentChar_), indentCharCount_(rhs.indentCharCount_), formatOptions_(rhs.formatOptions_) {}
+#endif
+
+ //! Set custom indentation.
+ /*! \param indentChar Character for indentation. Must be whitespace character (' ', '\\t', '\\n', '\\r').
+ \param indentCharCount Number of indent characters for each indentation level.
+ \note The default indentation is 4 spaces.
+ */
+ PrettyWriter& SetIndent(Ch indentChar, unsigned indentCharCount) {
+ RAPIDJSON_ASSERT(indentChar == ' ' || indentChar == '\t' || indentChar == '\n' || indentChar == '\r');
+ indentChar_ = indentChar;
+ indentCharCount_ = indentCharCount;
+ return *this;
+ }
+
+ //! Set pretty writer formatting options.
+ /*! \param options Formatting options.
+ */
+ PrettyWriter& SetFormatOptions(PrettyFormatOptions options) {
+ formatOptions_ = options;
+ return *this;
+ }
+
+ /*! @name Implementation of Handler
+ \see Handler
+ */
+ //@{
+
+ bool Null() { PrettyPrefix(kNullType); return Base::EndValue(Base::WriteNull()); }
+ bool Bool(bool b) { PrettyPrefix(b ? kTrueType : kFalseType); return Base::EndValue(Base::WriteBool(b)); }
+ bool Int(int i) { PrettyPrefix(kNumberType); return Base::EndValue(Base::WriteInt(i)); }
+ bool Uint(unsigned u) { PrettyPrefix(kNumberType); return Base::EndValue(Base::WriteUint(u)); }
+ bool Int64(int64_t i64) { PrettyPrefix(kNumberType); return Base::EndValue(Base::WriteInt64(i64)); }
+ bool Uint64(uint64_t u64) { PrettyPrefix(kNumberType); return Base::EndValue(Base::WriteUint64(u64)); }
+ bool Double(double d) { PrettyPrefix(kNumberType); return Base::EndValue(Base::WriteDouble(d)); }
+
+ bool RawNumber(const Ch* str, SizeType length, bool copy = false) {
+ RAPIDJSON_ASSERT(str != 0);
+ (void)copy;
+ PrettyPrefix(kNumberType);
+ return Base::EndValue(Base::WriteString(str, length));
+ }
+
+ bool String(const Ch* str, SizeType length, bool copy = false) {
+ RAPIDJSON_ASSERT(str != 0);
+ (void)copy;
+ PrettyPrefix(kStringType);
+ return Base::EndValue(Base::WriteString(str, length));
+ }
+
+#if RAPIDJSON_HAS_STDSTRING
+ bool String(const std::basic_string<Ch>& str) {
+ return String(str.data(), SizeType(str.size()));
+ }
+#endif
+
+ bool StartObject() {
+ PrettyPrefix(kObjectType);
+ new (Base::level_stack_.template Push<typename Base::Level>()) typename Base::Level(false);
+ return Base::WriteStartObject();
+ }
+
+ bool Key(const Ch* str, SizeType length, bool copy = false) { return String(str, length, copy); }
+
+#if RAPIDJSON_HAS_STDSTRING
+ bool Key(const std::basic_string<Ch>& str) {
+ return Key(str.data(), SizeType(str.size()));
+ }
+#endif
+
+ bool EndObject(SizeType memberCount = 0) {
+ (void)memberCount;
+ RAPIDJSON_ASSERT(Base::level_stack_.GetSize() >= sizeof(typename Base::Level)); // not inside an Object
+ RAPIDJSON_ASSERT(!Base::level_stack_.template Top<typename Base::Level>()->inArray); // currently inside an Array, not Object
+ RAPIDJSON_ASSERT(0 == Base::level_stack_.template Top<typename Base::Level>()->valueCount % 2); // Object has a Key without a Value
+
+ bool empty = Base::level_stack_.template Pop<typename Base::Level>(1)->valueCount == 0;
+
+ if (!empty) {
+ Base::os_->Put('\n');
+ WriteIndent();
+ }
+ bool ret = Base::EndValue(Base::WriteEndObject());
+ (void)ret;
+ RAPIDJSON_ASSERT(ret == true);
+ if (Base::level_stack_.Empty()) // end of json text
+ Base::Flush();
+ return true;
+ }
+
+ bool StartArray() {
+ PrettyPrefix(kArrayType);
+ new (Base::level_stack_.template Push<typename Base::Level>()) typename Base::Level(true);
+ return Base::WriteStartArray();
+ }
+
+ bool EndArray(SizeType memberCount = 0) {
+ (void)memberCount;
+ RAPIDJSON_ASSERT(Base::level_stack_.GetSize() >= sizeof(typename Base::Level));
+ RAPIDJSON_ASSERT(Base::level_stack_.template Top<typename Base::Level>()->inArray);
+ bool empty = Base::level_stack_.template Pop<typename Base::Level>(1)->valueCount == 0;
+
+ if (!empty && !(formatOptions_ & kFormatSingleLineArray)) {
+ Base::os_->Put('\n');
+ WriteIndent();
+ }
+ bool ret = Base::EndValue(Base::WriteEndArray());
+ (void)ret;
+ RAPIDJSON_ASSERT(ret == true);
+ if (Base::level_stack_.Empty()) // end of json text
+ Base::Flush();
+ return true;
+ }
+
+ //@}
+
+ /*! @name Convenience extensions */
+ //@{
+
+ //! Simpler but slower overload.
+ bool String(const Ch* str) { return String(str, internal::StrLen(str)); }
+ bool Key(const Ch* str) { return Key(str, internal::StrLen(str)); }
+
+ //@}
+
+ //! Write a raw JSON value.
+ /*!
+ For user to write a stringified JSON as a value.
+
+ \param json A well-formed JSON value. It should not contain null character within [0, length - 1] range.
+ \param length Length of the json.
+ \param type Type of the root of json.
+ \note When using PrettyWriter::RawValue(), the result json may not be indented correctly.
+ */
+ bool RawValue(const Ch* json, size_t length, Type type) {
+ RAPIDJSON_ASSERT(json != 0);
+ PrettyPrefix(type);
+ return Base::EndValue(Base::WriteRawValue(json, length));
+ }
+
+protected:
+ void PrettyPrefix(Type type) {
+ (void)type;
+ if (Base::level_stack_.GetSize() != 0) { // this value is not at root
+ typename Base::Level* level = Base::level_stack_.template Top<typename Base::Level>();
+
+ if (level->inArray) {
+ if (level->valueCount > 0) {
+ Base::os_->Put(','); // add comma if it is not the first element in array
+ if (formatOptions_ & kFormatSingleLineArray)
+ Base::os_->Put(' ');
+ }
+
+ if (!(formatOptions_ & kFormatSingleLineArray)) {
+ Base::os_->Put('\n');
+ WriteIndent();
+ }
+ }
+ else { // in object
+ if (level->valueCount > 0) {
+ if (level->valueCount % 2 == 0) {
+ Base::os_->Put(',');
+ Base::os_->Put('\n');
+ }
+ else {
+ Base::os_->Put(':');
+ Base::os_->Put(' ');
+ }
+ }
+ else
+ Base::os_->Put('\n');
+
+ if (level->valueCount % 2 == 0)
+ WriteIndent();
+ }
+ if (!level->inArray && level->valueCount % 2 == 0)
+ RAPIDJSON_ASSERT(type == kStringType); // if it's in object, then even number should be a name
+ level->valueCount++;
+ }
+ else {
+ RAPIDJSON_ASSERT(!Base::hasRoot_); // Should only has one and only one root.
+ Base::hasRoot_ = true;
+ }
+ }
+
+ void WriteIndent() {
+ size_t count = (Base::level_stack_.GetSize() / sizeof(typename Base::Level)) * indentCharCount_;
+ PutN(*Base::os_, static_cast<typename OutputStream::Ch>(indentChar_), count);
+ }
+
+ Ch indentChar_;
+ unsigned indentCharCount_;
+ PrettyFormatOptions formatOptions_;
+
+private:
+ // Prohibit copy constructor & assignment operator.
+ PrettyWriter(const PrettyWriter&);
+ PrettyWriter& operator=(const PrettyWriter&);
+};
+
+RAPIDJSON_NAMESPACE_END
+
+#if defined(__clang__)
+RAPIDJSON_DIAG_POP
+#endif
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_POP
+#endif
+
+#endif // RAPIDJSON_RAPIDJSON_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_rapidjson.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_rapidjson.h
new file mode 100644
index 00000000..f1fd5ba2
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_rapidjson.h
@@ -0,0 +1,692 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_RAPIDJSON_H_
+#define RAPIDJSON_RAPIDJSON_H_
+
+/*!\file rapidjson.h
+ \brief common definitions and configuration
+
+ \see RAPIDJSON_CONFIG
+ */
+
+/*! \defgroup RAPIDJSON_CONFIG RapidJSON configuration
+ \brief Configuration macros for library features
+
+ Some RapidJSON features are configurable to adapt the library to a wide
+ variety of platforms, environments and usage scenarios. Most of the
+ features can be configured in terms of overridden or predefined
+ preprocessor macros at compile-time.
+
+ Some additional customization is available in the \ref RAPIDJSON_ERRORS APIs.
+
+ \note These macros should be given on the compiler command-line
+ (where applicable) to avoid inconsistent values when compiling
+ different translation units of a single application.
+ */
+
+#include <cstdlib> // malloc(), realloc(), free(), size_t
+#include <cstring> // memset(), memcpy(), memmove(), memcmp()
+
+///////////////////////////////////////////////////////////////////////////////
+// RAPIDJSON_VERSION_STRING
+//
+// ALWAYS synchronize the following 3 macros with corresponding variables in /CMakeLists.txt.
+//
+
+//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
+// token stringification
+#define RAPIDJSON_STRINGIFY(x) RAPIDJSON_DO_STRINGIFY(x)
+#define RAPIDJSON_DO_STRINGIFY(x) #x
+
+// token concatenation
+#define RAPIDJSON_JOIN(X, Y) RAPIDJSON_DO_JOIN(X, Y)
+#define RAPIDJSON_DO_JOIN(X, Y) RAPIDJSON_DO_JOIN2(X, Y)
+#define RAPIDJSON_DO_JOIN2(X, Y) X##Y
+//!@endcond
+
+/*! \def RAPIDJSON_MAJOR_VERSION
+ \ingroup RAPIDJSON_CONFIG
+ \brief Major version of RapidJSON in integer.
+*/
+/*! \def RAPIDJSON_MINOR_VERSION
+ \ingroup RAPIDJSON_CONFIG
+ \brief Minor version of RapidJSON in integer.
+*/
+/*! \def RAPIDJSON_PATCH_VERSION
+ \ingroup RAPIDJSON_CONFIG
+ \brief Patch version of RapidJSON in integer.
+*/
+/*! \def RAPIDJSON_VERSION_STRING
+ \ingroup RAPIDJSON_CONFIG
+ \brief Version of RapidJSON in "<major>.<minor>.<patch>" string format.
+*/
+#define RAPIDJSON_MAJOR_VERSION 1
+#define RAPIDJSON_MINOR_VERSION 1
+#define RAPIDJSON_PATCH_VERSION 0
+#define RAPIDJSON_VERSION_STRING \
+ RAPIDJSON_STRINGIFY(RAPIDJSON_MAJOR_VERSION.RAPIDJSON_MINOR_VERSION.RAPIDJSON_PATCH_VERSION)
+
+///////////////////////////////////////////////////////////////////////////////
+// RAPIDJSON_NAMESPACE_(BEGIN|END)
+/*! \def RAPIDJSON_NAMESPACE
+ \ingroup RAPIDJSON_CONFIG
+ \brief provide custom rapidjson namespace
+
+ In order to avoid symbol clashes and/or "One Definition Rule" errors
+ between multiple inclusions of (different versions of) RapidJSON in
+ a single binary, users can customize the name of the main RapidJSON
+ namespace.
+
+ In case of a single nesting level, defining \c RAPIDJSON_NAMESPACE
+ to a custom name (e.g. \c MyRapidJSON) is sufficient. If multiple
+ levels are needed, both \ref RAPIDJSON_NAMESPACE_BEGIN and \ref
+ RAPIDJSON_NAMESPACE_END need to be defined as well:
+
+ \code
+ // in some .cpp file
+ #define RAPIDJSON_NAMESPACE my::rapidjson
+ #define RAPIDJSON_NAMESPACE_BEGIN namespace my { namespace rapidjson {
+ #define RAPIDJSON_NAMESPACE_END } }
+ #include "rapidjson/..."
+ \endcode
+
+ \see rapidjson
+ */
+/*! \def RAPIDJSON_NAMESPACE_BEGIN
+ \ingroup RAPIDJSON_CONFIG
+ \brief provide custom rapidjson namespace (opening expression)
+ \see RAPIDJSON_NAMESPACE
+*/
+/*! \def RAPIDJSON_NAMESPACE_END
+ \ingroup RAPIDJSON_CONFIG
+ \brief provide custom rapidjson namespace (closing expression)
+ \see RAPIDJSON_NAMESPACE
+*/
+#ifndef RAPIDJSON_NAMESPACE
+#define RAPIDJSON_NAMESPACE rapidjson
+#endif
+#ifndef RAPIDJSON_NAMESPACE_BEGIN
+#define RAPIDJSON_NAMESPACE_BEGIN namespace RAPIDJSON_NAMESPACE {
+#endif
+#ifndef RAPIDJSON_NAMESPACE_END
+#define RAPIDJSON_NAMESPACE_END }
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// RAPIDJSON_HAS_STDSTRING
+
+#ifndef RAPIDJSON_HAS_STDSTRING
+#ifdef RAPIDJSON_DOXYGEN_RUNNING
+#define RAPIDJSON_HAS_STDSTRING 1 // force generation of documentation
+#else
+#define RAPIDJSON_HAS_STDSTRING 0 // no std::string support by default
+#endif
+/*! \def RAPIDJSON_HAS_STDSTRING
+ \ingroup RAPIDJSON_CONFIG
+ \brief Enable RapidJSON support for \c std::string
+
+ By defining this preprocessor symbol to \c 1, several convenience functions for using
+ \ref rapidjson::GenericValue with \c std::string are enabled, especially
+ for construction and comparison.
+
+ \hideinitializer
+*/
+#endif // !defined(RAPIDJSON_HAS_STDSTRING)
+
+#if RAPIDJSON_HAS_STDSTRING
+#include <string>
+#endif // RAPIDJSON_HAS_STDSTRING
+
+///////////////////////////////////////////////////////////////////////////////
+// RAPIDJSON_NO_INT64DEFINE
+
+/*! \def RAPIDJSON_NO_INT64DEFINE
+ \ingroup RAPIDJSON_CONFIG
+ \brief Use external 64-bit integer types.
+
+ RapidJSON requires the 64-bit integer types \c int64_t and \c uint64_t types
+ to be available at global scope.
+
+ If users have their own definition, define RAPIDJSON_NO_INT64DEFINE to
+ prevent RapidJSON from defining its own types.
+*/
+#ifndef RAPIDJSON_NO_INT64DEFINE
+//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
+#if defined(_MSC_VER) && (_MSC_VER < 1800) // Visual Studio 2013
+#include "lottie_rapidjson_msinttypes_stdint.h"
+#include "lottie_rapidjson_msinttypes_inttypes.h"
+#else
+// Other compilers should have this.
+#include <stdint.h>
+#include <inttypes.h>
+#endif
+//!@endcond
+#ifdef RAPIDJSON_DOXYGEN_RUNNING
+#define RAPIDJSON_NO_INT64DEFINE
+#endif
+#endif // RAPIDJSON_NO_INT64DEFINE
+
+///////////////////////////////////////////////////////////////////////////////
+// RAPIDJSON_FORCEINLINE
+
+#ifndef RAPIDJSON_FORCEINLINE
+//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
+#if defined(_MSC_VER) && defined(NDEBUG)
+#define RAPIDJSON_FORCEINLINE __forceinline
+#elif defined(__GNUC__) && __GNUC__ >= 4 && defined(NDEBUG)
+#define RAPIDJSON_FORCEINLINE __attribute__((always_inline))
+#else
+#define RAPIDJSON_FORCEINLINE
+#endif
+//!@endcond
+#endif // RAPIDJSON_FORCEINLINE
+
+///////////////////////////////////////////////////////////////////////////////
+// RAPIDJSON_ENDIAN
+#define RAPIDJSON_LITTLEENDIAN 0 //!< Little endian machine
+#define RAPIDJSON_BIGENDIAN 1 //!< Big endian machine
+
+//! Endianness of the machine.
+/*!
+ \def RAPIDJSON_ENDIAN
+ \ingroup RAPIDJSON_CONFIG
+
+ GCC 4.6 provided macro for detecting endianness of the target machine. But other
+ compilers may not have this. User can define RAPIDJSON_ENDIAN to either
+ \ref RAPIDJSON_LITTLEENDIAN or \ref RAPIDJSON_BIGENDIAN.
+
+ Default detection implemented with reference to
+ \li https://gcc.gnu.org/onlinedocs/gcc-4.6.0/cpp/Common-Predefined-Macros.html
+ \li http://www.boost.org/doc/libs/1_42_0/boost/detail/endian.hpp
+*/
+#ifndef RAPIDJSON_ENDIAN
+// Detect with GCC 4.6's macro
+# ifdef __BYTE_ORDER__
+# if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN
+# elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN
+# else
+# error Unknown machine endianness detected. User needs to define RAPIDJSON_ENDIAN.
+# endif // __BYTE_ORDER__
+// Detect with GLIBC's endian.h
+# elif defined(__GLIBC__)
+# include <endian.h>
+# if (__BYTE_ORDER == __LITTLE_ENDIAN)
+# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN
+# elif (__BYTE_ORDER == __BIG_ENDIAN)
+# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN
+# else
+# error Unknown machine endianness detected. User needs to define RAPIDJSON_ENDIAN.
+# endif // __GLIBC__
+// Detect with _LITTLE_ENDIAN and _BIG_ENDIAN macro
+# elif defined(_LITTLE_ENDIAN) && !defined(_BIG_ENDIAN)
+# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN
+# elif defined(_BIG_ENDIAN) && !defined(_LITTLE_ENDIAN)
+# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN
+// Detect with architecture macros
+# elif defined(__sparc) || defined(__sparc__) || defined(_POWER) || defined(__powerpc__) || defined(__ppc__) || defined(__hpux) || defined(__hppa) || defined(_MIPSEB) || defined(_POWER) || defined(__s390__)
+# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN
+# elif defined(__i386__) || defined(__alpha__) || defined(__ia64) || defined(__ia64__) || defined(_M_IX86) || defined(_M_IA64) || defined(_M_ALPHA) || defined(__amd64) || defined(__amd64__) || defined(_M_AMD64) || defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || defined(__bfin__)
+# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN
+# elif defined(_MSC_VER) && (defined(_M_ARM) || defined(_M_ARM64))
+# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN
+# elif defined(RAPIDJSON_DOXYGEN_RUNNING)
+# define RAPIDJSON_ENDIAN
+# else
+# error Unknown machine endianness detected. User needs to define RAPIDJSON_ENDIAN.
+# endif
+#endif // RAPIDJSON_ENDIAN
+
+///////////////////////////////////////////////////////////////////////////////
+// RAPIDJSON_64BIT
+
+//! Whether using 64-bit architecture
+#ifndef RAPIDJSON_64BIT
+#if defined(__LP64__) || (defined(__x86_64__) && defined(__ILP32__)) || defined(_WIN64) || defined(__EMSCRIPTEN__)
+#define RAPIDJSON_64BIT 1
+#else
+#define RAPIDJSON_64BIT 0
+#endif
+#endif // RAPIDJSON_64BIT
+
+///////////////////////////////////////////////////////////////////////////////
+// RAPIDJSON_ALIGN
+
+//! Data alignment of the machine.
+/*! \ingroup RAPIDJSON_CONFIG
+ \param x pointer to align
+
+ Some machines require strict data alignment. The default is 8 bytes.
+ User can customize by defining the RAPIDJSON_ALIGN function macro.
+*/
+#ifndef RAPIDJSON_ALIGN
+#define RAPIDJSON_ALIGN(x) (((x) + static_cast<size_t>(7u)) & ~static_cast<size_t>(7u))
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// RAPIDJSON_UINT64_C2
+
+//! Construct a 64-bit literal by a pair of 32-bit integer.
+/*!
+ 64-bit literal with or without ULL suffix is prone to compiler warnings.
+ UINT64_C() is a C macro which causes compilation problems.
+ Use this macro to define 64-bit constants by a pair of 32-bit integer.
+*/
+#ifndef RAPIDJSON_UINT64_C2
+#define RAPIDJSON_UINT64_C2(high32, low32) ((static_cast<uint64_t>(high32) << 32) | static_cast<uint64_t>(low32))
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// RAPIDJSON_48BITPOINTER_OPTIMIZATION
+
+//! Use only lower 48-bit address for some pointers.
+/*!
+ \ingroup RAPIDJSON_CONFIG
+
+ This optimization uses the fact that current X86-64 architecture only implements lower 48-bit virtual address.
+ The higher 16-bit can be used for storing other data.
+ \c GenericValue uses this optimization to reduce its size from 24 bytes to 16 bytes in 64-bit architecture.
+*/
+#ifndef RAPIDJSON_48BITPOINTER_OPTIMIZATION
+#if defined(__amd64__) || defined(__amd64) || defined(__x86_64__) || defined(__x86_64) || defined(_M_X64) || defined(_M_AMD64)
+#define RAPIDJSON_48BITPOINTER_OPTIMIZATION 1
+#else
+#define RAPIDJSON_48BITPOINTER_OPTIMIZATION 0
+#endif
+#endif // RAPIDJSON_48BITPOINTER_OPTIMIZATION
+
+#if RAPIDJSON_48BITPOINTER_OPTIMIZATION == 1
+#if RAPIDJSON_64BIT != 1
+#error RAPIDJSON_48BITPOINTER_OPTIMIZATION can only be set to 1 when RAPIDJSON_64BIT=1
+#endif
+#define RAPIDJSON_SETPOINTER(type, p, x) (p = reinterpret_cast<type *>((reinterpret_cast<uintptr_t>(p) & static_cast<uintptr_t>(RAPIDJSON_UINT64_C2(0xFFFF0000, 0x00000000))) | reinterpret_cast<uintptr_t>(reinterpret_cast<const void*>(x))))
+#define RAPIDJSON_GETPOINTER(type, p) (reinterpret_cast<type *>(reinterpret_cast<uintptr_t>(p) & static_cast<uintptr_t>(RAPIDJSON_UINT64_C2(0x0000FFFF, 0xFFFFFFFF))))
+#else
+#define RAPIDJSON_SETPOINTER(type, p, x) (p = (x))
+#define RAPIDJSON_GETPOINTER(type, p) (p)
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// RAPIDJSON_SSE2/RAPIDJSON_SSE42/RAPIDJSON_NEON/RAPIDJSON_SIMD
+
+/*! \def RAPIDJSON_SIMD
+ \ingroup RAPIDJSON_CONFIG
+ \brief Enable SSE2/SSE4.2/Neon optimization.
+
+ RapidJSON supports optimized implementations for some parsing operations
+ based on the SSE2, SSE4.2 or NEON SIMD extensions on modern Intel
+ or ARM compatible processors.
+
+ To enable these optimizations, three different symbols can be defined:
+ \code
+ // Enable SSE2 optimization.
+ #define RAPIDJSON_SSE2
+
+ // Enable SSE4.2 optimization.
+ #define RAPIDJSON_SSE42
+ \endcode
+ \code
+ // Enable ARM Neon optimization.
+ #define RAPIDJSON_NEON
+ \endcode
+
+ \c RAPIDJSON_SSE42 takes precedence over SSE2, if both are defined.
+
+ If any of these symbols is defined, RapidJSON defines the macro
+ \c RAPIDJSON_SIMD to indicate the availability of the optimized code.
+*/
+#if defined(RAPIDJSON_SSE2) || defined(RAPIDJSON_SSE42) \
+ || defined(RAPIDJSON_NEON) || defined(RAPIDJSON_DOXYGEN_RUNNING)
+#define RAPIDJSON_SIMD
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// RAPIDJSON_NO_SIZETYPEDEFINE
+
+#ifndef RAPIDJSON_NO_SIZETYPEDEFINE
+/*! \def RAPIDJSON_NO_SIZETYPEDEFINE
+ \ingroup RAPIDJSON_CONFIG
+ \brief User-provided \c SizeType definition.
+
+ In order to avoid using 32-bit size types for indexing strings and arrays,
+ define this preprocessor symbol and provide the type rapidjson::SizeType
+ before including RapidJSON:
+ \code
+ #define RAPIDJSON_NO_SIZETYPEDEFINE
+ namespace rapidjson { typedef ::std::size_t SizeType; }
+ #include "rapidjson/..."
+ \endcode
+
+ \see rapidjson::SizeType
+*/
+#ifdef RAPIDJSON_DOXYGEN_RUNNING
+#define RAPIDJSON_NO_SIZETYPEDEFINE
+#endif
+RAPIDJSON_NAMESPACE_BEGIN
+//! Size type (for string lengths, array sizes, etc.)
+/*! RapidJSON uses 32-bit array/string indices even on 64-bit platforms,
+ instead of using \c size_t. Users may override the SizeType by defining
+ \ref RAPIDJSON_NO_SIZETYPEDEFINE.
+*/
+typedef unsigned SizeType;
+RAPIDJSON_NAMESPACE_END
+#endif
+
+// always import std::size_t to rapidjson namespace
+RAPIDJSON_NAMESPACE_BEGIN
+using std::size_t;
+RAPIDJSON_NAMESPACE_END
+
+///////////////////////////////////////////////////////////////////////////////
+// RAPIDJSON_ASSERT
+
+//! Assertion.
+/*! \ingroup RAPIDJSON_CONFIG
+ By default, rapidjson uses C \c assert() for internal assertions.
+ User can override it by defining RAPIDJSON_ASSERT(x) macro.
+
+ \note Parsing errors are handled and can be customized by the
+ \ref RAPIDJSON_ERRORS APIs.
+*/
+#ifndef RAPIDJSON_ASSERT
+#include <cassert>
+#define RAPIDJSON_ASSERT(x) assert(x)
+#endif // RAPIDJSON_ASSERT
+
+///////////////////////////////////////////////////////////////////////////////
+// RAPIDJSON_STATIC_ASSERT
+
+// Prefer C++11 static_assert, if available
+#ifndef RAPIDJSON_STATIC_ASSERT
+#if __cplusplus >= 201103L || ( defined(_MSC_VER) && _MSC_VER >= 1800 )
+#define RAPIDJSON_STATIC_ASSERT(x) \
+ static_assert(x, RAPIDJSON_STRINGIFY(x))
+#endif // C++11
+#endif // RAPIDJSON_STATIC_ASSERT
+
+// Adopt C++03 implementation from boost
+#ifndef RAPIDJSON_STATIC_ASSERT
+#ifndef __clang__
+//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
+#endif
+RAPIDJSON_NAMESPACE_BEGIN
+template <bool x> struct STATIC_ASSERTION_FAILURE;
+template <> struct STATIC_ASSERTION_FAILURE<true> { enum { value = 1 }; };
+template <size_t x> struct StaticAssertTest {};
+RAPIDJSON_NAMESPACE_END
+
+#if defined(__GNUC__) || defined(__clang__)
+#define RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE __attribute__((unused))
+#else
+#define RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE
+#endif
+#ifndef __clang__
+//!@endcond
+#endif
+
+/*! \def RAPIDJSON_STATIC_ASSERT
+ \brief (Internal) macro to check for conditions at compile-time
+ \param x compile-time condition
+ \hideinitializer
+ */
+#define RAPIDJSON_STATIC_ASSERT(x) \
+ typedef ::RAPIDJSON_NAMESPACE::StaticAssertTest< \
+ sizeof(::RAPIDJSON_NAMESPACE::STATIC_ASSERTION_FAILURE<bool(x) >)> \
+ RAPIDJSON_JOIN(StaticAssertTypedef, __LINE__) RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE
+#endif // RAPIDJSON_STATIC_ASSERT
+
+///////////////////////////////////////////////////////////////////////////////
+// RAPIDJSON_LIKELY, RAPIDJSON_UNLIKELY
+
+//! Compiler branching hint for expression with high probability to be true.
+/*!
+ \ingroup RAPIDJSON_CONFIG
+ \param x Boolean expression likely to be true.
+*/
+#ifndef RAPIDJSON_LIKELY
+#if defined(__GNUC__) || defined(__clang__)
+#define RAPIDJSON_LIKELY(x) __builtin_expect(!!(x), 1)
+#else
+#define RAPIDJSON_LIKELY(x) (x)
+#endif
+#endif
+
+//! Compiler branching hint for expression with low probability to be true.
+/*!
+ \ingroup RAPIDJSON_CONFIG
+ \param x Boolean expression unlikely to be true.
+*/
+#ifndef RAPIDJSON_UNLIKELY
+#if defined(__GNUC__) || defined(__clang__)
+#define RAPIDJSON_UNLIKELY(x) __builtin_expect(!!(x), 0)
+#else
+#define RAPIDJSON_UNLIKELY(x) (x)
+#endif
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// Helpers
+
+//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
+
+#define RAPIDJSON_MULTILINEMACRO_BEGIN do {
+#define RAPIDJSON_MULTILINEMACRO_END \
+} while((void)0, 0)
+
+// adopted from Boost
+#define RAPIDJSON_VERSION_CODE(x,y,z) \
+ (((x)*100000) + ((y)*100) + (z))
+
+#if defined(__has_builtin)
+#define RAPIDJSON_HAS_BUILTIN(x) __has_builtin(x)
+#else
+#define RAPIDJSON_HAS_BUILTIN(x) 0
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// RAPIDJSON_DIAG_PUSH/POP, RAPIDJSON_DIAG_OFF
+
+#if defined(__GNUC__)
+#define RAPIDJSON_GNUC \
+ RAPIDJSON_VERSION_CODE(__GNUC__,__GNUC_MINOR__,__GNUC_PATCHLEVEL__)
+#endif
+
+#if defined(__clang__) || (defined(RAPIDJSON_GNUC) && RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,2,0))
+
+#define RAPIDJSON_PRAGMA(x) _Pragma(RAPIDJSON_STRINGIFY(x))
+#define RAPIDJSON_DIAG_PRAGMA(x) RAPIDJSON_PRAGMA(GCC diagnostic x)
+#define RAPIDJSON_DIAG_OFF(x) \
+ RAPIDJSON_DIAG_PRAGMA(ignored RAPIDJSON_STRINGIFY(RAPIDJSON_JOIN(-W,x)))
+
+// push/pop support in Clang and GCC>=4.6
+#if defined(__clang__) || (defined(RAPIDJSON_GNUC) && RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,6,0))
+#define RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_PRAGMA(push)
+#define RAPIDJSON_DIAG_POP RAPIDJSON_DIAG_PRAGMA(pop)
+#else // GCC >= 4.2, < 4.6
+#define RAPIDJSON_DIAG_PUSH /* ignored */
+#define RAPIDJSON_DIAG_POP /* ignored */
+#endif
+
+#elif defined(_MSC_VER)
+
+// pragma (MSVC specific)
+#define RAPIDJSON_PRAGMA(x) __pragma(x)
+#define RAPIDJSON_DIAG_PRAGMA(x) RAPIDJSON_PRAGMA(warning(x))
+
+#define RAPIDJSON_DIAG_OFF(x) RAPIDJSON_DIAG_PRAGMA(disable: x)
+#define RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_PRAGMA(push)
+#define RAPIDJSON_DIAG_POP RAPIDJSON_DIAG_PRAGMA(pop)
+
+#else
+
+#define RAPIDJSON_DIAG_OFF(x) /* ignored */
+#define RAPIDJSON_DIAG_PUSH /* ignored */
+#define RAPIDJSON_DIAG_POP /* ignored */
+
+#endif // RAPIDJSON_DIAG_*
+
+///////////////////////////////////////////////////////////////////////////////
+// C++11 features
+
+#ifndef RAPIDJSON_HAS_CXX11_RVALUE_REFS
+#if defined(__clang__)
+#if __has_feature(cxx_rvalue_references) && \
+ (defined(_MSC_VER) || defined(_LIBCPP_VERSION) || defined(__GLIBCXX__) && __GLIBCXX__ >= 20080306)
+#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 1
+#else
+#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 0
+#endif
+#elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,3,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || \
+ (defined(_MSC_VER) && _MSC_VER >= 1600) || \
+ (defined(__SUNPRO_CC) && __SUNPRO_CC >= 0x5140 && defined(__GXX_EXPERIMENTAL_CXX0X__))
+
+#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 1
+#else
+#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 0
+#endif
+#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS
+
+#ifndef RAPIDJSON_HAS_CXX11_NOEXCEPT
+#if defined(__clang__)
+#define RAPIDJSON_HAS_CXX11_NOEXCEPT __has_feature(cxx_noexcept)
+#elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,6,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || \
+ (defined(_MSC_VER) && _MSC_VER >= 1900) || \
+ (defined(__SUNPRO_CC) && __SUNPRO_CC >= 0x5140 && defined(__GXX_EXPERIMENTAL_CXX0X__))
+#define RAPIDJSON_HAS_CXX11_NOEXCEPT 1
+#else
+#define RAPIDJSON_HAS_CXX11_NOEXCEPT 0
+#endif
+#endif
+#if RAPIDJSON_HAS_CXX11_NOEXCEPT
+#define RAPIDJSON_NOEXCEPT noexcept
+#else
+#define RAPIDJSON_NOEXCEPT /* noexcept */
+#endif // RAPIDJSON_HAS_CXX11_NOEXCEPT
+
+// no automatic detection, yet
+#ifndef RAPIDJSON_HAS_CXX11_TYPETRAITS
+#if (defined(_MSC_VER) && _MSC_VER >= 1700)
+#define RAPIDJSON_HAS_CXX11_TYPETRAITS 1
+#else
+#define RAPIDJSON_HAS_CXX11_TYPETRAITS 0
+#endif
+#endif
+
+#ifndef RAPIDJSON_HAS_CXX11_RANGE_FOR
+#if defined(__clang__)
+#define RAPIDJSON_HAS_CXX11_RANGE_FOR __has_feature(cxx_range_for)
+#elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,6,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || \
+ (defined(_MSC_VER) && _MSC_VER >= 1700) || \
+ (defined(__SUNPRO_CC) && __SUNPRO_CC >= 0x5140 && defined(__GXX_EXPERIMENTAL_CXX0X__))
+#define RAPIDJSON_HAS_CXX11_RANGE_FOR 1
+#else
+#define RAPIDJSON_HAS_CXX11_RANGE_FOR 0
+#endif
+#endif // RAPIDJSON_HAS_CXX11_RANGE_FOR
+
+///////////////////////////////////////////////////////////////////////////////
+// C++17 features
+
+#if defined(__has_cpp_attribute)
+# if __has_cpp_attribute(fallthrough)
+# define RAPIDJSON_DELIBERATE_FALLTHROUGH [[fallthrough]]
+# else
+# define RAPIDJSON_DELIBERATE_FALLTHROUGH
+# endif
+#else
+# define RAPIDJSON_DELIBERATE_FALLTHROUGH
+#endif
+
+//!@endcond
+
+//! Assertion (in non-throwing contexts).
+ /*! \ingroup RAPIDJSON_CONFIG
+ Some functions provide a \c noexcept guarantee, if the compiler supports it.
+ In these cases, the \ref RAPIDJSON_ASSERT macro cannot be overridden to
+ throw an exception. This macro adds a separate customization point for
+ such cases.
+
+ Defaults to C \c assert() (as \ref RAPIDJSON_ASSERT), if \c noexcept is
+ supported, and to \ref RAPIDJSON_ASSERT otherwise.
+ */
+
+///////////////////////////////////////////////////////////////////////////////
+// RAPIDJSON_NOEXCEPT_ASSERT
+
+#ifndef RAPIDJSON_NOEXCEPT_ASSERT
+#ifdef RAPIDJSON_ASSERT_THROWS
+#if RAPIDJSON_HAS_CXX11_NOEXCEPT
+#define RAPIDJSON_NOEXCEPT_ASSERT(x)
+#else
+#include <cassert>
+#define RAPIDJSON_NOEXCEPT_ASSERT(x) assert(x)
+#endif // RAPIDJSON_HAS_CXX11_NOEXCEPT
+#else
+#define RAPIDJSON_NOEXCEPT_ASSERT(x) RAPIDJSON_ASSERT(x)
+#endif // RAPIDJSON_ASSERT_THROWS
+#endif // RAPIDJSON_NOEXCEPT_ASSERT
+
+///////////////////////////////////////////////////////////////////////////////
+// malloc/realloc/free
+
+#ifndef RAPIDJSON_MALLOC
+///! customization point for global \c malloc
+#define RAPIDJSON_MALLOC(size) std::malloc(size)
+#endif
+#ifndef RAPIDJSON_REALLOC
+///! customization point for global \c realloc
+#define RAPIDJSON_REALLOC(ptr, new_size) std::realloc(ptr, new_size)
+#endif
+#ifndef RAPIDJSON_FREE
+///! customization point for global \c free
+#define RAPIDJSON_FREE(ptr) std::free(ptr)
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// new/delete
+
+#ifndef RAPIDJSON_NEW
+///! customization point for global \c new
+#define RAPIDJSON_NEW(TypeName) new TypeName
+#endif
+#ifndef RAPIDJSON_DELETE
+///! customization point for global \c delete
+#define RAPIDJSON_DELETE(x) delete x
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// Type
+
+/*! \namespace rapidjson
+ \brief main RapidJSON namespace
+ \see RAPIDJSON_NAMESPACE
+*/
+RAPIDJSON_NAMESPACE_BEGIN
+
+//! Type of JSON value
+enum Type {
+ kNullType = 0, //!< null
+ kFalseType = 1, //!< false
+ kTrueType = 2, //!< true
+ kObjectType = 3, //!< object
+ kArrayType = 4, //!< array
+ kStringType = 5, //!< string
+ kNumberType = 6 //!< number
+};
+
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_RAPIDJSON_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_reader.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_reader.h
new file mode 100644
index 00000000..caa783f9
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_reader.h
@@ -0,0 +1,2244 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_READER_H_
+#define RAPIDJSON_READER_H_
+
+/*! \file reader.h */
+
+#include "lottie_rapidjson_allocators.h"
+#include "lottie_rapidjson_stream.h"
+#include "lottie_rapidjson_encodedstream.h"
+#include "lottie_rapidjson_internal_clzll.h"
+#include "lottie_rapidjson_internal_meta.h"
+#include "lottie_rapidjson_internal_stack.h"
+#include "lottie_rapidjson_internal_strtod.h"
+#include <limits>
+
+#if defined(RAPIDJSON_SIMD) && defined(_MSC_VER)
+#include <intrin.h>
+#pragma intrinsic(_BitScanForward)
+#endif
+#ifdef RAPIDJSON_SSE42
+#include <nmmintrin.h>
+#elif defined(RAPIDJSON_SSE2)
+#include <emmintrin.h>
+#elif defined(RAPIDJSON_NEON)
+#include <arm_neon.h>
+#endif
+
+#ifdef __clang__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(old-style-cast)
+RAPIDJSON_DIAG_OFF(padded)
+RAPIDJSON_DIAG_OFF(switch-enum)
+#elif defined(_MSC_VER)
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(4127) // conditional expression is constant
+RAPIDJSON_DIAG_OFF(4702) // unreachable code
+#endif
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(effc++)
+#endif
+
+//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
+#define RAPIDJSON_NOTHING /* deliberately empty */
+#ifndef RAPIDJSON_PARSE_ERROR_EARLY_RETURN
+#define RAPIDJSON_PARSE_ERROR_EARLY_RETURN(value) \
+ RAPIDJSON_MULTILINEMACRO_BEGIN \
+ if (RAPIDJSON_UNLIKELY(HasParseError())) { return value; } \
+ RAPIDJSON_MULTILINEMACRO_END
+#endif
+#define RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID \
+ RAPIDJSON_PARSE_ERROR_EARLY_RETURN(RAPIDJSON_NOTHING)
+//!@endcond
+
+/*! \def RAPIDJSON_PARSE_ERROR_NORETURN
+ \ingroup RAPIDJSON_ERRORS
+ \brief Macro to indicate a parse error.
+ \param parseErrorCode \ref rapidjson::ParseErrorCode of the error
+ \param offset position of the error in JSON input (\c size_t)
+
+ This macros can be used as a customization point for the internal
+ error handling mechanism of RapidJSON.
+
+ A common usage model is to throw an exception instead of requiring the
+ caller to explicitly check the \ref rapidjson::GenericReader::Parse's
+ return value:
+
+ \code
+ #define RAPIDJSON_PARSE_ERROR_NORETURN(parseErrorCode,offset) \
+ throw ParseException(parseErrorCode, #parseErrorCode, offset)
+
+ #include <stdexcept> // std::runtime_error
+ #include "rapidjson/error/error.h" // rapidjson::ParseResult
+
+ struct ParseException : std::runtime_error, rapidjson::ParseResult {
+ ParseException(rapidjson::ParseErrorCode code, const char* msg, size_t offset)
+ : std::runtime_error(msg), ParseResult(code, offset) {}
+ };
+
+ #include "rapidjson/reader.h"
+ \endcode
+
+ \see RAPIDJSON_PARSE_ERROR, rapidjson::GenericReader::Parse
+ */
+#ifndef RAPIDJSON_PARSE_ERROR_NORETURN
+#define RAPIDJSON_PARSE_ERROR_NORETURN(parseErrorCode, offset) \
+ RAPIDJSON_MULTILINEMACRO_BEGIN \
+ RAPIDJSON_ASSERT(!HasParseError()); /* Error can only be assigned once */ \
+ SetParseError(parseErrorCode, offset); \
+ RAPIDJSON_MULTILINEMACRO_END
+#endif
+
+/*! \def RAPIDJSON_PARSE_ERROR
+ \ingroup RAPIDJSON_ERRORS
+ \brief (Internal) macro to indicate and handle a parse error.
+ \param parseErrorCode \ref rapidjson::ParseErrorCode of the error
+ \param offset position of the error in JSON input (\c size_t)
+
+ Invokes RAPIDJSON_PARSE_ERROR_NORETURN and stops the parsing.
+
+ \see RAPIDJSON_PARSE_ERROR_NORETURN
+ \hideinitializer
+ */
+#ifndef RAPIDJSON_PARSE_ERROR
+#define RAPIDJSON_PARSE_ERROR(parseErrorCode, offset) \
+ RAPIDJSON_MULTILINEMACRO_BEGIN \
+ RAPIDJSON_PARSE_ERROR_NORETURN(parseErrorCode, offset); \
+ RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; \
+ RAPIDJSON_MULTILINEMACRO_END
+#endif
+
+#include "lottie_rapidjson_error_error.h"
+
+RAPIDJSON_NAMESPACE_BEGIN
+
+///////////////////////////////////////////////////////////////////////////////
+// ParseFlag
+
+/*! \def RAPIDJSON_PARSE_DEFAULT_FLAGS
+ \ingroup RAPIDJSON_CONFIG
+ \brief User-defined kParseDefaultFlags definition.
+
+ User can define this as any \c ParseFlag combinations.
+*/
+#ifndef RAPIDJSON_PARSE_DEFAULT_FLAGS
+#define RAPIDJSON_PARSE_DEFAULT_FLAGS kParseNoFlags
+#endif
+
+//! Combination of parseFlags
+/*! \see Reader::Parse, Document::Parse, Document::ParseInsitu, Document::ParseStream
+ */
+enum ParseFlag {
+ kParseNoFlags = 0, //!< No flags are set.
+ kParseInsituFlag = 1, //!< In-situ(destructive) parsing.
+ kParseValidateEncodingFlag = 2, //!< Validate encoding of JSON strings.
+ kParseIterativeFlag = 4, //!< Iterative(constant complexity in terms of function call stack size) parsing.
+ kParseStopWhenDoneFlag = 8, //!< After parsing a complete JSON root from stream, stop further processing the rest of stream. When this flag is used, parser will not generate kParseErrorDocumentRootNotSingular error.
+ kParseFullPrecisionFlag = 16, //!< Parse number in full precision (but slower).
+ kParseCommentsFlag = 32, //!< Allow one-line (//) and multi-line (/**/) comments.
+ kParseNumbersAsStringsFlag = 64, //!< Parse all numbers (ints/doubles) as strings.
+ kParseTrailingCommasFlag = 128, //!< Allow trailing commas at the end of objects and arrays.
+ kParseNanAndInfFlag = 256, //!< Allow parsing NaN, Inf, Infinity, -Inf and -Infinity as doubles.
+ kParseEscapedApostropheFlag = 512, //!< Allow escaped apostrophe in strings.
+ kParseDefaultFlags = RAPIDJSON_PARSE_DEFAULT_FLAGS //!< Default parse flags. Can be customized by defining RAPIDJSON_PARSE_DEFAULT_FLAGS
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// Handler
+
+/*! \class rapidjson::Handler
+ \brief Concept for receiving events from GenericReader upon parsing.
+ The functions return true if no error occurs. If they return false,
+ the event publisher should terminate the process.
+\code
+concept Handler {
+ typename Ch;
+
+ bool Null();
+ bool Bool(bool b);
+ bool Int(int i);
+ bool Uint(unsigned i);
+ bool Int64(int64_t i);
+ bool Uint64(uint64_t i);
+ bool Double(double d);
+ /// enabled via kParseNumbersAsStringsFlag, string is not null-terminated (use length)
+ bool RawNumber(const Ch* str, SizeType length, bool copy);
+ bool String(const Ch* str, SizeType length, bool copy);
+ bool StartObject();
+ bool Key(const Ch* str, SizeType length, bool copy);
+ bool EndObject(SizeType memberCount);
+ bool StartArray();
+ bool EndArray(SizeType elementCount);
+};
+\endcode
+*/
+///////////////////////////////////////////////////////////////////////////////
+// BaseReaderHandler
+
+//! Default implementation of Handler.
+/*! This can be used as base class of any reader handler.
+ \note implements Handler concept
+*/
+template<typename Encoding = UTF8<>, typename Derived = void>
+struct BaseReaderHandler {
+ typedef typename Encoding::Ch Ch;
+
+ typedef typename internal::SelectIf<internal::IsSame<Derived, void>, BaseReaderHandler, Derived>::Type Override;
+
+ bool Default() { return true; }
+ bool Null() { return static_cast<Override&>(*this).Default(); }
+ bool Bool(bool) { return static_cast<Override&>(*this).Default(); }
+ bool Int(int) { return static_cast<Override&>(*this).Default(); }
+ bool Uint(unsigned) { return static_cast<Override&>(*this).Default(); }
+ bool Int64(int64_t) { return static_cast<Override&>(*this).Default(); }
+ bool Uint64(uint64_t) { return static_cast<Override&>(*this).Default(); }
+ bool Double(double) { return static_cast<Override&>(*this).Default(); }
+ /// enabled via kParseNumbersAsStringsFlag, string is not null-terminated (use length)
+ bool RawNumber(const Ch* str, SizeType len, bool copy) { return static_cast<Override&>(*this).String(str, len, copy); }
+ bool String(const Ch*, SizeType, bool) { return static_cast<Override&>(*this).Default(); }
+ bool StartObject() { return static_cast<Override&>(*this).Default(); }
+ bool Key(const Ch* str, SizeType len, bool copy) { return static_cast<Override&>(*this).String(str, len, copy); }
+ bool EndObject(SizeType) { return static_cast<Override&>(*this).Default(); }
+ bool StartArray() { return static_cast<Override&>(*this).Default(); }
+ bool EndArray(SizeType) { return static_cast<Override&>(*this).Default(); }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// StreamLocalCopy
+
+namespace internal {
+
+template<typename Stream, int = StreamTraits<Stream>::copyOptimization>
+class StreamLocalCopy;
+
+//! Do copy optimization.
+template<typename Stream>
+class StreamLocalCopy<Stream, 1> {
+public:
+ StreamLocalCopy(Stream& original) : s(original), original_(original) {}
+ ~StreamLocalCopy() { original_ = s; }
+
+ Stream s;
+
+private:
+ StreamLocalCopy& operator=(const StreamLocalCopy&) /* = delete */;
+
+ Stream& original_;
+};
+
+//! Keep reference.
+template<typename Stream>
+class StreamLocalCopy<Stream, 0> {
+public:
+ StreamLocalCopy(Stream& original) : s(original) {}
+
+ Stream& s;
+
+private:
+ StreamLocalCopy& operator=(const StreamLocalCopy&) /* = delete */;
+};
+
+} // namespace internal
+
+///////////////////////////////////////////////////////////////////////////////
+// SkipWhitespace
+
+//! Skip the JSON white spaces in a stream.
+/*! \param is A input stream for skipping white spaces.
+ \note This function has SSE2/SSE4.2 specialization.
+*/
+template<typename InputStream>
+void SkipWhitespace(InputStream& is) {
+ internal::StreamLocalCopy<InputStream> copy(is);
+ InputStream& s(copy.s);
+
+ typename InputStream::Ch c;
+ while ((c = s.Peek()) == ' ' || c == '\n' || c == '\r' || c == '\t')
+ s.Take();
+}
+
+inline const char* SkipWhitespace(const char* p, const char* end) {
+ while (p != end && (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t'))
+ ++p;
+ return p;
+}
+
+#ifdef RAPIDJSON_SSE42
+//! Skip whitespace with SSE 4.2 pcmpistrm instruction, testing 16 8-byte characters at once.
+inline const char *SkipWhitespace_SIMD(const char* p) {
+ // Fast return for single non-whitespace
+ if (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t')
+ ++p;
+ else
+ return p;
+
+ // 16-byte align to the next boundary
+ const char* nextAligned = reinterpret_cast<const char*>((reinterpret_cast<size_t>(p) + 15) & static_cast<size_t>(~15));
+ while (p != nextAligned)
+ if (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t')
+ ++p;
+ else
+ return p;
+
+ // The rest of string using SIMD
+ static const char whitespace[16] = " \n\r\t";
+ const __m128i w = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&whitespace[0]));
+
+ for (;; p += 16) {
+ const __m128i s = _mm_load_si128(reinterpret_cast<const __m128i *>(p));
+ const int r = _mm_cmpistri(w, s, _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT | _SIDD_NEGATIVE_POLARITY);
+ if (r != 16) // some of characters is non-whitespace
+ return p + r;
+ }
+}
+
+inline const char *SkipWhitespace_SIMD(const char* p, const char* end) {
+ // Fast return for single non-whitespace
+ if (p != end && (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t'))
+ ++p;
+ else
+ return p;
+
+ // The middle of string using SIMD
+ static const char whitespace[16] = " \n\r\t";
+ const __m128i w = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&whitespace[0]));
+
+ for (; p <= end - 16; p += 16) {
+ const __m128i s = _mm_loadu_si128(reinterpret_cast<const __m128i *>(p));
+ const int r = _mm_cmpistri(w, s, _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT | _SIDD_NEGATIVE_POLARITY);
+ if (r != 16) // some of characters is non-whitespace
+ return p + r;
+ }
+
+ return SkipWhitespace(p, end);
+}
+
+#elif defined(RAPIDJSON_SSE2)
+
+//! Skip whitespace with SSE2 instructions, testing 16 8-byte characters at once.
+inline const char *SkipWhitespace_SIMD(const char* p) {
+ // Fast return for single non-whitespace
+ if (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t')
+ ++p;
+ else
+ return p;
+
+ // 16-byte align to the next boundary
+ const char* nextAligned = reinterpret_cast<const char*>((reinterpret_cast<size_t>(p) + 15) & static_cast<size_t>(~15));
+ while (p != nextAligned)
+ if (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t')
+ ++p;
+ else
+ return p;
+
+ // The rest of string
+ #define C16(c) { c, c, c, c, c, c, c, c, c, c, c, c, c, c, c, c }
+ static const char whitespaces[4][16] = { C16(' '), C16('\n'), C16('\r'), C16('\t') };
+ #undef C16
+
+ const __m128i w0 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&whitespaces[0][0]));
+ const __m128i w1 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&whitespaces[1][0]));
+ const __m128i w2 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&whitespaces[2][0]));
+ const __m128i w3 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&whitespaces[3][0]));
+
+ for (;; p += 16) {
+ const __m128i s = _mm_load_si128(reinterpret_cast<const __m128i *>(p));
+ __m128i x = _mm_cmpeq_epi8(s, w0);
+ x = _mm_or_si128(x, _mm_cmpeq_epi8(s, w1));
+ x = _mm_or_si128(x, _mm_cmpeq_epi8(s, w2));
+ x = _mm_or_si128(x, _mm_cmpeq_epi8(s, w3));
+ unsigned short r = static_cast<unsigned short>(~_mm_movemask_epi8(x));
+ if (r != 0) { // some of characters may be non-whitespace
+#ifdef _MSC_VER // Find the index of first non-whitespace
+ unsigned long offset;
+ _BitScanForward(&offset, r);
+ return p + offset;
+#else
+ return p + __builtin_ffs(r) - 1;
+#endif
+ }
+ }
+}
+
+inline const char *SkipWhitespace_SIMD(const char* p, const char* end) {
+ // Fast return for single non-whitespace
+ if (p != end && (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t'))
+ ++p;
+ else
+ return p;
+
+ // The rest of string
+ #define C16(c) { c, c, c, c, c, c, c, c, c, c, c, c, c, c, c, c }
+ static const char whitespaces[4][16] = { C16(' '), C16('\n'), C16('\r'), C16('\t') };
+ #undef C16
+
+ const __m128i w0 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&whitespaces[0][0]));
+ const __m128i w1 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&whitespaces[1][0]));
+ const __m128i w2 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&whitespaces[2][0]));
+ const __m128i w3 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&whitespaces[3][0]));
+
+ for (; p <= end - 16; p += 16) {
+ const __m128i s = _mm_loadu_si128(reinterpret_cast<const __m128i *>(p));
+ __m128i x = _mm_cmpeq_epi8(s, w0);
+ x = _mm_or_si128(x, _mm_cmpeq_epi8(s, w1));
+ x = _mm_or_si128(x, _mm_cmpeq_epi8(s, w2));
+ x = _mm_or_si128(x, _mm_cmpeq_epi8(s, w3));
+ unsigned short r = static_cast<unsigned short>(~_mm_movemask_epi8(x));
+ if (r != 0) { // some of characters may be non-whitespace
+#ifdef _MSC_VER // Find the index of first non-whitespace
+ unsigned long offset;
+ _BitScanForward(&offset, r);
+ return p + offset;
+#else
+ return p + __builtin_ffs(r) - 1;
+#endif
+ }
+ }
+
+ return SkipWhitespace(p, end);
+}
+
+#elif defined(RAPIDJSON_NEON)
+
+//! Skip whitespace with ARM Neon instructions, testing 16 8-byte characters at once.
+inline const char *SkipWhitespace_SIMD(const char* p) {
+ // Fast return for single non-whitespace
+ if (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t')
+ ++p;
+ else
+ return p;
+
+ // 16-byte align to the next boundary
+ const char* nextAligned = reinterpret_cast<const char*>((reinterpret_cast<size_t>(p) + 15) & static_cast<size_t>(~15));
+ while (p != nextAligned)
+ if (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t')
+ ++p;
+ else
+ return p;
+
+ const uint8x16_t w0 = vmovq_n_u8(' ');
+ const uint8x16_t w1 = vmovq_n_u8('\n');
+ const uint8x16_t w2 = vmovq_n_u8('\r');
+ const uint8x16_t w3 = vmovq_n_u8('\t');
+
+ for (;; p += 16) {
+ const uint8x16_t s = vld1q_u8(reinterpret_cast<const uint8_t *>(p));
+ uint8x16_t x = vceqq_u8(s, w0);
+ x = vorrq_u8(x, vceqq_u8(s, w1));
+ x = vorrq_u8(x, vceqq_u8(s, w2));
+ x = vorrq_u8(x, vceqq_u8(s, w3));
+
+ x = vmvnq_u8(x); // Negate
+ x = vrev64q_u8(x); // Rev in 64
+ uint64_t low = vgetq_lane_u64(vreinterpretq_u64_u8(x), 0); // extract
+ uint64_t high = vgetq_lane_u64(vreinterpretq_u64_u8(x), 1); // extract
+
+ if (low == 0) {
+ if (high != 0) {
+ uint32_t lz = internal::clzll(high);
+ return p + 8 + (lz >> 3);
+ }
+ } else {
+ uint32_t lz = internal::clzll(low);
+ return p + (lz >> 3);
+ }
+ }
+}
+
+inline const char *SkipWhitespace_SIMD(const char* p, const char* end) {
+ // Fast return for single non-whitespace
+ if (p != end && (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t'))
+ ++p;
+ else
+ return p;
+
+ const uint8x16_t w0 = vmovq_n_u8(' ');
+ const uint8x16_t w1 = vmovq_n_u8('\n');
+ const uint8x16_t w2 = vmovq_n_u8('\r');
+ const uint8x16_t w3 = vmovq_n_u8('\t');
+
+ for (; p <= end - 16; p += 16) {
+ const uint8x16_t s = vld1q_u8(reinterpret_cast<const uint8_t *>(p));
+ uint8x16_t x = vceqq_u8(s, w0);
+ x = vorrq_u8(x, vceqq_u8(s, w1));
+ x = vorrq_u8(x, vceqq_u8(s, w2));
+ x = vorrq_u8(x, vceqq_u8(s, w3));
+
+ x = vmvnq_u8(x); // Negate
+ x = vrev64q_u8(x); // Rev in 64
+ uint64_t low = vgetq_lane_u64(vreinterpretq_u64_u8(x), 0); // extract
+ uint64_t high = vgetq_lane_u64(vreinterpretq_u64_u8(x), 1); // extract
+
+ if (low == 0) {
+ if (high != 0) {
+ uint32_t lz = internal::clzll(high);
+ return p + 8 + (lz >> 3);
+ }
+ } else {
+ uint32_t lz = internal::clzll(low);
+ return p + (lz >> 3);
+ }
+ }
+
+ return SkipWhitespace(p, end);
+}
+
+#endif // RAPIDJSON_NEON
+
+#ifdef RAPIDJSON_SIMD
+//! Template function specialization for InsituStringStream
+template<> inline void SkipWhitespace(InsituStringStream& is) {
+ is.src_ = const_cast<char*>(SkipWhitespace_SIMD(is.src_));
+}
+
+//! Template function specialization for StringStream
+template<> inline void SkipWhitespace(StringStream& is) {
+ is.src_ = SkipWhitespace_SIMD(is.src_);
+}
+
+template<> inline void SkipWhitespace(EncodedInputStream<UTF8<>, MemoryStream>& is) {
+ is.is_.src_ = SkipWhitespace_SIMD(is.is_.src_, is.is_.end_);
+}
+#endif // RAPIDJSON_SIMD
+
+///////////////////////////////////////////////////////////////////////////////
+// GenericReader
+
+//! SAX-style JSON parser. Use \ref Reader for UTF8 encoding and default allocator.
+/*! GenericReader parses JSON text from a stream, and send events synchronously to an
+ object implementing Handler concept.
+
+ It needs to allocate a stack for storing a single decoded string during
+ non-destructive parsing.
+
+ For in-situ parsing, the decoded string is directly written to the source
+ text string, no temporary buffer is required.
+
+ A GenericReader object can be reused for parsing multiple JSON text.
+
+ \tparam SourceEncoding Encoding of the input stream.
+ \tparam TargetEncoding Encoding of the parse output.
+ \tparam StackAllocator Allocator type for stack.
+*/
+template <typename SourceEncoding, typename TargetEncoding, typename StackAllocator = CrtAllocator>
+class GenericReader {
+public:
+ typedef typename SourceEncoding::Ch Ch; //!< SourceEncoding character type
+
+ //! Constructor.
+ /*! \param stackAllocator Optional allocator for allocating stack memory. (Only use for non-destructive parsing)
+ \param stackCapacity stack capacity in bytes for storing a single decoded string. (Only use for non-destructive parsing)
+ */
+ GenericReader(StackAllocator* stackAllocator = 0, size_t stackCapacity = kDefaultStackCapacity) :
+ stack_(stackAllocator, stackCapacity), parseResult_(), state_(IterativeParsingStartState) {}
+
+ //! Parse JSON text.
+ /*! \tparam parseFlags Combination of \ref ParseFlag.
+ \tparam InputStream Type of input stream, implementing Stream concept.
+ \tparam Handler Type of handler, implementing Handler concept.
+ \param is Input stream to be parsed.
+ \param handler The handler to receive events.
+ \return Whether the parsing is successful.
+ */
+ template <unsigned parseFlags, typename InputStream, typename Handler>
+ ParseResult Parse(InputStream& is, Handler& handler) {
+ if (parseFlags & kParseIterativeFlag)
+ return IterativeParse<parseFlags>(is, handler);
+
+ parseResult_.Clear();
+
+ ClearStackOnExit scope(*this);
+
+ SkipWhitespaceAndComments<parseFlags>(is);
+ RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_);
+
+ if (RAPIDJSON_UNLIKELY(is.Peek() == '\0')) {
+ RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorDocumentEmpty, is.Tell());
+ RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_);
+ }
+ else {
+ ParseValue<parseFlags>(is, handler);
+ RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_);
+
+ if (!(parseFlags & kParseStopWhenDoneFlag)) {
+ SkipWhitespaceAndComments<parseFlags>(is);
+ RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_);
+
+ if (RAPIDJSON_UNLIKELY(is.Peek() != '\0')) {
+ RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorDocumentRootNotSingular, is.Tell());
+ RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_);
+ }
+ }
+ }
+
+ return parseResult_;
+ }
+
+ //! Parse JSON text (with \ref kParseDefaultFlags)
+ /*! \tparam InputStream Type of input stream, implementing Stream concept
+ \tparam Handler Type of handler, implementing Handler concept.
+ \param is Input stream to be parsed.
+ \param handler The handler to receive events.
+ \return Whether the parsing is successful.
+ */
+ template <typename InputStream, typename Handler>
+ ParseResult Parse(InputStream& is, Handler& handler) {
+ return Parse<kParseDefaultFlags>(is, handler);
+ }
+
+ //! Initialize JSON text token-by-token parsing
+ /*!
+ */
+ void IterativeParseInit() {
+ parseResult_.Clear();
+ state_ = IterativeParsingStartState;
+ }
+
+ //! Parse one token from JSON text
+ /*! \tparam InputStream Type of input stream, implementing Stream concept
+ \tparam Handler Type of handler, implementing Handler concept.
+ \param is Input stream to be parsed.
+ \param handler The handler to receive events.
+ \return Whether the parsing is successful.
+ */
+ template <unsigned parseFlags, typename InputStream, typename Handler>
+ bool IterativeParseNext(InputStream& is, Handler& handler) {
+ while (RAPIDJSON_LIKELY(is.Peek() != '\0')) {
+ SkipWhitespaceAndComments<parseFlags>(is);
+
+ Token t = Tokenize(is.Peek());
+ IterativeParsingState n = Predict(state_, t);
+ IterativeParsingState d = Transit<parseFlags>(state_, t, n, is, handler);
+
+ // If we've finished or hit an error...
+ if (RAPIDJSON_UNLIKELY(IsIterativeParsingCompleteState(d))) {
+ // Report errors.
+ if (d == IterativeParsingErrorState) {
+ HandleError(state_, is);
+ return false;
+ }
+
+ // Transition to the finish state.
+ RAPIDJSON_ASSERT(d == IterativeParsingFinishState);
+ state_ = d;
+
+ // If StopWhenDone is not set...
+ if (!(parseFlags & kParseStopWhenDoneFlag)) {
+ // ... and extra non-whitespace data is found...
+ SkipWhitespaceAndComments<parseFlags>(is);
+ if (is.Peek() != '\0') {
+ // ... this is considered an error.
+ HandleError(state_, is);
+ return false;
+ }
+ }
+
+ // Success! We are done!
+ return true;
+ }
+
+ // Transition to the new state.
+ state_ = d;
+
+ // If we parsed anything other than a delimiter, we invoked the handler, so we can return true now.
+ if (!IsIterativeParsingDelimiterState(n))
+ return true;
+ }
+
+ // We reached the end of file.
+ stack_.Clear();
+
+ if (state_ != IterativeParsingFinishState) {
+ HandleError(state_, is);
+ return false;
+ }
+
+ return true;
+ }
+
+ //! Check if token-by-token parsing JSON text is complete
+ /*! \return Whether the JSON has been fully decoded.
+ */
+ RAPIDJSON_FORCEINLINE bool IterativeParseComplete() const {
+ return IsIterativeParsingCompleteState(state_);
+ }
+
+ //! Whether a parse error has occurred in the last parsing.
+ bool HasParseError() const { return parseResult_.IsError(); }
+
+ //! Get the \ref ParseErrorCode of last parsing.
+ ParseErrorCode GetParseErrorCode() const { return parseResult_.Code(); }
+
+ //! Get the position of last parsing error in input, 0 otherwise.
+ size_t GetErrorOffset() const { return parseResult_.Offset(); }
+
+protected:
+ void SetParseError(ParseErrorCode code, size_t offset) { parseResult_.Set(code, offset); }
+
+private:
+ // Prohibit copy constructor & assignment operator.
+ GenericReader(const GenericReader&);
+ GenericReader& operator=(const GenericReader&);
+
+ void ClearStack() { stack_.Clear(); }
+
+ // clear stack on any exit from ParseStream, e.g. due to exception
+ struct ClearStackOnExit {
+ explicit ClearStackOnExit(GenericReader& r) : r_(r) {}
+ ~ClearStackOnExit() { r_.ClearStack(); }
+ private:
+ GenericReader& r_;
+ ClearStackOnExit(const ClearStackOnExit&);
+ ClearStackOnExit& operator=(const ClearStackOnExit&);
+ };
+
+ template<unsigned parseFlags, typename InputStream>
+ void SkipWhitespaceAndComments(InputStream& is) {
+ SkipWhitespace(is);
+
+ if (parseFlags & kParseCommentsFlag) {
+ while (RAPIDJSON_UNLIKELY(Consume(is, '/'))) {
+ if (Consume(is, '*')) {
+ while (true) {
+ if (RAPIDJSON_UNLIKELY(is.Peek() == '\0'))
+ RAPIDJSON_PARSE_ERROR(kParseErrorUnspecificSyntaxError, is.Tell());
+ else if (Consume(is, '*')) {
+ if (Consume(is, '/'))
+ break;
+ }
+ else
+ is.Take();
+ }
+ }
+ else if (RAPIDJSON_LIKELY(Consume(is, '/')))
+ while (is.Peek() != '\0' && is.Take() != '\n') {}
+ else
+ RAPIDJSON_PARSE_ERROR(kParseErrorUnspecificSyntaxError, is.Tell());
+
+ SkipWhitespace(is);
+ }
+ }
+ }
+
+ // Parse object: { string : value, ... }
+ template<unsigned parseFlags, typename InputStream, typename Handler>
+ void ParseObject(InputStream& is, Handler& handler) {
+ RAPIDJSON_ASSERT(is.Peek() == '{');
+ is.Take(); // Skip '{'
+
+ if (RAPIDJSON_UNLIKELY(!handler.StartObject()))
+ RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell());
+
+ SkipWhitespaceAndComments<parseFlags>(is);
+ RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID;
+
+ if (Consume(is, '}')) {
+ if (RAPIDJSON_UNLIKELY(!handler.EndObject(0))) // empty object
+ RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell());
+ return;
+ }
+
+ for (SizeType memberCount = 0;;) {
+ if (RAPIDJSON_UNLIKELY(is.Peek() != '"'))
+ RAPIDJSON_PARSE_ERROR(kParseErrorObjectMissName, is.Tell());
+
+ ParseString<parseFlags>(is, handler, true);
+ RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID;
+
+ SkipWhitespaceAndComments<parseFlags>(is);
+ RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID;
+
+ if (RAPIDJSON_UNLIKELY(!Consume(is, ':')))
+ RAPIDJSON_PARSE_ERROR(kParseErrorObjectMissColon, is.Tell());
+
+ SkipWhitespaceAndComments<parseFlags>(is);
+ RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID;
+
+ ParseValue<parseFlags>(is, handler);
+ RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID;
+
+ SkipWhitespaceAndComments<parseFlags>(is);
+ RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID;
+
+ ++memberCount;
+
+ switch (is.Peek()) {
+ case ',':
+ is.Take();
+ SkipWhitespaceAndComments<parseFlags>(is);
+ RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID;
+ break;
+ case '}':
+ is.Take();
+ if (RAPIDJSON_UNLIKELY(!handler.EndObject(memberCount)))
+ RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell());
+ return;
+ default:
+ RAPIDJSON_PARSE_ERROR(kParseErrorObjectMissCommaOrCurlyBracket, is.Tell()); break; // This useless break is only for making warning and coverage happy
+ }
+
+ if (parseFlags & kParseTrailingCommasFlag) {
+ if (is.Peek() == '}') {
+ if (RAPIDJSON_UNLIKELY(!handler.EndObject(memberCount)))
+ RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell());
+ is.Take();
+ return;
+ }
+ }
+ }
+ }
+
+ // Parse array: [ value, ... ]
+ template<unsigned parseFlags, typename InputStream, typename Handler>
+ void ParseArray(InputStream& is, Handler& handler) {
+ RAPIDJSON_ASSERT(is.Peek() == '[');
+ is.Take(); // Skip '['
+
+ if (RAPIDJSON_UNLIKELY(!handler.StartArray()))
+ RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell());
+
+ SkipWhitespaceAndComments<parseFlags>(is);
+ RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID;
+
+ if (Consume(is, ']')) {
+ if (RAPIDJSON_UNLIKELY(!handler.EndArray(0))) // empty array
+ RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell());
+ return;
+ }
+
+ for (SizeType elementCount = 0;;) {
+ ParseValue<parseFlags>(is, handler);
+ RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID;
+
+ ++elementCount;
+ SkipWhitespaceAndComments<parseFlags>(is);
+ RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID;
+
+ if (Consume(is, ',')) {
+ SkipWhitespaceAndComments<parseFlags>(is);
+ RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID;
+ }
+ else if (Consume(is, ']')) {
+ if (RAPIDJSON_UNLIKELY(!handler.EndArray(elementCount)))
+ RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell());
+ return;
+ }
+ else
+ RAPIDJSON_PARSE_ERROR(kParseErrorArrayMissCommaOrSquareBracket, is.Tell());
+
+ if (parseFlags & kParseTrailingCommasFlag) {
+ if (is.Peek() == ']') {
+ if (RAPIDJSON_UNLIKELY(!handler.EndArray(elementCount)))
+ RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell());
+ is.Take();
+ return;
+ }
+ }
+ }
+ }
+
+ template<unsigned parseFlags, typename InputStream, typename Handler>
+ void ParseNull(InputStream& is, Handler& handler) {
+ RAPIDJSON_ASSERT(is.Peek() == 'n');
+ is.Take();
+
+ if (RAPIDJSON_LIKELY(Consume(is, 'u') && Consume(is, 'l') && Consume(is, 'l'))) {
+ if (RAPIDJSON_UNLIKELY(!handler.Null()))
+ RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell());
+ }
+ else
+ RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, is.Tell());
+ }
+
+ template<unsigned parseFlags, typename InputStream, typename Handler>
+ void ParseTrue(InputStream& is, Handler& handler) {
+ RAPIDJSON_ASSERT(is.Peek() == 't');
+ is.Take();
+
+ if (RAPIDJSON_LIKELY(Consume(is, 'r') && Consume(is, 'u') && Consume(is, 'e'))) {
+ if (RAPIDJSON_UNLIKELY(!handler.Bool(true)))
+ RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell());
+ }
+ else
+ RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, is.Tell());
+ }
+
+ template<unsigned parseFlags, typename InputStream, typename Handler>
+ void ParseFalse(InputStream& is, Handler& handler) {
+ RAPIDJSON_ASSERT(is.Peek() == 'f');
+ is.Take();
+
+ if (RAPIDJSON_LIKELY(Consume(is, 'a') && Consume(is, 'l') && Consume(is, 's') && Consume(is, 'e'))) {
+ if (RAPIDJSON_UNLIKELY(!handler.Bool(false)))
+ RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell());
+ }
+ else
+ RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, is.Tell());
+ }
+
+ template<typename InputStream>
+ RAPIDJSON_FORCEINLINE static bool Consume(InputStream& is, typename InputStream::Ch expect) {
+ if (RAPIDJSON_LIKELY(is.Peek() == expect)) {
+ is.Take();
+ return true;
+ }
+ else
+ return false;
+ }
+
+ // Helper function to parse four hexadecimal digits in \uXXXX in ParseString().
+ template<typename InputStream>
+ unsigned ParseHex4(InputStream& is, size_t escapeOffset) {
+ unsigned codepoint = 0;
+ for (int i = 0; i < 4; i++) {
+ Ch c = is.Peek();
+ codepoint <<= 4;
+ codepoint += static_cast<unsigned>(c);
+ if (c >= '0' && c <= '9')
+ codepoint -= '0';
+ else if (c >= 'A' && c <= 'F')
+ codepoint -= 'A' - 10;
+ else if (c >= 'a' && c <= 'f')
+ codepoint -= 'a' - 10;
+ else {
+ RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorStringUnicodeEscapeInvalidHex, escapeOffset);
+ RAPIDJSON_PARSE_ERROR_EARLY_RETURN(0);
+ }
+ is.Take();
+ }
+ return codepoint;
+ }
+
+ template <typename CharType>
+ class StackStream {
+ public:
+ typedef CharType Ch;
+
+ StackStream(internal::Stack<StackAllocator>& stack) : stack_(stack), length_(0) {}
+ RAPIDJSON_FORCEINLINE void Put(Ch c) {
+ *stack_.template Push<Ch>() = c;
+ ++length_;
+ }
+
+ RAPIDJSON_FORCEINLINE void* Push(SizeType count) {
+ length_ += count;
+ return stack_.template Push<Ch>(count);
+ }
+
+ size_t Length() const { return length_; }
+
+ Ch* Pop() {
+ return stack_.template Pop<Ch>(length_);
+ }
+
+ private:
+ StackStream(const StackStream&);
+ StackStream& operator=(const StackStream&);
+
+ internal::Stack<StackAllocator>& stack_;
+ SizeType length_;
+ };
+
+ // Parse string and generate String event. Different code paths for kParseInsituFlag.
+ template<unsigned parseFlags, typename InputStream, typename Handler>
+ void ParseString(InputStream& is, Handler& handler, bool isKey = false) {
+ internal::StreamLocalCopy<InputStream> copy(is);
+ InputStream& s(copy.s);
+
+ RAPIDJSON_ASSERT(s.Peek() == '\"');
+ s.Take(); // Skip '\"'
+
+ bool success = false;
+ if (parseFlags & kParseInsituFlag) {
+ typename InputStream::Ch *head = s.PutBegin();
+ ParseStringToStream<parseFlags, SourceEncoding, SourceEncoding>(s, s);
+ RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID;
+ size_t length = s.PutEnd(head) - 1;
+ RAPIDJSON_ASSERT(length <= 0xFFFFFFFF);
+ const typename TargetEncoding::Ch* const str = reinterpret_cast<typename TargetEncoding::Ch*>(head);
+ success = (isKey ? handler.Key(str, SizeType(length), false) : handler.String(str, SizeType(length), false));
+ }
+ else {
+ StackStream<typename TargetEncoding::Ch> stackStream(stack_);
+ ParseStringToStream<parseFlags, SourceEncoding, TargetEncoding>(s, stackStream);
+ RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID;
+ SizeType length = static_cast<SizeType>(stackStream.Length()) - 1;
+ const typename TargetEncoding::Ch* const str = stackStream.Pop();
+ success = (isKey ? handler.Key(str, length, true) : handler.String(str, length, true));
+ }
+ if (RAPIDJSON_UNLIKELY(!success))
+ RAPIDJSON_PARSE_ERROR(kParseErrorTermination, s.Tell());
+ }
+
+ // Parse string to an output is
+ // This function handles the prefix/suffix double quotes, escaping, and optional encoding validation.
+ template<unsigned parseFlags, typename SEncoding, typename TEncoding, typename InputStream, typename OutputStream>
+ RAPIDJSON_FORCEINLINE void ParseStringToStream(InputStream& is, OutputStream& os) {
+//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
+#define Z16 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+ static const char escape[256] = {
+ Z16, Z16, 0, 0,'\"', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, '/',
+ Z16, Z16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,'\\', 0, 0, 0,
+ 0, 0,'\b', 0, 0, 0,'\f', 0, 0, 0, 0, 0, 0, 0,'\n', 0,
+ 0, 0,'\r', 0,'\t', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ Z16, Z16, Z16, Z16, Z16, Z16, Z16, Z16
+ };
+#undef Z16
+//!@endcond
+
+ for (;;) {
+ // Scan and copy string before "\\\"" or < 0x20. This is an optional optimzation.
+ if (!(parseFlags & kParseValidateEncodingFlag))
+ ScanCopyUnescapedString(is, os);
+
+ Ch c = is.Peek();
+ if (RAPIDJSON_UNLIKELY(c == '\\')) { // Escape
+ size_t escapeOffset = is.Tell(); // For invalid escaping, report the initial '\\' as error offset
+ is.Take();
+ Ch e = is.Peek();
+ if ((sizeof(Ch) == 1 || unsigned(e) < 256) && RAPIDJSON_LIKELY(escape[static_cast<unsigned char>(e)])) {
+ is.Take();
+ os.Put(static_cast<typename TEncoding::Ch>(escape[static_cast<unsigned char>(e)]));
+ }
+ else if ((parseFlags & kParseEscapedApostropheFlag) && RAPIDJSON_LIKELY(e == '\'')) { // Allow escaped apostrophe
+ is.Take();
+ os.Put('\'');
+ }
+ else if (RAPIDJSON_LIKELY(e == 'u')) { // Unicode
+ is.Take();
+ unsigned codepoint = ParseHex4(is, escapeOffset);
+ RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID;
+ if (RAPIDJSON_UNLIKELY(codepoint >= 0xD800 && codepoint <= 0xDFFF)) {
+ // high surrogate, check if followed by valid low surrogate
+ if (RAPIDJSON_LIKELY(codepoint <= 0xDBFF)) {
+ // Handle UTF-16 surrogate pair
+ if (RAPIDJSON_UNLIKELY(!Consume(is, '\\') || !Consume(is, 'u')))
+ RAPIDJSON_PARSE_ERROR(kParseErrorStringUnicodeSurrogateInvalid, escapeOffset);
+ unsigned codepoint2 = ParseHex4(is, escapeOffset);
+ RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID;
+ if (RAPIDJSON_UNLIKELY(codepoint2 < 0xDC00 || codepoint2 > 0xDFFF))
+ RAPIDJSON_PARSE_ERROR(kParseErrorStringUnicodeSurrogateInvalid, escapeOffset);
+ codepoint = (((codepoint - 0xD800) << 10) | (codepoint2 - 0xDC00)) + 0x10000;
+ }
+ // single low surrogate
+ else
+ {
+ RAPIDJSON_PARSE_ERROR(kParseErrorStringUnicodeSurrogateInvalid, escapeOffset);
+ }
+ }
+ TEncoding::Encode(os, codepoint);
+ }
+ else
+ RAPIDJSON_PARSE_ERROR(kParseErrorStringEscapeInvalid, escapeOffset);
+ }
+ else if (RAPIDJSON_UNLIKELY(c == '"')) { // Closing double quote
+ is.Take();
+ os.Put('\0'); // null-terminate the string
+ return;
+ }
+ else if (RAPIDJSON_UNLIKELY(static_cast<unsigned>(c) < 0x20)) { // RFC 4627: unescaped = %x20-21 / %x23-5B / %x5D-10FFFF
+ if (c == '\0')
+ RAPIDJSON_PARSE_ERROR(kParseErrorStringMissQuotationMark, is.Tell());
+ else
+ RAPIDJSON_PARSE_ERROR(kParseErrorStringInvalidEncoding, is.Tell());
+ }
+ else {
+ size_t offset = is.Tell();
+ if (RAPIDJSON_UNLIKELY((parseFlags & kParseValidateEncodingFlag ?
+ !Transcoder<SEncoding, TEncoding>::Validate(is, os) :
+ !Transcoder<SEncoding, TEncoding>::Transcode(is, os))))
+ RAPIDJSON_PARSE_ERROR(kParseErrorStringInvalidEncoding, offset);
+ }
+ }
+ }
+
+ template<typename InputStream, typename OutputStream>
+ static RAPIDJSON_FORCEINLINE void ScanCopyUnescapedString(InputStream&, OutputStream&) {
+ // Do nothing for generic version
+ }
+
+#if defined(RAPIDJSON_SSE2) || defined(RAPIDJSON_SSE42)
+ // StringStream -> StackStream<char>
+ static RAPIDJSON_FORCEINLINE void ScanCopyUnescapedString(StringStream& is, StackStream<char>& os) {
+ const char* p = is.src_;
+
+ // Scan one by one until alignment (unaligned load may cross page boundary and cause crash)
+ const char* nextAligned = reinterpret_cast<const char*>((reinterpret_cast<size_t>(p) + 15) & static_cast<size_t>(~15));
+ while (p != nextAligned)
+ if (RAPIDJSON_UNLIKELY(*p == '\"') || RAPIDJSON_UNLIKELY(*p == '\\') || RAPIDJSON_UNLIKELY(static_cast<unsigned>(*p) < 0x20)) {
+ is.src_ = p;
+ return;
+ }
+ else
+ os.Put(*p++);
+
+ // The rest of string using SIMD
+ static const char dquote[16] = { '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"' };
+ static const char bslash[16] = { '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\' };
+ static const char space[16] = { 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F };
+ const __m128i dq = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&dquote[0]));
+ const __m128i bs = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&bslash[0]));
+ const __m128i sp = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&space[0]));
+
+ for (;; p += 16) {
+ const __m128i s = _mm_load_si128(reinterpret_cast<const __m128i *>(p));
+ const __m128i t1 = _mm_cmpeq_epi8(s, dq);
+ const __m128i t2 = _mm_cmpeq_epi8(s, bs);
+ const __m128i t3 = _mm_cmpeq_epi8(_mm_max_epu8(s, sp), sp); // s < 0x20 <=> max(s, 0x1F) == 0x1F
+ const __m128i x = _mm_or_si128(_mm_or_si128(t1, t2), t3);
+ unsigned short r = static_cast<unsigned short>(_mm_movemask_epi8(x));
+ if (RAPIDJSON_UNLIKELY(r != 0)) { // some of characters is escaped
+ SizeType length;
+ #ifdef _MSC_VER // Find the index of first escaped
+ unsigned long offset;
+ _BitScanForward(&offset, r);
+ length = offset;
+ #else
+ length = static_cast<SizeType>(__builtin_ffs(r) - 1);
+ #endif
+ if (length != 0) {
+ char* q = reinterpret_cast<char*>(os.Push(length));
+ for (size_t i = 0; i < length; i++)
+ q[i] = p[i];
+
+ p += length;
+ }
+ break;
+ }
+ _mm_storeu_si128(reinterpret_cast<__m128i *>(os.Push(16)), s);
+ }
+
+ is.src_ = p;
+ }
+
+ // InsituStringStream -> InsituStringStream
+ static RAPIDJSON_FORCEINLINE void ScanCopyUnescapedString(InsituStringStream& is, InsituStringStream& os) {
+ RAPIDJSON_ASSERT(&is == &os);
+ (void)os;
+
+ if (is.src_ == is.dst_) {
+ SkipUnescapedString(is);
+ return;
+ }
+
+ char* p = is.src_;
+ char *q = is.dst_;
+
+ // Scan one by one until alignment (unaligned load may cross page boundary and cause crash)
+ const char* nextAligned = reinterpret_cast<const char*>((reinterpret_cast<size_t>(p) + 15) & static_cast<size_t>(~15));
+ while (p != nextAligned)
+ if (RAPIDJSON_UNLIKELY(*p == '\"') || RAPIDJSON_UNLIKELY(*p == '\\') || RAPIDJSON_UNLIKELY(static_cast<unsigned>(*p) < 0x20)) {
+ is.src_ = p;
+ is.dst_ = q;
+ return;
+ }
+ else
+ *q++ = *p++;
+
+ // The rest of string using SIMD
+ static const char dquote[16] = { '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"' };
+ static const char bslash[16] = { '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\' };
+ static const char space[16] = { 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F };
+ const __m128i dq = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&dquote[0]));
+ const __m128i bs = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&bslash[0]));
+ const __m128i sp = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&space[0]));
+
+ for (;; p += 16, q += 16) {
+ const __m128i s = _mm_load_si128(reinterpret_cast<const __m128i *>(p));
+ const __m128i t1 = _mm_cmpeq_epi8(s, dq);
+ const __m128i t2 = _mm_cmpeq_epi8(s, bs);
+ const __m128i t3 = _mm_cmpeq_epi8(_mm_max_epu8(s, sp), sp); // s < 0x20 <=> max(s, 0x1F) == 0x1F
+ const __m128i x = _mm_or_si128(_mm_or_si128(t1, t2), t3);
+ unsigned short r = static_cast<unsigned short>(_mm_movemask_epi8(x));
+ if (RAPIDJSON_UNLIKELY(r != 0)) { // some of characters is escaped
+ size_t length;
+#ifdef _MSC_VER // Find the index of first escaped
+ unsigned long offset;
+ _BitScanForward(&offset, r);
+ length = offset;
+#else
+ length = static_cast<size_t>(__builtin_ffs(r) - 1);
+#endif
+ for (const char* pend = p + length; p != pend; )
+ *q++ = *p++;
+ break;
+ }
+ _mm_storeu_si128(reinterpret_cast<__m128i *>(q), s);
+ }
+
+ is.src_ = p;
+ is.dst_ = q;
+ }
+
+ // When read/write pointers are the same for insitu stream, just skip unescaped characters
+ static RAPIDJSON_FORCEINLINE void SkipUnescapedString(InsituStringStream& is) {
+ RAPIDJSON_ASSERT(is.src_ == is.dst_);
+ char* p = is.src_;
+
+ // Scan one by one until alignment (unaligned load may cross page boundary and cause crash)
+ const char* nextAligned = reinterpret_cast<const char*>((reinterpret_cast<size_t>(p) + 15) & static_cast<size_t>(~15));
+ for (; p != nextAligned; p++)
+ if (RAPIDJSON_UNLIKELY(*p == '\"') || RAPIDJSON_UNLIKELY(*p == '\\') || RAPIDJSON_UNLIKELY(static_cast<unsigned>(*p) < 0x20)) {
+ is.src_ = is.dst_ = p;
+ return;
+ }
+
+ // The rest of string using SIMD
+ static const char dquote[16] = { '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"' };
+ static const char bslash[16] = { '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\' };
+ static const char space[16] = { 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F };
+ const __m128i dq = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&dquote[0]));
+ const __m128i bs = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&bslash[0]));
+ const __m128i sp = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&space[0]));
+
+ for (;; p += 16) {
+ const __m128i s = _mm_load_si128(reinterpret_cast<const __m128i *>(p));
+ const __m128i t1 = _mm_cmpeq_epi8(s, dq);
+ const __m128i t2 = _mm_cmpeq_epi8(s, bs);
+ const __m128i t3 = _mm_cmpeq_epi8(_mm_max_epu8(s, sp), sp); // s < 0x20 <=> max(s, 0x1F) == 0x1F
+ const __m128i x = _mm_or_si128(_mm_or_si128(t1, t2), t3);
+ unsigned short r = static_cast<unsigned short>(_mm_movemask_epi8(x));
+ if (RAPIDJSON_UNLIKELY(r != 0)) { // some of characters is escaped
+ size_t length;
+#ifdef _MSC_VER // Find the index of first escaped
+ unsigned long offset;
+ _BitScanForward(&offset, r);
+ length = offset;
+#else
+ length = static_cast<size_t>(__builtin_ffs(r) - 1);
+#endif
+ p += length;
+ break;
+ }
+ }
+
+ is.src_ = is.dst_ = p;
+ }
+#elif defined(RAPIDJSON_NEON)
+ // StringStream -> StackStream<char>
+ static RAPIDJSON_FORCEINLINE void ScanCopyUnescapedString(StringStream& is, StackStream<char>& os) {
+ const char* p = is.src_;
+
+ // Scan one by one until alignment (unaligned load may cross page boundary and cause crash)
+ const char* nextAligned = reinterpret_cast<const char*>((reinterpret_cast<size_t>(p) + 15) & static_cast<size_t>(~15));
+ while (p != nextAligned)
+ if (RAPIDJSON_UNLIKELY(*p == '\"') || RAPIDJSON_UNLIKELY(*p == '\\') || RAPIDJSON_UNLIKELY(static_cast<unsigned>(*p) < 0x20)) {
+ is.src_ = p;
+ return;
+ }
+ else
+ os.Put(*p++);
+
+ // The rest of string using SIMD
+ const uint8x16_t s0 = vmovq_n_u8('"');
+ const uint8x16_t s1 = vmovq_n_u8('\\');
+ const uint8x16_t s2 = vmovq_n_u8('\b');
+ const uint8x16_t s3 = vmovq_n_u8(32);
+
+ for (;; p += 16) {
+ const uint8x16_t s = vld1q_u8(reinterpret_cast<const uint8_t *>(p));
+ uint8x16_t x = vceqq_u8(s, s0);
+ x = vorrq_u8(x, vceqq_u8(s, s1));
+ x = vorrq_u8(x, vceqq_u8(s, s2));
+ x = vorrq_u8(x, vcltq_u8(s, s3));
+
+ x = vrev64q_u8(x); // Rev in 64
+ uint64_t low = vgetq_lane_u64(vreinterpretq_u64_u8(x), 0); // extract
+ uint64_t high = vgetq_lane_u64(vreinterpretq_u64_u8(x), 1); // extract
+
+ SizeType length = 0;
+ bool escaped = false;
+ if (low == 0) {
+ if (high != 0) {
+ uint32_t lz = internal::clzll(high);
+ length = 8 + (lz >> 3);
+ escaped = true;
+ }
+ } else {
+ uint32_t lz = internal::clzll(low);
+ length = lz >> 3;
+ escaped = true;
+ }
+ if (RAPIDJSON_UNLIKELY(escaped)) { // some of characters is escaped
+ if (length != 0) {
+ char* q = reinterpret_cast<char*>(os.Push(length));
+ for (size_t i = 0; i < length; i++)
+ q[i] = p[i];
+
+ p += length;
+ }
+ break;
+ }
+ vst1q_u8(reinterpret_cast<uint8_t *>(os.Push(16)), s);
+ }
+
+ is.src_ = p;
+ }
+
+ // InsituStringStream -> InsituStringStream
+ static RAPIDJSON_FORCEINLINE void ScanCopyUnescapedString(InsituStringStream& is, InsituStringStream& os) {
+ RAPIDJSON_ASSERT(&is == &os);
+ (void)os;
+
+ if (is.src_ == is.dst_) {
+ SkipUnescapedString(is);
+ return;
+ }
+
+ char* p = is.src_;
+ char *q = is.dst_;
+
+ // Scan one by one until alignment (unaligned load may cross page boundary and cause crash)
+ const char* nextAligned = reinterpret_cast<const char*>((reinterpret_cast<size_t>(p) + 15) & static_cast<size_t>(~15));
+ while (p != nextAligned)
+ if (RAPIDJSON_UNLIKELY(*p == '\"') || RAPIDJSON_UNLIKELY(*p == '\\') || RAPIDJSON_UNLIKELY(static_cast<unsigned>(*p) < 0x20)) {
+ is.src_ = p;
+ is.dst_ = q;
+ return;
+ }
+ else
+ *q++ = *p++;
+
+ // The rest of string using SIMD
+ const uint8x16_t s0 = vmovq_n_u8('"');
+ const uint8x16_t s1 = vmovq_n_u8('\\');
+ const uint8x16_t s2 = vmovq_n_u8('\b');
+ const uint8x16_t s3 = vmovq_n_u8(32);
+
+ for (;; p += 16, q += 16) {
+ const uint8x16_t s = vld1q_u8(reinterpret_cast<uint8_t *>(p));
+ uint8x16_t x = vceqq_u8(s, s0);
+ x = vorrq_u8(x, vceqq_u8(s, s1));
+ x = vorrq_u8(x, vceqq_u8(s, s2));
+ x = vorrq_u8(x, vcltq_u8(s, s3));
+
+ x = vrev64q_u8(x); // Rev in 64
+ uint64_t low = vgetq_lane_u64(vreinterpretq_u64_u8(x), 0); // extract
+ uint64_t high = vgetq_lane_u64(vreinterpretq_u64_u8(x), 1); // extract
+
+ SizeType length = 0;
+ bool escaped = false;
+ if (low == 0) {
+ if (high != 0) {
+ uint32_t lz = internal::clzll(high);
+ length = 8 + (lz >> 3);
+ escaped = true;
+ }
+ } else {
+ uint32_t lz = internal::clzll(low);
+ length = lz >> 3;
+ escaped = true;
+ }
+ if (RAPIDJSON_UNLIKELY(escaped)) { // some of characters is escaped
+ for (const char* pend = p + length; p != pend; ) {
+ *q++ = *p++;
+ }
+ break;
+ }
+ vst1q_u8(reinterpret_cast<uint8_t *>(q), s);
+ }
+
+ is.src_ = p;
+ is.dst_ = q;
+ }
+
+ // When read/write pointers are the same for insitu stream, just skip unescaped characters
+ static RAPIDJSON_FORCEINLINE void SkipUnescapedString(InsituStringStream& is) {
+ RAPIDJSON_ASSERT(is.src_ == is.dst_);
+ char* p = is.src_;
+
+ // Scan one by one until alignment (unaligned load may cross page boundary and cause crash)
+ const char* nextAligned = reinterpret_cast<const char*>((reinterpret_cast<size_t>(p) + 15) & static_cast<size_t>(~15));
+ for (; p != nextAligned; p++)
+ if (RAPIDJSON_UNLIKELY(*p == '\"') || RAPIDJSON_UNLIKELY(*p == '\\') || RAPIDJSON_UNLIKELY(static_cast<unsigned>(*p) < 0x20)) {
+ is.src_ = is.dst_ = p;
+ return;
+ }
+
+ // The rest of string using SIMD
+ const uint8x16_t s0 = vmovq_n_u8('"');
+ const uint8x16_t s1 = vmovq_n_u8('\\');
+ const uint8x16_t s2 = vmovq_n_u8('\b');
+ const uint8x16_t s3 = vmovq_n_u8(32);
+
+ for (;; p += 16) {
+ const uint8x16_t s = vld1q_u8(reinterpret_cast<uint8_t *>(p));
+ uint8x16_t x = vceqq_u8(s, s0);
+ x = vorrq_u8(x, vceqq_u8(s, s1));
+ x = vorrq_u8(x, vceqq_u8(s, s2));
+ x = vorrq_u8(x, vcltq_u8(s, s3));
+
+ x = vrev64q_u8(x); // Rev in 64
+ uint64_t low = vgetq_lane_u64(vreinterpretq_u64_u8(x), 0); // extract
+ uint64_t high = vgetq_lane_u64(vreinterpretq_u64_u8(x), 1); // extract
+
+ if (low == 0) {
+ if (high != 0) {
+ uint32_t lz = internal::clzll(high);
+ p += 8 + (lz >> 3);
+ break;
+ }
+ } else {
+ uint32_t lz = internal::clzll(low);
+ p += lz >> 3;
+ break;
+ }
+ }
+
+ is.src_ = is.dst_ = p;
+ }
+#endif // RAPIDJSON_NEON
+
+ template<typename InputStream, bool backup, bool pushOnTake>
+ class NumberStream;
+
+ template<typename InputStream>
+ class NumberStream<InputStream, false, false> {
+ public:
+ typedef typename InputStream::Ch Ch;
+
+ NumberStream(GenericReader& reader, InputStream& s) : is(s) { (void)reader; }
+
+ RAPIDJSON_FORCEINLINE Ch Peek() const { return is.Peek(); }
+ RAPIDJSON_FORCEINLINE Ch TakePush() { return is.Take(); }
+ RAPIDJSON_FORCEINLINE Ch Take() { return is.Take(); }
+ RAPIDJSON_FORCEINLINE void Push(char) {}
+
+ size_t Tell() { return is.Tell(); }
+ size_t Length() { return 0; }
+ const char* Pop() { return 0; }
+
+ protected:
+ NumberStream& operator=(const NumberStream&);
+
+ InputStream& is;
+ };
+
+ template<typename InputStream>
+ class NumberStream<InputStream, true, false> : public NumberStream<InputStream, false, false> {
+ typedef NumberStream<InputStream, false, false> Base;
+ public:
+ NumberStream(GenericReader& reader, InputStream& is) : Base(reader, is), stackStream(reader.stack_) {}
+
+ RAPIDJSON_FORCEINLINE Ch TakePush() {
+ stackStream.Put(static_cast<char>(Base::is.Peek()));
+ return Base::is.Take();
+ }
+
+ RAPIDJSON_FORCEINLINE void Push(char c) {
+ stackStream.Put(c);
+ }
+
+ size_t Length() { return stackStream.Length(); }
+
+ const char* Pop() {
+ stackStream.Put('\0');
+ return stackStream.Pop();
+ }
+
+ private:
+ StackStream<char> stackStream;
+ };
+
+ template<typename InputStream>
+ class NumberStream<InputStream, true, true> : public NumberStream<InputStream, true, false> {
+ typedef NumberStream<InputStream, true, false> Base;
+ public:
+ NumberStream(GenericReader& reader, InputStream& is) : Base(reader, is) {}
+
+ RAPIDJSON_FORCEINLINE Ch Take() { return Base::TakePush(); }
+ };
+
+ template<unsigned parseFlags, typename InputStream, typename Handler>
+ void ParseNumber(InputStream& is, Handler& handler) {
+ internal::StreamLocalCopy<InputStream> copy(is);
+ NumberStream<InputStream,
+ ((parseFlags & kParseNumbersAsStringsFlag) != 0) ?
+ ((parseFlags & kParseInsituFlag) == 0) :
+ ((parseFlags & kParseFullPrecisionFlag) != 0),
+ (parseFlags & kParseNumbersAsStringsFlag) != 0 &&
+ (parseFlags & kParseInsituFlag) == 0> s(*this, copy.s);
+
+ size_t startOffset = s.Tell();
+ double d = 0.0;
+ bool useNanOrInf = false;
+
+ // Parse minus
+ bool minus = Consume(s, '-');
+
+ // Parse int: zero / ( digit1-9 *DIGIT )
+ unsigned i = 0;
+ uint64_t i64 = 0;
+ bool use64bit = false;
+ int significandDigit = 0;
+ if (RAPIDJSON_UNLIKELY(s.Peek() == '0')) {
+ i = 0;
+ s.TakePush();
+ }
+ else if (RAPIDJSON_LIKELY(s.Peek() >= '1' && s.Peek() <= '9')) {
+ i = static_cast<unsigned>(s.TakePush() - '0');
+
+ if (minus)
+ while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) {
+ if (RAPIDJSON_UNLIKELY(i >= 214748364)) { // 2^31 = 2147483648
+ if (RAPIDJSON_LIKELY(i != 214748364 || s.Peek() > '8')) {
+ i64 = i;
+ use64bit = true;
+ break;
+ }
+ }
+ i = i * 10 + static_cast<unsigned>(s.TakePush() - '0');
+ significandDigit++;
+ }
+ else
+ while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) {
+ if (RAPIDJSON_UNLIKELY(i >= 429496729)) { // 2^32 - 1 = 4294967295
+ if (RAPIDJSON_LIKELY(i != 429496729 || s.Peek() > '5')) {
+ i64 = i;
+ use64bit = true;
+ break;
+ }
+ }
+ i = i * 10 + static_cast<unsigned>(s.TakePush() - '0');
+ significandDigit++;
+ }
+ }
+ // Parse NaN or Infinity here
+ else if ((parseFlags & kParseNanAndInfFlag) && RAPIDJSON_LIKELY((s.Peek() == 'I' || s.Peek() == 'N'))) {
+ if (Consume(s, 'N')) {
+ if (Consume(s, 'a') && Consume(s, 'N')) {
+ d = std::numeric_limits<double>::quiet_NaN();
+ useNanOrInf = true;
+ }
+ }
+ else if (RAPIDJSON_LIKELY(Consume(s, 'I'))) {
+ if (Consume(s, 'n') && Consume(s, 'f')) {
+ d = (minus ? -std::numeric_limits<double>::infinity() : std::numeric_limits<double>::infinity());
+ useNanOrInf = true;
+
+ if (RAPIDJSON_UNLIKELY(s.Peek() == 'i' && !(Consume(s, 'i') && Consume(s, 'n')
+ && Consume(s, 'i') && Consume(s, 't') && Consume(s, 'y')))) {
+ RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, s.Tell());
+ }
+ }
+ }
+
+ if (RAPIDJSON_UNLIKELY(!useNanOrInf)) {
+ RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, s.Tell());
+ }
+ }
+ else
+ RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, s.Tell());
+
+ // Parse 64bit int
+ bool useDouble = false;
+ if (use64bit) {
+ if (minus)
+ while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) {
+ if (RAPIDJSON_UNLIKELY(i64 >= RAPIDJSON_UINT64_C2(0x0CCCCCCC, 0xCCCCCCCC))) // 2^63 = 9223372036854775808
+ if (RAPIDJSON_LIKELY(i64 != RAPIDJSON_UINT64_C2(0x0CCCCCCC, 0xCCCCCCCC) || s.Peek() > '8')) {
+ d = static_cast<double>(i64);
+ useDouble = true;
+ break;
+ }
+ i64 = i64 * 10 + static_cast<unsigned>(s.TakePush() - '0');
+ significandDigit++;
+ }
+ else
+ while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) {
+ if (RAPIDJSON_UNLIKELY(i64 >= RAPIDJSON_UINT64_C2(0x19999999, 0x99999999))) // 2^64 - 1 = 18446744073709551615
+ if (RAPIDJSON_LIKELY(i64 != RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) || s.Peek() > '5')) {
+ d = static_cast<double>(i64);
+ useDouble = true;
+ break;
+ }
+ i64 = i64 * 10 + static_cast<unsigned>(s.TakePush() - '0');
+ significandDigit++;
+ }
+ }
+
+ // Force double for big integer
+ if (useDouble) {
+ while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) {
+ d = d * 10 + (s.TakePush() - '0');
+ }
+ }
+
+ // Parse frac = decimal-point 1*DIGIT
+ int expFrac = 0;
+ size_t decimalPosition;
+ if (Consume(s, '.')) {
+ decimalPosition = s.Length();
+
+ if (RAPIDJSON_UNLIKELY(!(s.Peek() >= '0' && s.Peek() <= '9')))
+ RAPIDJSON_PARSE_ERROR(kParseErrorNumberMissFraction, s.Tell());
+
+ if (!useDouble) {
+#if RAPIDJSON_64BIT
+ // Use i64 to store significand in 64-bit architecture
+ if (!use64bit)
+ i64 = i;
+
+ while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) {
+ if (i64 > RAPIDJSON_UINT64_C2(0x1FFFFF, 0xFFFFFFFF)) // 2^53 - 1 for fast path
+ break;
+ else {
+ i64 = i64 * 10 + static_cast<unsigned>(s.TakePush() - '0');
+ --expFrac;
+ if (i64 != 0)
+ significandDigit++;
+ }
+ }
+
+ d = static_cast<double>(i64);
+#else
+ // Use double to store significand in 32-bit architecture
+ d = static_cast<double>(use64bit ? i64 : i);
+#endif
+ useDouble = true;
+ }
+
+ while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) {
+ if (significandDigit < 17) {
+ d = d * 10.0 + (s.TakePush() - '0');
+ --expFrac;
+ if (RAPIDJSON_LIKELY(d > 0.0))
+ significandDigit++;
+ }
+ else
+ s.TakePush();
+ }
+ }
+ else
+ decimalPosition = s.Length(); // decimal position at the end of integer.
+
+ // Parse exp = e [ minus / plus ] 1*DIGIT
+ int exp = 0;
+ if (Consume(s, 'e') || Consume(s, 'E')) {
+ if (!useDouble) {
+ d = static_cast<double>(use64bit ? i64 : i);
+ useDouble = true;
+ }
+
+ bool expMinus = false;
+ if (Consume(s, '+'))
+ ;
+ else if (Consume(s, '-'))
+ expMinus = true;
+
+ if (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) {
+ exp = static_cast<int>(s.Take() - '0');
+ if (expMinus) {
+ // (exp + expFrac) must not underflow int => we're detecting when -exp gets
+ // dangerously close to INT_MIN (a pessimistic next digit 9 would push it into
+ // underflow territory):
+ //
+ // -(exp * 10 + 9) + expFrac >= INT_MIN
+ // <=> exp <= (expFrac - INT_MIN - 9) / 10
+ RAPIDJSON_ASSERT(expFrac <= 0);
+ int maxExp = (expFrac + 2147483639) / 10;
+
+ while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) {
+ exp = exp * 10 + static_cast<int>(s.Take() - '0');
+ if (RAPIDJSON_UNLIKELY(exp > maxExp)) {
+ while (RAPIDJSON_UNLIKELY(s.Peek() >= '0' && s.Peek() <= '9')) // Consume the rest of exponent
+ s.Take();
+ }
+ }
+ }
+ else { // positive exp
+ int maxExp = 308 - expFrac;
+ while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) {
+ exp = exp * 10 + static_cast<int>(s.Take() - '0');
+ if (RAPIDJSON_UNLIKELY(exp > maxExp))
+ RAPIDJSON_PARSE_ERROR(kParseErrorNumberTooBig, startOffset);
+ }
+ }
+ }
+ else
+ RAPIDJSON_PARSE_ERROR(kParseErrorNumberMissExponent, s.Tell());
+
+ if (expMinus)
+ exp = -exp;
+ }
+
+ // Finish parsing, call event according to the type of number.
+ bool cont = true;
+
+ if (parseFlags & kParseNumbersAsStringsFlag) {
+ if (parseFlags & kParseInsituFlag) {
+ s.Pop(); // Pop stack no matter if it will be used or not.
+ typename InputStream::Ch* head = is.PutBegin();
+ const size_t length = s.Tell() - startOffset;
+ RAPIDJSON_ASSERT(length <= 0xFFFFFFFF);
+ // unable to insert the \0 character here, it will erase the comma after this number
+ const typename TargetEncoding::Ch* const str = reinterpret_cast<typename TargetEncoding::Ch*>(head);
+ cont = handler.RawNumber(str, SizeType(length), false);
+ }
+ else {
+ SizeType numCharsToCopy = static_cast<SizeType>(s.Length());
+ StringStream srcStream(s.Pop());
+ StackStream<typename TargetEncoding::Ch> dstStream(stack_);
+ while (numCharsToCopy--) {
+ Transcoder<UTF8<>, TargetEncoding>::Transcode(srcStream, dstStream);
+ }
+ dstStream.Put('\0');
+ const typename TargetEncoding::Ch* str = dstStream.Pop();
+ const SizeType length = static_cast<SizeType>(dstStream.Length()) - 1;
+ cont = handler.RawNumber(str, SizeType(length), true);
+ }
+ }
+ else {
+ size_t length = s.Length();
+ const char* decimal = s.Pop(); // Pop stack no matter if it will be used or not.
+
+ if (useDouble) {
+ int p = exp + expFrac;
+ if (parseFlags & kParseFullPrecisionFlag)
+ d = internal::StrtodFullPrecision(d, p, decimal, length, decimalPosition, exp);
+ else
+ d = internal::StrtodNormalPrecision(d, p);
+
+ // Use > max, instead of == inf, to fix bogus warning -Wfloat-equal
+ if (d > (std::numeric_limits<double>::max)()) {
+ // Overflow
+ // TODO: internal::StrtodX should report overflow (or underflow)
+ RAPIDJSON_PARSE_ERROR(kParseErrorNumberTooBig, startOffset);
+ }
+
+ cont = handler.Double(minus ? -d : d);
+ }
+ else if (useNanOrInf) {
+ cont = handler.Double(d);
+ }
+ else {
+ if (use64bit) {
+ if (minus)
+ cont = handler.Int64(static_cast<int64_t>(~i64 + 1));
+ else
+ cont = handler.Uint64(i64);
+ }
+ else {
+ if (minus)
+ cont = handler.Int(static_cast<int32_t>(~i + 1));
+ else
+ cont = handler.Uint(i);
+ }
+ }
+ }
+ if (RAPIDJSON_UNLIKELY(!cont))
+ RAPIDJSON_PARSE_ERROR(kParseErrorTermination, startOffset);
+ }
+
+ // Parse any JSON value
+ template<unsigned parseFlags, typename InputStream, typename Handler>
+ void ParseValue(InputStream& is, Handler& handler) {
+ switch (is.Peek()) {
+ case 'n': ParseNull <parseFlags>(is, handler); break;
+ case 't': ParseTrue <parseFlags>(is, handler); break;
+ case 'f': ParseFalse <parseFlags>(is, handler); break;
+ case '"': ParseString<parseFlags>(is, handler); break;
+ case '{': ParseObject<parseFlags>(is, handler); break;
+ case '[': ParseArray <parseFlags>(is, handler); break;
+ default :
+ ParseNumber<parseFlags>(is, handler);
+ break;
+
+ }
+ }
+
+ // Iterative Parsing
+
+ // States
+ enum IterativeParsingState {
+ IterativeParsingFinishState = 0, // sink states at top
+ IterativeParsingErrorState, // sink states at top
+ IterativeParsingStartState,
+
+ // Object states
+ IterativeParsingObjectInitialState,
+ IterativeParsingMemberKeyState,
+ IterativeParsingMemberValueState,
+ IterativeParsingObjectFinishState,
+
+ // Array states
+ IterativeParsingArrayInitialState,
+ IterativeParsingElementState,
+ IterativeParsingArrayFinishState,
+
+ // Single value state
+ IterativeParsingValueState,
+
+ // Delimiter states (at bottom)
+ IterativeParsingElementDelimiterState,
+ IterativeParsingMemberDelimiterState,
+ IterativeParsingKeyValueDelimiterState,
+
+ cIterativeParsingStateCount
+ };
+
+ // Tokens
+ enum Token {
+ LeftBracketToken = 0,
+ RightBracketToken,
+
+ LeftCurlyBracketToken,
+ RightCurlyBracketToken,
+
+ CommaToken,
+ ColonToken,
+
+ StringToken,
+ FalseToken,
+ TrueToken,
+ NullToken,
+ NumberToken,
+
+ kTokenCount
+ };
+
+ RAPIDJSON_FORCEINLINE Token Tokenize(Ch c) const {
+
+//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
+#define N NumberToken
+#define N16 N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N
+ // Maps from ASCII to Token
+ static const unsigned char tokenMap[256] = {
+ N16, // 00~0F
+ N16, // 10~1F
+ N, N, StringToken, N, N, N, N, N, N, N, N, N, CommaToken, N, N, N, // 20~2F
+ N, N, N, N, N, N, N, N, N, N, ColonToken, N, N, N, N, N, // 30~3F
+ N16, // 40~4F
+ N, N, N, N, N, N, N, N, N, N, N, LeftBracketToken, N, RightBracketToken, N, N, // 50~5F
+ N, N, N, N, N, N, FalseToken, N, N, N, N, N, N, N, NullToken, N, // 60~6F
+ N, N, N, N, TrueToken, N, N, N, N, N, N, LeftCurlyBracketToken, N, RightCurlyBracketToken, N, N, // 70~7F
+ N16, N16, N16, N16, N16, N16, N16, N16 // 80~FF
+ };
+#undef N
+#undef N16
+//!@endcond
+
+ if (sizeof(Ch) == 1 || static_cast<unsigned>(c) < 256)
+ return static_cast<Token>(tokenMap[static_cast<unsigned char>(c)]);
+ else
+ return NumberToken;
+ }
+
+ RAPIDJSON_FORCEINLINE IterativeParsingState Predict(IterativeParsingState state, Token token) const {
+ // current state x one lookahead token -> new state
+ static const char G[cIterativeParsingStateCount][kTokenCount] = {
+ // Finish(sink state)
+ {
+ IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState,
+ IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState,
+ IterativeParsingErrorState
+ },
+ // Error(sink state)
+ {
+ IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState,
+ IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState,
+ IterativeParsingErrorState
+ },
+ // Start
+ {
+ IterativeParsingArrayInitialState, // Left bracket
+ IterativeParsingErrorState, // Right bracket
+ IterativeParsingObjectInitialState, // Left curly bracket
+ IterativeParsingErrorState, // Right curly bracket
+ IterativeParsingErrorState, // Comma
+ IterativeParsingErrorState, // Colon
+ IterativeParsingValueState, // String
+ IterativeParsingValueState, // False
+ IterativeParsingValueState, // True
+ IterativeParsingValueState, // Null
+ IterativeParsingValueState // Number
+ },
+ // ObjectInitial
+ {
+ IterativeParsingErrorState, // Left bracket
+ IterativeParsingErrorState, // Right bracket
+ IterativeParsingErrorState, // Left curly bracket
+ IterativeParsingObjectFinishState, // Right curly bracket
+ IterativeParsingErrorState, // Comma
+ IterativeParsingErrorState, // Colon
+ IterativeParsingMemberKeyState, // String
+ IterativeParsingErrorState, // False
+ IterativeParsingErrorState, // True
+ IterativeParsingErrorState, // Null
+ IterativeParsingErrorState // Number
+ },
+ // MemberKey
+ {
+ IterativeParsingErrorState, // Left bracket
+ IterativeParsingErrorState, // Right bracket
+ IterativeParsingErrorState, // Left curly bracket
+ IterativeParsingErrorState, // Right curly bracket
+ IterativeParsingErrorState, // Comma
+ IterativeParsingKeyValueDelimiterState, // Colon
+ IterativeParsingErrorState, // String
+ IterativeParsingErrorState, // False
+ IterativeParsingErrorState, // True
+ IterativeParsingErrorState, // Null
+ IterativeParsingErrorState // Number
+ },
+ // MemberValue
+ {
+ IterativeParsingErrorState, // Left bracket
+ IterativeParsingErrorState, // Right bracket
+ IterativeParsingErrorState, // Left curly bracket
+ IterativeParsingObjectFinishState, // Right curly bracket
+ IterativeParsingMemberDelimiterState, // Comma
+ IterativeParsingErrorState, // Colon
+ IterativeParsingErrorState, // String
+ IterativeParsingErrorState, // False
+ IterativeParsingErrorState, // True
+ IterativeParsingErrorState, // Null
+ IterativeParsingErrorState // Number
+ },
+ // ObjectFinish(sink state)
+ {
+ IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState,
+ IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState,
+ IterativeParsingErrorState
+ },
+ // ArrayInitial
+ {
+ IterativeParsingArrayInitialState, // Left bracket(push Element state)
+ IterativeParsingArrayFinishState, // Right bracket
+ IterativeParsingObjectInitialState, // Left curly bracket(push Element state)
+ IterativeParsingErrorState, // Right curly bracket
+ IterativeParsingErrorState, // Comma
+ IterativeParsingErrorState, // Colon
+ IterativeParsingElementState, // String
+ IterativeParsingElementState, // False
+ IterativeParsingElementState, // True
+ IterativeParsingElementState, // Null
+ IterativeParsingElementState // Number
+ },
+ // Element
+ {
+ IterativeParsingErrorState, // Left bracket
+ IterativeParsingArrayFinishState, // Right bracket
+ IterativeParsingErrorState, // Left curly bracket
+ IterativeParsingErrorState, // Right curly bracket
+ IterativeParsingElementDelimiterState, // Comma
+ IterativeParsingErrorState, // Colon
+ IterativeParsingErrorState, // String
+ IterativeParsingErrorState, // False
+ IterativeParsingErrorState, // True
+ IterativeParsingErrorState, // Null
+ IterativeParsingErrorState // Number
+ },
+ // ArrayFinish(sink state)
+ {
+ IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState,
+ IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState,
+ IterativeParsingErrorState
+ },
+ // Single Value (sink state)
+ {
+ IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState,
+ IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState,
+ IterativeParsingErrorState
+ },
+ // ElementDelimiter
+ {
+ IterativeParsingArrayInitialState, // Left bracket(push Element state)
+ IterativeParsingArrayFinishState, // Right bracket
+ IterativeParsingObjectInitialState, // Left curly bracket(push Element state)
+ IterativeParsingErrorState, // Right curly bracket
+ IterativeParsingErrorState, // Comma
+ IterativeParsingErrorState, // Colon
+ IterativeParsingElementState, // String
+ IterativeParsingElementState, // False
+ IterativeParsingElementState, // True
+ IterativeParsingElementState, // Null
+ IterativeParsingElementState // Number
+ },
+ // MemberDelimiter
+ {
+ IterativeParsingErrorState, // Left bracket
+ IterativeParsingErrorState, // Right bracket
+ IterativeParsingErrorState, // Left curly bracket
+ IterativeParsingObjectFinishState, // Right curly bracket
+ IterativeParsingErrorState, // Comma
+ IterativeParsingErrorState, // Colon
+ IterativeParsingMemberKeyState, // String
+ IterativeParsingErrorState, // False
+ IterativeParsingErrorState, // True
+ IterativeParsingErrorState, // Null
+ IterativeParsingErrorState // Number
+ },
+ // KeyValueDelimiter
+ {
+ IterativeParsingArrayInitialState, // Left bracket(push MemberValue state)
+ IterativeParsingErrorState, // Right bracket
+ IterativeParsingObjectInitialState, // Left curly bracket(push MemberValue state)
+ IterativeParsingErrorState, // Right curly bracket
+ IterativeParsingErrorState, // Comma
+ IterativeParsingErrorState, // Colon
+ IterativeParsingMemberValueState, // String
+ IterativeParsingMemberValueState, // False
+ IterativeParsingMemberValueState, // True
+ IterativeParsingMemberValueState, // Null
+ IterativeParsingMemberValueState // Number
+ },
+ }; // End of G
+
+ return static_cast<IterativeParsingState>(G[state][token]);
+ }
+
+ // Make an advance in the token stream and state based on the candidate destination state which was returned by Transit().
+ // May return a new state on state pop.
+ template <unsigned parseFlags, typename InputStream, typename Handler>
+ RAPIDJSON_FORCEINLINE IterativeParsingState Transit(IterativeParsingState src, Token token, IterativeParsingState dst, InputStream& is, Handler& handler) {
+ (void)token;
+
+ switch (dst) {
+ case IterativeParsingErrorState:
+ return dst;
+
+ case IterativeParsingObjectInitialState:
+ case IterativeParsingArrayInitialState:
+ {
+ // Push the state(Element or MemeberValue) if we are nested in another array or value of member.
+ // In this way we can get the correct state on ObjectFinish or ArrayFinish by frame pop.
+ IterativeParsingState n = src;
+ if (src == IterativeParsingArrayInitialState || src == IterativeParsingElementDelimiterState)
+ n = IterativeParsingElementState;
+ else if (src == IterativeParsingKeyValueDelimiterState)
+ n = IterativeParsingMemberValueState;
+ // Push current state.
+ *stack_.template Push<SizeType>(1) = n;
+ // Initialize and push the member/element count.
+ *stack_.template Push<SizeType>(1) = 0;
+ // Call handler
+ bool hr = (dst == IterativeParsingObjectInitialState) ? handler.StartObject() : handler.StartArray();
+ // On handler short circuits the parsing.
+ if (!hr) {
+ RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorTermination, is.Tell());
+ return IterativeParsingErrorState;
+ }
+ else {
+ is.Take();
+ return dst;
+ }
+ }
+
+ case IterativeParsingMemberKeyState:
+ ParseString<parseFlags>(is, handler, true);
+ if (HasParseError())
+ return IterativeParsingErrorState;
+ else
+ return dst;
+
+ case IterativeParsingKeyValueDelimiterState:
+ RAPIDJSON_ASSERT(token == ColonToken);
+ is.Take();
+ return dst;
+
+ case IterativeParsingMemberValueState:
+ // Must be non-compound value. Or it would be ObjectInitial or ArrayInitial state.
+ ParseValue<parseFlags>(is, handler);
+ if (HasParseError()) {
+ return IterativeParsingErrorState;
+ }
+ return dst;
+
+ case IterativeParsingElementState:
+ // Must be non-compound value. Or it would be ObjectInitial or ArrayInitial state.
+ ParseValue<parseFlags>(is, handler);
+ if (HasParseError()) {
+ return IterativeParsingErrorState;
+ }
+ return dst;
+
+ case IterativeParsingMemberDelimiterState:
+ case IterativeParsingElementDelimiterState:
+ is.Take();
+ // Update member/element count.
+ *stack_.template Top<SizeType>() = *stack_.template Top<SizeType>() + 1;
+ return dst;
+
+ case IterativeParsingObjectFinishState:
+ {
+ // Transit from delimiter is only allowed when trailing commas are enabled
+ if (!(parseFlags & kParseTrailingCommasFlag) && src == IterativeParsingMemberDelimiterState) {
+ RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorObjectMissName, is.Tell());
+ return IterativeParsingErrorState;
+ }
+ // Get member count.
+ SizeType c = *stack_.template Pop<SizeType>(1);
+ // If the object is not empty, count the last member.
+ if (src == IterativeParsingMemberValueState)
+ ++c;
+ // Restore the state.
+ IterativeParsingState n = static_cast<IterativeParsingState>(*stack_.template Pop<SizeType>(1));
+ // Transit to Finish state if this is the topmost scope.
+ if (n == IterativeParsingStartState)
+ n = IterativeParsingFinishState;
+ // Call handler
+ bool hr = handler.EndObject(c);
+ // On handler short circuits the parsing.
+ if (!hr) {
+ RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorTermination, is.Tell());
+ return IterativeParsingErrorState;
+ }
+ else {
+ is.Take();
+ return n;
+ }
+ }
+
+ case IterativeParsingArrayFinishState:
+ {
+ // Transit from delimiter is only allowed when trailing commas are enabled
+ if (!(parseFlags & kParseTrailingCommasFlag) && src == IterativeParsingElementDelimiterState) {
+ RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorValueInvalid, is.Tell());
+ return IterativeParsingErrorState;
+ }
+ // Get element count.
+ SizeType c = *stack_.template Pop<SizeType>(1);
+ // If the array is not empty, count the last element.
+ if (src == IterativeParsingElementState)
+ ++c;
+ // Restore the state.
+ IterativeParsingState n = static_cast<IterativeParsingState>(*stack_.template Pop<SizeType>(1));
+ // Transit to Finish state if this is the topmost scope.
+ if (n == IterativeParsingStartState)
+ n = IterativeParsingFinishState;
+ // Call handler
+ bool hr = handler.EndArray(c);
+ // On handler short circuits the parsing.
+ if (!hr) {
+ RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorTermination, is.Tell());
+ return IterativeParsingErrorState;
+ }
+ else {
+ is.Take();
+ return n;
+ }
+ }
+
+ default:
+ // This branch is for IterativeParsingValueState actually.
+ // Use `default:` rather than
+ // `case IterativeParsingValueState:` is for code coverage.
+
+ // The IterativeParsingStartState is not enumerated in this switch-case.
+ // It is impossible for that case. And it can be caught by following assertion.
+
+ // The IterativeParsingFinishState is not enumerated in this switch-case either.
+ // It is a "derivative" state which cannot triggered from Predict() directly.
+ // Therefore it cannot happen here. And it can be caught by following assertion.
+ RAPIDJSON_ASSERT(dst == IterativeParsingValueState);
+
+ // Must be non-compound value. Or it would be ObjectInitial or ArrayInitial state.
+ ParseValue<parseFlags>(is, handler);
+ if (HasParseError()) {
+ return IterativeParsingErrorState;
+ }
+ return IterativeParsingFinishState;
+ }
+ }
+
+ template <typename InputStream>
+ void HandleError(IterativeParsingState src, InputStream& is) {
+ if (HasParseError()) {
+ // Error flag has been set.
+ return;
+ }
+
+ switch (src) {
+ case IterativeParsingStartState: RAPIDJSON_PARSE_ERROR(kParseErrorDocumentEmpty, is.Tell()); return;
+ case IterativeParsingFinishState: RAPIDJSON_PARSE_ERROR(kParseErrorDocumentRootNotSingular, is.Tell()); return;
+ case IterativeParsingObjectInitialState:
+ case IterativeParsingMemberDelimiterState: RAPIDJSON_PARSE_ERROR(kParseErrorObjectMissName, is.Tell()); return;
+ case IterativeParsingMemberKeyState: RAPIDJSON_PARSE_ERROR(kParseErrorObjectMissColon, is.Tell()); return;
+ case IterativeParsingMemberValueState: RAPIDJSON_PARSE_ERROR(kParseErrorObjectMissCommaOrCurlyBracket, is.Tell()); return;
+ case IterativeParsingKeyValueDelimiterState:
+ case IterativeParsingArrayInitialState:
+ case IterativeParsingElementDelimiterState: RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, is.Tell()); return;
+ default: RAPIDJSON_ASSERT(src == IterativeParsingElementState); RAPIDJSON_PARSE_ERROR(kParseErrorArrayMissCommaOrSquareBracket, is.Tell()); return;
+ }
+ }
+
+ RAPIDJSON_FORCEINLINE bool IsIterativeParsingDelimiterState(IterativeParsingState s) const {
+ return s >= IterativeParsingElementDelimiterState;
+ }
+
+ RAPIDJSON_FORCEINLINE bool IsIterativeParsingCompleteState(IterativeParsingState s) const {
+ return s <= IterativeParsingErrorState;
+ }
+
+ template <unsigned parseFlags, typename InputStream, typename Handler>
+ ParseResult IterativeParse(InputStream& is, Handler& handler) {
+ parseResult_.Clear();
+ ClearStackOnExit scope(*this);
+ IterativeParsingState state = IterativeParsingStartState;
+
+ SkipWhitespaceAndComments<parseFlags>(is);
+ RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_);
+ while (is.Peek() != '\0') {
+ Token t = Tokenize(is.Peek());
+ IterativeParsingState n = Predict(state, t);
+ IterativeParsingState d = Transit<parseFlags>(state, t, n, is, handler);
+
+ if (d == IterativeParsingErrorState) {
+ HandleError(state, is);
+ break;
+ }
+
+ state = d;
+
+ // Do not further consume streams if a root JSON has been parsed.
+ if ((parseFlags & kParseStopWhenDoneFlag) && state == IterativeParsingFinishState)
+ break;
+
+ SkipWhitespaceAndComments<parseFlags>(is);
+ RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_);
+ }
+
+ // Handle the end of file.
+ if (state != IterativeParsingFinishState)
+ HandleError(state, is);
+
+ return parseResult_;
+ }
+
+ static const size_t kDefaultStackCapacity = 256; //!< Default stack capacity in bytes for storing a single decoded string.
+ internal::Stack<StackAllocator> stack_; //!< A stack for storing decoded string temporarily during non-destructive parsing.
+ ParseResult parseResult_;
+ IterativeParsingState state_;
+}; // class GenericReader
+
+//! Reader with UTF8 encoding and default allocator.
+typedef GenericReader<UTF8<>, UTF8<> > Reader;
+
+RAPIDJSON_NAMESPACE_END
+
+#if defined(__clang__) || defined(_MSC_VER)
+RAPIDJSON_DIAG_POP
+#endif
+
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_POP
+#endif
+
+#endif // RAPIDJSON_READER_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_schema.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_schema.h
new file mode 100644
index 00000000..963dff2c
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_schema.h
@@ -0,0 +1,2496 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_SCHEMA_H_
+#define RAPIDJSON_SCHEMA_H_
+
+#include "lottie_rapidjson_document.h"
+#include "lottie_rapidjson_pointer.h"
+#include "lottie_rapidjson_stringbuffer.h"
+#include <cmath> // abs, floor
+
+#if !defined(RAPIDJSON_SCHEMA_USE_INTERNALREGEX)
+#define RAPIDJSON_SCHEMA_USE_INTERNALREGEX 1
+#else
+#define RAPIDJSON_SCHEMA_USE_INTERNALREGEX 0
+#endif
+
+#if !RAPIDJSON_SCHEMA_USE_INTERNALREGEX && defined(RAPIDJSON_SCHEMA_USE_STDREGEX) && (__cplusplus >=201103L || (defined(_MSC_VER) && _MSC_VER >= 1800))
+#define RAPIDJSON_SCHEMA_USE_STDREGEX 1
+#else
+#define RAPIDJSON_SCHEMA_USE_STDREGEX 0
+#endif
+
+#if RAPIDJSON_SCHEMA_USE_INTERNALREGEX
+#include "lottie_rapidjson_internal_regex.h"
+#elif RAPIDJSON_SCHEMA_USE_STDREGEX
+#include <regex>
+#endif
+
+#if RAPIDJSON_SCHEMA_USE_INTERNALREGEX || RAPIDJSON_SCHEMA_USE_STDREGEX
+#define RAPIDJSON_SCHEMA_HAS_REGEX 1
+#else
+#define RAPIDJSON_SCHEMA_HAS_REGEX 0
+#endif
+
+#ifndef RAPIDJSON_SCHEMA_VERBOSE
+#define RAPIDJSON_SCHEMA_VERBOSE 0
+#endif
+
+#if RAPIDJSON_SCHEMA_VERBOSE
+#include "lottie_rapidjson_stringbuffer.h"
+#endif
+
+RAPIDJSON_DIAG_PUSH
+
+#if defined(__GNUC__)
+RAPIDJSON_DIAG_OFF(effc++)
+#endif
+
+#ifdef __clang__
+RAPIDJSON_DIAG_OFF(weak-vtables)
+RAPIDJSON_DIAG_OFF(exit-time-destructors)
+RAPIDJSON_DIAG_OFF(c++98-compat-pedantic)
+RAPIDJSON_DIAG_OFF(variadic-macros)
+#elif defined(_MSC_VER)
+RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+
+///////////////////////////////////////////////////////////////////////////////
+// Verbose Utilities
+
+#if RAPIDJSON_SCHEMA_VERBOSE
+
+namespace internal {
+
+inline void PrintInvalidKeyword(const char* keyword) {
+ printf("Fail keyword: %s\n", keyword);
+}
+
+inline void PrintInvalidKeyword(const wchar_t* keyword) {
+ wprintf(L"Fail keyword: %ls\n", keyword);
+}
+
+inline void PrintInvalidDocument(const char* document) {
+ printf("Fail document: %s\n\n", document);
+}
+
+inline void PrintInvalidDocument(const wchar_t* document) {
+ wprintf(L"Fail document: %ls\n\n", document);
+}
+
+inline void PrintValidatorPointers(unsigned depth, const char* s, const char* d) {
+ printf("S: %*s%s\nD: %*s%s\n\n", depth * 4, " ", s, depth * 4, " ", d);
+}
+
+inline void PrintValidatorPointers(unsigned depth, const wchar_t* s, const wchar_t* d) {
+ wprintf(L"S: %*ls%ls\nD: %*ls%ls\n\n", depth * 4, L" ", s, depth * 4, L" ", d);
+}
+
+} // namespace internal
+
+#endif // RAPIDJSON_SCHEMA_VERBOSE
+
+///////////////////////////////////////////////////////////////////////////////
+// RAPIDJSON_INVALID_KEYWORD_RETURN
+
+#if RAPIDJSON_SCHEMA_VERBOSE
+#define RAPIDJSON_INVALID_KEYWORD_VERBOSE(keyword) internal::PrintInvalidKeyword(keyword)
+#else
+#define RAPIDJSON_INVALID_KEYWORD_VERBOSE(keyword)
+#endif
+
+#define RAPIDJSON_INVALID_KEYWORD_RETURN(keyword)\
+RAPIDJSON_MULTILINEMACRO_BEGIN\
+ context.invalidKeyword = keyword.GetString();\
+ RAPIDJSON_INVALID_KEYWORD_VERBOSE(keyword.GetString());\
+ return false;\
+RAPIDJSON_MULTILINEMACRO_END
+
+///////////////////////////////////////////////////////////////////////////////
+// Forward declarations
+
+template <typename ValueType, typename Allocator>
+class GenericSchemaDocument;
+
+namespace internal {
+
+template <typename SchemaDocumentType>
+class Schema;
+
+///////////////////////////////////////////////////////////////////////////////
+// ISchemaValidator
+
+class ISchemaValidator {
+public:
+ virtual ~ISchemaValidator() {}
+ virtual bool IsValid() const = 0;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// ISchemaStateFactory
+
+template <typename SchemaType>
+class ISchemaStateFactory {
+public:
+ virtual ~ISchemaStateFactory() {}
+ virtual ISchemaValidator* CreateSchemaValidator(const SchemaType&) = 0;
+ virtual void DestroySchemaValidator(ISchemaValidator* validator) = 0;
+ virtual void* CreateHasher() = 0;
+ virtual uint64_t GetHashCode(void* hasher) = 0;
+ virtual void DestroryHasher(void* hasher) = 0;
+ virtual void* MallocState(size_t size) = 0;
+ virtual void FreeState(void* p) = 0;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// IValidationErrorHandler
+
+template <typename SchemaType>
+class IValidationErrorHandler {
+public:
+ typedef typename SchemaType::Ch Ch;
+ typedef typename SchemaType::SValue SValue;
+
+ virtual ~IValidationErrorHandler() {}
+
+ virtual void NotMultipleOf(int64_t actual, const SValue& expected) = 0;
+ virtual void NotMultipleOf(uint64_t actual, const SValue& expected) = 0;
+ virtual void NotMultipleOf(double actual, const SValue& expected) = 0;
+ virtual void AboveMaximum(int64_t actual, const SValue& expected, bool exclusive) = 0;
+ virtual void AboveMaximum(uint64_t actual, const SValue& expected, bool exclusive) = 0;
+ virtual void AboveMaximum(double actual, const SValue& expected, bool exclusive) = 0;
+ virtual void BelowMinimum(int64_t actual, const SValue& expected, bool exclusive) = 0;
+ virtual void BelowMinimum(uint64_t actual, const SValue& expected, bool exclusive) = 0;
+ virtual void BelowMinimum(double actual, const SValue& expected, bool exclusive) = 0;
+
+ virtual void TooLong(const Ch* str, SizeType length, SizeType expected) = 0;
+ virtual void TooShort(const Ch* str, SizeType length, SizeType expected) = 0;
+ virtual void DoesNotMatch(const Ch* str, SizeType length) = 0;
+
+ virtual void DisallowedItem(SizeType index) = 0;
+ virtual void TooFewItems(SizeType actualCount, SizeType expectedCount) = 0;
+ virtual void TooManyItems(SizeType actualCount, SizeType expectedCount) = 0;
+ virtual void DuplicateItems(SizeType index1, SizeType index2) = 0;
+
+ virtual void TooManyProperties(SizeType actualCount, SizeType expectedCount) = 0;
+ virtual void TooFewProperties(SizeType actualCount, SizeType expectedCount) = 0;
+ virtual void StartMissingProperties() = 0;
+ virtual void AddMissingProperty(const SValue& name) = 0;
+ virtual bool EndMissingProperties() = 0;
+ virtual void PropertyViolations(ISchemaValidator** subvalidators, SizeType count) = 0;
+ virtual void DisallowedProperty(const Ch* name, SizeType length) = 0;
+
+ virtual void StartDependencyErrors() = 0;
+ virtual void StartMissingDependentProperties() = 0;
+ virtual void AddMissingDependentProperty(const SValue& targetName) = 0;
+ virtual void EndMissingDependentProperties(const SValue& sourceName) = 0;
+ virtual void AddDependencySchemaError(const SValue& souceName, ISchemaValidator* subvalidator) = 0;
+ virtual bool EndDependencyErrors() = 0;
+
+ virtual void DisallowedValue() = 0;
+ virtual void StartDisallowedType() = 0;
+ virtual void AddExpectedType(const typename SchemaType::ValueType& expectedType) = 0;
+ virtual void EndDisallowedType(const typename SchemaType::ValueType& actualType) = 0;
+ virtual void NotAllOf(ISchemaValidator** subvalidators, SizeType count) = 0;
+ virtual void NoneOf(ISchemaValidator** subvalidators, SizeType count) = 0;
+ virtual void NotOneOf(ISchemaValidator** subvalidators, SizeType count) = 0;
+ virtual void Disallowed() = 0;
+};
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Hasher
+
+// For comparison of compound value
+template<typename Encoding, typename Allocator>
+class Hasher {
+public:
+ typedef typename Encoding::Ch Ch;
+
+ Hasher(Allocator* allocator = 0, size_t stackCapacity = kDefaultSize) : stack_(allocator, stackCapacity) {}
+
+ bool Null() { return WriteType(kNullType); }
+ bool Bool(bool b) { return WriteType(b ? kTrueType : kFalseType); }
+ bool Int(int i) { Number n; n.u.i = i; n.d = static_cast<double>(i); return WriteNumber(n); }
+ bool Uint(unsigned u) { Number n; n.u.u = u; n.d = static_cast<double>(u); return WriteNumber(n); }
+ bool Int64(int64_t i) { Number n; n.u.i = i; n.d = static_cast<double>(i); return WriteNumber(n); }
+ bool Uint64(uint64_t u) { Number n; n.u.u = u; n.d = static_cast<double>(u); return WriteNumber(n); }
+ bool Double(double d) {
+ Number n;
+ if (d < 0) n.u.i = static_cast<int64_t>(d);
+ else n.u.u = static_cast<uint64_t>(d);
+ n.d = d;
+ return WriteNumber(n);
+ }
+
+ bool RawNumber(const Ch* str, SizeType len, bool) {
+ WriteBuffer(kNumberType, str, len * sizeof(Ch));
+ return true;
+ }
+
+ bool String(const Ch* str, SizeType len, bool) {
+ WriteBuffer(kStringType, str, len * sizeof(Ch));
+ return true;
+ }
+
+ bool StartObject() { return true; }
+ bool Key(const Ch* str, SizeType len, bool copy) { return String(str, len, copy); }
+ bool EndObject(SizeType memberCount) {
+ uint64_t h = Hash(0, kObjectType);
+ uint64_t* kv = stack_.template Pop<uint64_t>(memberCount * 2);
+ for (SizeType i = 0; i < memberCount; i++)
+ h ^= Hash(kv[i * 2], kv[i * 2 + 1]); // Use xor to achieve member order insensitive
+ *stack_.template Push<uint64_t>() = h;
+ return true;
+ }
+
+ bool StartArray() { return true; }
+ bool EndArray(SizeType elementCount) {
+ uint64_t h = Hash(0, kArrayType);
+ uint64_t* e = stack_.template Pop<uint64_t>(elementCount);
+ for (SizeType i = 0; i < elementCount; i++)
+ h = Hash(h, e[i]); // Use hash to achieve element order sensitive
+ *stack_.template Push<uint64_t>() = h;
+ return true;
+ }
+
+ bool IsValid() const { return stack_.GetSize() == sizeof(uint64_t); }
+
+ uint64_t GetHashCode() const {
+ RAPIDJSON_ASSERT(IsValid());
+ return *stack_.template Top<uint64_t>();
+ }
+
+private:
+ static const size_t kDefaultSize = 256;
+ struct Number {
+ union U {
+ uint64_t u;
+ int64_t i;
+ }u;
+ double d;
+ };
+
+ bool WriteType(Type type) { return WriteBuffer(type, 0, 0); }
+
+ bool WriteNumber(const Number& n) { return WriteBuffer(kNumberType, &n, sizeof(n)); }
+
+ bool WriteBuffer(Type type, const void* data, size_t len) {
+ // FNV-1a from http://isthe.com/chongo/tech/comp/fnv/
+ uint64_t h = Hash(RAPIDJSON_UINT64_C2(0x84222325, 0xcbf29ce4), type);
+ const unsigned char* d = static_cast<const unsigned char*>(data);
+ for (size_t i = 0; i < len; i++)
+ h = Hash(h, d[i]);
+ *stack_.template Push<uint64_t>() = h;
+ return true;
+ }
+
+ static uint64_t Hash(uint64_t h, uint64_t d) {
+ static const uint64_t kPrime = RAPIDJSON_UINT64_C2(0x00000100, 0x000001b3);
+ h ^= d;
+ h *= kPrime;
+ return h;
+ }
+
+ Stack<Allocator> stack_;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// SchemaValidationContext
+
+template <typename SchemaDocumentType>
+struct SchemaValidationContext {
+ typedef Schema<SchemaDocumentType> SchemaType;
+ typedef ISchemaStateFactory<SchemaType> SchemaValidatorFactoryType;
+ typedef IValidationErrorHandler<SchemaType> ErrorHandlerType;
+ typedef typename SchemaType::ValueType ValueType;
+ typedef typename ValueType::Ch Ch;
+
+ enum PatternValidatorType {
+ kPatternValidatorOnly,
+ kPatternValidatorWithProperty,
+ kPatternValidatorWithAdditionalProperty
+ };
+
+ SchemaValidationContext(SchemaValidatorFactoryType& f, ErrorHandlerType& eh, const SchemaType* s) :
+ factory(f),
+ error_handler(eh),
+ schema(s),
+ valueSchema(),
+ invalidKeyword(),
+ hasher(),
+ arrayElementHashCodes(),
+ validators(),
+ validatorCount(),
+ patternPropertiesValidators(),
+ patternPropertiesValidatorCount(),
+ patternPropertiesSchemas(),
+ patternPropertiesSchemaCount(),
+ valuePatternValidatorType(kPatternValidatorOnly),
+ propertyExist(),
+ inArray(false),
+ valueUniqueness(false),
+ arrayUniqueness(false)
+ {
+ }
+
+ ~SchemaValidationContext() {
+ if (hasher)
+ factory.DestroryHasher(hasher);
+ if (validators) {
+ for (SizeType i = 0; i < validatorCount; i++)
+ factory.DestroySchemaValidator(validators[i]);
+ factory.FreeState(validators);
+ }
+ if (patternPropertiesValidators) {
+ for (SizeType i = 0; i < patternPropertiesValidatorCount; i++)
+ factory.DestroySchemaValidator(patternPropertiesValidators[i]);
+ factory.FreeState(patternPropertiesValidators);
+ }
+ if (patternPropertiesSchemas)
+ factory.FreeState(patternPropertiesSchemas);
+ if (propertyExist)
+ factory.FreeState(propertyExist);
+ }
+
+ SchemaValidatorFactoryType& factory;
+ ErrorHandlerType& error_handler;
+ const SchemaType* schema;
+ const SchemaType* valueSchema;
+ const Ch* invalidKeyword;
+ void* hasher; // Only validator access
+ void* arrayElementHashCodes; // Only validator access this
+ ISchemaValidator** validators;
+ SizeType validatorCount;
+ ISchemaValidator** patternPropertiesValidators;
+ SizeType patternPropertiesValidatorCount;
+ const SchemaType** patternPropertiesSchemas;
+ SizeType patternPropertiesSchemaCount;
+ PatternValidatorType valuePatternValidatorType;
+ PatternValidatorType objectPatternValidatorType;
+ SizeType arrayElementIndex;
+ bool* propertyExist;
+ bool inArray;
+ bool valueUniqueness;
+ bool arrayUniqueness;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// Schema
+
+template <typename SchemaDocumentType>
+class Schema {
+public:
+ typedef typename SchemaDocumentType::ValueType ValueType;
+ typedef typename SchemaDocumentType::AllocatorType AllocatorType;
+ typedef typename SchemaDocumentType::PointerType PointerType;
+ typedef typename ValueType::EncodingType EncodingType;
+ typedef typename EncodingType::Ch Ch;
+ typedef SchemaValidationContext<SchemaDocumentType> Context;
+ typedef Schema<SchemaDocumentType> SchemaType;
+ typedef GenericValue<EncodingType, AllocatorType> SValue;
+ typedef IValidationErrorHandler<Schema> ErrorHandler;
+ friend class GenericSchemaDocument<ValueType, AllocatorType>;
+
+ Schema(SchemaDocumentType* schemaDocument, const PointerType& p, const ValueType& value, const ValueType& document, AllocatorType* allocator) :
+ allocator_(allocator),
+ uri_(schemaDocument->GetURI(), *allocator),
+ pointer_(p, allocator),
+ typeless_(schemaDocument->GetTypeless()),
+ enum_(),
+ enumCount_(),
+ not_(),
+ type_((1 << kTotalSchemaType) - 1), // typeless
+ validatorCount_(),
+ notValidatorIndex_(),
+ properties_(),
+ additionalPropertiesSchema_(),
+ patternProperties_(),
+ patternPropertyCount_(),
+ propertyCount_(),
+ minProperties_(),
+ maxProperties_(SizeType(~0)),
+ additionalProperties_(true),
+ hasDependencies_(),
+ hasRequired_(),
+ hasSchemaDependencies_(),
+ additionalItemsSchema_(),
+ itemsList_(),
+ itemsTuple_(),
+ itemsTupleCount_(),
+ minItems_(),
+ maxItems_(SizeType(~0)),
+ additionalItems_(true),
+ uniqueItems_(false),
+ pattern_(),
+ minLength_(0),
+ maxLength_(~SizeType(0)),
+ exclusiveMinimum_(false),
+ exclusiveMaximum_(false),
+ defaultValueLength_(0)
+ {
+ typedef typename ValueType::ConstValueIterator ConstValueIterator;
+ typedef typename ValueType::ConstMemberIterator ConstMemberIterator;
+
+ if (!value.IsObject())
+ return;
+
+ if (const ValueType* v = GetMember(value, GetTypeString())) {
+ type_ = 0;
+ if (v->IsString())
+ AddType(*v);
+ else if (v->IsArray())
+ for (ConstValueIterator itr = v->Begin(); itr != v->End(); ++itr)
+ AddType(*itr);
+ }
+
+ if (const ValueType* v = GetMember(value, GetEnumString()))
+ if (v->IsArray() && v->Size() > 0) {
+ enum_ = static_cast<uint64_t*>(allocator_->Malloc(sizeof(uint64_t) * v->Size()));
+ for (ConstValueIterator itr = v->Begin(); itr != v->End(); ++itr) {
+ typedef Hasher<EncodingType, MemoryPoolAllocator<> > EnumHasherType;
+ char buffer[256u + 24];
+ MemoryPoolAllocator<> hasherAllocator(buffer, sizeof(buffer));
+ EnumHasherType h(&hasherAllocator, 256);
+ itr->Accept(h);
+ enum_[enumCount_++] = h.GetHashCode();
+ }
+ }
+
+ if (schemaDocument) {
+ AssignIfExist(allOf_, *schemaDocument, p, value, GetAllOfString(), document);
+ AssignIfExist(anyOf_, *schemaDocument, p, value, GetAnyOfString(), document);
+ AssignIfExist(oneOf_, *schemaDocument, p, value, GetOneOfString(), document);
+ }
+
+ if (const ValueType* v = GetMember(value, GetNotString())) {
+ schemaDocument->CreateSchema(&not_, p.Append(GetNotString(), allocator_), *v, document);
+ notValidatorIndex_ = validatorCount_;
+ validatorCount_++;
+ }
+
+ // Object
+
+ const ValueType* properties = GetMember(value, GetPropertiesString());
+ const ValueType* required = GetMember(value, GetRequiredString());
+ const ValueType* dependencies = GetMember(value, GetDependenciesString());
+ {
+ // Gather properties from properties/required/dependencies
+ SValue allProperties(kArrayType);
+
+ if (properties && properties->IsObject())
+ for (ConstMemberIterator itr = properties->MemberBegin(); itr != properties->MemberEnd(); ++itr)
+ AddUniqueElement(allProperties, itr->name);
+
+ if (required && required->IsArray())
+ for (ConstValueIterator itr = required->Begin(); itr != required->End(); ++itr)
+ if (itr->IsString())
+ AddUniqueElement(allProperties, *itr);
+
+ if (dependencies && dependencies->IsObject())
+ for (ConstMemberIterator itr = dependencies->MemberBegin(); itr != dependencies->MemberEnd(); ++itr) {
+ AddUniqueElement(allProperties, itr->name);
+ if (itr->value.IsArray())
+ for (ConstValueIterator i = itr->value.Begin(); i != itr->value.End(); ++i)
+ if (i->IsString())
+ AddUniqueElement(allProperties, *i);
+ }
+
+ if (allProperties.Size() > 0) {
+ propertyCount_ = allProperties.Size();
+ properties_ = static_cast<Property*>(allocator_->Malloc(sizeof(Property) * propertyCount_));
+ for (SizeType i = 0; i < propertyCount_; i++) {
+ new (&properties_[i]) Property();
+ properties_[i].name = allProperties[i];
+ properties_[i].schema = typeless_;
+ }
+ }
+ }
+
+ if (properties && properties->IsObject()) {
+ PointerType q = p.Append(GetPropertiesString(), allocator_);
+ for (ConstMemberIterator itr = properties->MemberBegin(); itr != properties->MemberEnd(); ++itr) {
+ SizeType index;
+ if (FindPropertyIndex(itr->name, &index))
+ schemaDocument->CreateSchema(&properties_[index].schema, q.Append(itr->name, allocator_), itr->value, document);
+ }
+ }
+
+ if (const ValueType* v = GetMember(value, GetPatternPropertiesString())) {
+ PointerType q = p.Append(GetPatternPropertiesString(), allocator_);
+ patternProperties_ = static_cast<PatternProperty*>(allocator_->Malloc(sizeof(PatternProperty) * v->MemberCount()));
+ patternPropertyCount_ = 0;
+
+ for (ConstMemberIterator itr = v->MemberBegin(); itr != v->MemberEnd(); ++itr) {
+ new (&patternProperties_[patternPropertyCount_]) PatternProperty();
+ patternProperties_[patternPropertyCount_].pattern = CreatePattern(itr->name);
+ schemaDocument->CreateSchema(&patternProperties_[patternPropertyCount_].schema, q.Append(itr->name, allocator_), itr->value, document);
+ patternPropertyCount_++;
+ }
+ }
+
+ if (required && required->IsArray())
+ for (ConstValueIterator itr = required->Begin(); itr != required->End(); ++itr)
+ if (itr->IsString()) {
+ SizeType index;
+ if (FindPropertyIndex(*itr, &index)) {
+ properties_[index].required = true;
+ hasRequired_ = true;
+ }
+ }
+
+ if (dependencies && dependencies->IsObject()) {
+ PointerType q = p.Append(GetDependenciesString(), allocator_);
+ hasDependencies_ = true;
+ for (ConstMemberIterator itr = dependencies->MemberBegin(); itr != dependencies->MemberEnd(); ++itr) {
+ SizeType sourceIndex;
+ if (FindPropertyIndex(itr->name, &sourceIndex)) {
+ if (itr->value.IsArray()) {
+ properties_[sourceIndex].dependencies = static_cast<bool*>(allocator_->Malloc(sizeof(bool) * propertyCount_));
+ std::memset(properties_[sourceIndex].dependencies, 0, sizeof(bool)* propertyCount_);
+ for (ConstValueIterator targetItr = itr->value.Begin(); targetItr != itr->value.End(); ++targetItr) {
+ SizeType targetIndex;
+ if (FindPropertyIndex(*targetItr, &targetIndex))
+ properties_[sourceIndex].dependencies[targetIndex] = true;
+ }
+ }
+ else if (itr->value.IsObject()) {
+ hasSchemaDependencies_ = true;
+ schemaDocument->CreateSchema(&properties_[sourceIndex].dependenciesSchema, q.Append(itr->name, allocator_), itr->value, document);
+ properties_[sourceIndex].dependenciesValidatorIndex = validatorCount_;
+ validatorCount_++;
+ }
+ }
+ }
+ }
+
+ if (const ValueType* v = GetMember(value, GetAdditionalPropertiesString())) {
+ if (v->IsBool())
+ additionalProperties_ = v->GetBool();
+ else if (v->IsObject())
+ schemaDocument->CreateSchema(&additionalPropertiesSchema_, p.Append(GetAdditionalPropertiesString(), allocator_), *v, document);
+ }
+
+ AssignIfExist(minProperties_, value, GetMinPropertiesString());
+ AssignIfExist(maxProperties_, value, GetMaxPropertiesString());
+
+ // Array
+ if (const ValueType* v = GetMember(value, GetItemsString())) {
+ PointerType q = p.Append(GetItemsString(), allocator_);
+ if (v->IsObject()) // List validation
+ schemaDocument->CreateSchema(&itemsList_, q, *v, document);
+ else if (v->IsArray()) { // Tuple validation
+ itemsTuple_ = static_cast<const Schema**>(allocator_->Malloc(sizeof(const Schema*) * v->Size()));
+ SizeType index = 0;
+ for (ConstValueIterator itr = v->Begin(); itr != v->End(); ++itr, index++)
+ schemaDocument->CreateSchema(&itemsTuple_[itemsTupleCount_++], q.Append(index, allocator_), *itr, document);
+ }
+ }
+
+ AssignIfExist(minItems_, value, GetMinItemsString());
+ AssignIfExist(maxItems_, value, GetMaxItemsString());
+
+ if (const ValueType* v = GetMember(value, GetAdditionalItemsString())) {
+ if (v->IsBool())
+ additionalItems_ = v->GetBool();
+ else if (v->IsObject())
+ schemaDocument->CreateSchema(&additionalItemsSchema_, p.Append(GetAdditionalItemsString(), allocator_), *v, document);
+ }
+
+ AssignIfExist(uniqueItems_, value, GetUniqueItemsString());
+
+ // String
+ AssignIfExist(minLength_, value, GetMinLengthString());
+ AssignIfExist(maxLength_, value, GetMaxLengthString());
+
+ if (const ValueType* v = GetMember(value, GetPatternString()))
+ pattern_ = CreatePattern(*v);
+
+ // Number
+ if (const ValueType* v = GetMember(value, GetMinimumString()))
+ if (v->IsNumber())
+ minimum_.CopyFrom(*v, *allocator_);
+
+ if (const ValueType* v = GetMember(value, GetMaximumString()))
+ if (v->IsNumber())
+ maximum_.CopyFrom(*v, *allocator_);
+
+ AssignIfExist(exclusiveMinimum_, value, GetExclusiveMinimumString());
+ AssignIfExist(exclusiveMaximum_, value, GetExclusiveMaximumString());
+
+ if (const ValueType* v = GetMember(value, GetMultipleOfString()))
+ if (v->IsNumber() && v->GetDouble() > 0.0)
+ multipleOf_.CopyFrom(*v, *allocator_);
+
+ // Default
+ if (const ValueType* v = GetMember(value, GetDefaultValueString()))
+ if (v->IsString())
+ defaultValueLength_ = v->GetStringLength();
+
+ }
+
+    // Destructor. Property/PatternProperty elements were placement-new'ed
+    // into raw Malloc'ed arrays, so each element is destroyed explicitly
+    // before its array is freed through the allocator.
+    ~Schema() {
+        AllocatorType::Free(enum_);
+        if (properties_) {
+            for (SizeType i = 0; i < propertyCount_; i++)
+                properties_[i].~Property();
+            AllocatorType::Free(properties_);
+        }
+        if (patternProperties_) {
+            for (SizeType i = 0; i < patternPropertyCount_; i++)
+                patternProperties_[i].~PatternProperty();
+            AllocatorType::Free(patternProperties_);
+        }
+        AllocatorType::Free(itemsTuple_);
+#if RAPIDJSON_SCHEMA_HAS_REGEX
+        // pattern_ exists only when a regex backend was compiled in.
+        if (pattern_) {
+            pattern_->~RegexType();
+            AllocatorType::Free(pattern_);
+        }
+#endif
+    }
+
+    // Returns the resolved URI ("id") of this schema node.
+    const SValue& GetURI() const {
+        return uri_;
+    }
+
+    // Returns the JSON Pointer locating this schema within its document.
+    const PointerType& GetPointer() const {
+        return pointer_;
+    }
+
+    // Called before a child value is validated. When inside an array, picks
+    // the schema the upcoming element must satisfy: "items" (list form),
+    // the positional "items" tuple, "additionalItems", or typeless.
+    bool BeginValue(Context& context) const {
+        if (context.inArray) {
+            if (uniqueItems_)
+                context.valueUniqueness = true;
+
+            if (itemsList_)
+                context.valueSchema = itemsList_;
+            else if (itemsTuple_) {
+                if (context.arrayElementIndex < itemsTupleCount_)
+                    context.valueSchema = itemsTuple_[context.arrayElementIndex];
+                else if (additionalItemsSchema_)
+                    context.valueSchema = additionalItemsSchema_;
+                else if (additionalItems_)
+                    context.valueSchema = typeless_;
+                else {
+                    // Tuple exhausted and additionalItems == false: reject.
+                    context.error_handler.DisallowedItem(context.arrayElementIndex);
+                    RAPIDJSON_INVALID_KEYWORD_RETURN(GetItemsString());
+                }
+            }
+            else
+                context.valueSchema = typeless_;
+
+            context.arrayElementIndex++;
+        }
+        return true;
+    }
+
+    // Called after a child value has been validated. Reconciles the results
+    // of the parallel validators: patternProperties vs. property/additional-
+    // property validators, then enum (via hash), allOf, anyOf, oneOf, not.
+    RAPIDJSON_FORCEINLINE bool EndValue(Context& context) const {
+        if (context.patternPropertiesValidatorCount > 0) {
+            bool otherValid = false;
+            SizeType count = context.patternPropertiesValidatorCount;
+            // The last validator is the property/additionalProperties one,
+            // unless only patternProperties were involved.
+            if (context.objectPatternValidatorType != Context::kPatternValidatorOnly)
+                otherValid = context.patternPropertiesValidators[--count]->IsValid();
+
+            bool patternValid = true;
+            for (SizeType i = 0; i < count; i++)
+                if (!context.patternPropertiesValidators[i]->IsValid()) {
+                    patternValid = false;
+                    break;
+                }
+
+            if (context.objectPatternValidatorType == Context::kPatternValidatorOnly) {
+                if (!patternValid) {
+                    context.error_handler.PropertyViolations(context.patternPropertiesValidators, count);
+                    RAPIDJSON_INVALID_KEYWORD_RETURN(GetPatternPropertiesString());
+                }
+            }
+            else if (context.objectPatternValidatorType == Context::kPatternValidatorWithProperty) {
+                // Both the pattern validators and the named-property validator
+                // must pass.
+                if (!patternValid || !otherValid) {
+                    context.error_handler.PropertyViolations(context.patternPropertiesValidators, count + 1);
+                    RAPIDJSON_INVALID_KEYWORD_RETURN(GetPatternPropertiesString());
+                }
+            }
+            else if (!patternValid && !otherValid) { // kPatternValidatorWithAdditionalProperty)
+                context.error_handler.PropertyViolations(context.patternPropertiesValidators, count + 1);
+                RAPIDJSON_INVALID_KEYWORD_RETURN(GetPatternPropertiesString());
+            }
+        }
+
+        // "enum": the value's hash must match one of the precomputed hashes.
+        if (enum_) {
+            const uint64_t h = context.factory.GetHashCode(context.hasher);
+            for (SizeType i = 0; i < enumCount_; i++)
+                if (enum_[i] == h)
+                    goto foundEnum;
+            context.error_handler.DisallowedValue();
+            RAPIDJSON_INVALID_KEYWORD_RETURN(GetEnumString());
+            foundEnum:;
+        }
+
+        // "allOf": every subschema validator must have succeeded.
+        if (allOf_.schemas)
+            for (SizeType i = allOf_.begin; i < allOf_.begin + allOf_.count; i++)
+                if (!context.validators[i]->IsValid()) {
+                    context.error_handler.NotAllOf(&context.validators[allOf_.begin], allOf_.count);
+                    RAPIDJSON_INVALID_KEYWORD_RETURN(GetAllOfString());
+                }
+
+        // "anyOf": at least one subschema validator must have succeeded.
+        if (anyOf_.schemas) {
+            for (SizeType i = anyOf_.begin; i < anyOf_.begin + anyOf_.count; i++)
+                if (context.validators[i]->IsValid())
+                    goto foundAny;
+            context.error_handler.NoneOf(&context.validators[anyOf_.begin], anyOf_.count);
+            RAPIDJSON_INVALID_KEYWORD_RETURN(GetAnyOfString());
+            foundAny:;
+        }
+
+        // "oneOf": exactly one subschema validator must have succeeded.
+        if (oneOf_.schemas) {
+            bool oneValid = false;
+            for (SizeType i = oneOf_.begin; i < oneOf_.begin + oneOf_.count; i++)
+                if (context.validators[i]->IsValid()) {
+                    if (oneValid) {
+                        context.error_handler.NotOneOf(&context.validators[oneOf_.begin], oneOf_.count);
+                        RAPIDJSON_INVALID_KEYWORD_RETURN(GetOneOfString());
+                    } else
+                        oneValid = true;
+                }
+            if (!oneValid) {
+                context.error_handler.NotOneOf(&context.validators[oneOf_.begin], oneOf_.count);
+                RAPIDJSON_INVALID_KEYWORD_RETURN(GetOneOfString());
+            }
+        }
+
+        // "not": the negated subschema must have failed.
+        if (not_ && context.validators[notValidatorIndex_]->IsValid()) {
+            context.error_handler.Disallowed();
+            RAPIDJSON_INVALID_KEYWORD_RETURN(GetNotString());
+        }
+
+        return true;
+    }
+
+    // SAX-style handlers. Each checks the "type" keyword bitmask (and for
+    // integers the numeric keywords via CheckInt/CheckUint), then spawns
+    // parallel validators for allOf/anyOf/oneOf/not/dependencies.
+    bool Null(Context& context) const {
+        if (!(type_ & (1 << kNullSchemaType))) {
+            DisallowedType(context, GetNullString());
+            RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString());
+        }
+        return CreateParallelValidator(context);
+    }
+
+    bool Bool(Context& context, bool) const {
+        if (!(type_ & (1 << kBooleanSchemaType))) {
+            DisallowedType(context, GetBooleanString());
+            RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString());
+        }
+        return CreateParallelValidator(context);
+    }
+
+    bool Int(Context& context, int i) const {
+        if (!CheckInt(context, i))
+            return false;
+        return CreateParallelValidator(context);
+    }
+
+    bool Uint(Context& context, unsigned u) const {
+        if (!CheckUint(context, u))
+            return false;
+        return CreateParallelValidator(context);
+    }
+
+    bool Int64(Context& context, int64_t i) const {
+        if (!CheckInt(context, i))
+            return false;
+        return CreateParallelValidator(context);
+    }
+
+    bool Uint64(Context& context, uint64_t u) const {
+        if (!CheckUint(context, u))
+            return false;
+        return CreateParallelValidator(context);
+    }
+
+    // Handler for a double value: checks "type" then minimum/maximum/
+    // multipleOf (each skipped when its keyword is absent, i.e. IsNull).
+    bool Double(Context& context, double d) const {
+        if (!(type_ & (1 << kNumberSchemaType))) {
+            DisallowedType(context, GetNumberString());
+            RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString());
+        }
+
+        if (!minimum_.IsNull() && !CheckDoubleMinimum(context, d))
+            return false;
+
+        if (!maximum_.IsNull() && !CheckDoubleMaximum(context, d))
+            return false;
+
+        if (!multipleOf_.IsNull() && !CheckDoubleMultipleOf(context, d))
+            return false;
+
+        return CreateParallelValidator(context);
+    }
+
+    // Handler for a string value: checks "type", minLength/maxLength
+    // (counted in Unicode code points, not code units), and "pattern".
+    bool String(Context& context, const Ch* str, SizeType length, bool) const {
+        if (!(type_ & (1 << kStringSchemaType))) {
+            DisallowedType(context, GetStringString());
+            RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString());
+        }
+
+        if (minLength_ != 0 || maxLength_ != SizeType(~0)) {
+            SizeType count;
+            // Length checks are skipped if the string is not valid in the
+            // target encoding (CountStringCodePoint returns false).
+            if (internal::CountStringCodePoint<EncodingType>(str, length, &count)) {
+                if (count < minLength_) {
+                    context.error_handler.TooShort(str, length, minLength_);
+                    RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinLengthString());
+                }
+                if (count > maxLength_) {
+                    context.error_handler.TooLong(str, length, maxLength_);
+                    RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaxLengthString());
+                }
+            }
+        }
+
+        if (pattern_ && !IsPatternMatch(pattern_, str, length)) {
+            context.error_handler.DoesNotMatch(str, length);
+            RAPIDJSON_INVALID_KEYWORD_RETURN(GetPatternString());
+        }
+
+        return CreateParallelValidator(context);
+    }
+
+    // Handler for object start: checks "type", then allocates per-object
+    // bookkeeping (propertyExist flags for required/dependencies, and the
+    // patternProperties schema scratch array) from the factory's state pool.
+    bool StartObject(Context& context) const {
+        if (!(type_ & (1 << kObjectSchemaType))) {
+            DisallowedType(context, GetObjectString());
+            RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString());
+        }
+
+        if (hasDependencies_ || hasRequired_) {
+            context.propertyExist = static_cast<bool*>(context.factory.MallocState(sizeof(bool) * propertyCount_));
+            std::memset(context.propertyExist, 0, sizeof(bool) * propertyCount_);
+        }
+
+        if (patternProperties_) { // pre-allocate schema array
+            SizeType count = patternPropertyCount_ + 1; // extra for valuePatternValidatorType
+            context.patternPropertiesSchemas = static_cast<const SchemaType**>(context.factory.MallocState(sizeof(const SchemaType*) * count));
+            context.patternPropertiesSchemaCount = 0;
+            std::memset(context.patternPropertiesSchemas, 0, sizeof(SchemaType*) * count);
+        }
+
+        return CreateParallelValidator(context);
+    }
+
+    // Handler for an object member name. Selects the schema(s) that will
+    // validate the upcoming member value, in this order:
+    //   1. every "patternProperties" schema whose pattern matches the key,
+    //   2. the declared property's schema (if the key is a known property),
+    //   3. "additionalProperties" (subschema, boolean allow, or violation).
+    // When both pattern and property/additional schemas apply, they are
+    // recorded together and validated in parallel (valueSchema = typeless_).
+    bool Key(Context& context, const Ch* str, SizeType len, bool) const {
+        if (patternProperties_) {
+            context.patternPropertiesSchemaCount = 0;
+            for (SizeType i = 0; i < patternPropertyCount_; i++)
+                if (patternProperties_[i].pattern && IsPatternMatch(patternProperties_[i].pattern, str, len)) {
+                    context.patternPropertiesSchemas[context.patternPropertiesSchemaCount++] = patternProperties_[i].schema;
+                    context.valueSchema = typeless_;
+                }
+        }
+
+        SizeType index = 0;
+        if (FindPropertyIndex(ValueType(str, len).Move(), &index)) {
+            if (context.patternPropertiesSchemaCount > 0) {
+                context.patternPropertiesSchemas[context.patternPropertiesSchemaCount++] = properties_[index].schema;
+                context.valueSchema = typeless_;
+                context.valuePatternValidatorType = Context::kPatternValidatorWithProperty;
+            }
+            else
+                context.valueSchema = properties_[index].schema;
+
+            // Record presence for later "required"/"dependencies" checks.
+            if (context.propertyExist)
+                context.propertyExist[index] = true;
+
+            return true;
+        }
+
+        if (additionalPropertiesSchema_) {
+            // (Removed a redundant re-test of additionalPropertiesSchema_ here;
+            // the enclosing if already guarantees it is non-null.)
+            if (context.patternPropertiesSchemaCount > 0) {
+                context.patternPropertiesSchemas[context.patternPropertiesSchemaCount++] = additionalPropertiesSchema_;
+                context.valueSchema = typeless_;
+                context.valuePatternValidatorType = Context::kPatternValidatorWithAdditionalProperty;
+            }
+            else
+                context.valueSchema = additionalPropertiesSchema_;
+            return true;
+        }
+        else if (additionalProperties_) {
+            context.valueSchema = typeless_;
+            return true;
+        }
+
+        if (context.patternPropertiesSchemaCount == 0) { // patternProperties are not additional properties
+            context.error_handler.DisallowedProperty(str, len);
+            RAPIDJSON_INVALID_KEYWORD_RETURN(GetAdditionalPropertiesString());
+        }
+
+        return true;
+    }
+
+    // Handler for object end: enforces "required" (a missing property is
+    // excused if its schema declares a string "default"), minProperties/
+    // maxProperties, and both array- and schema-form "dependencies".
+    bool EndObject(Context& context, SizeType memberCount) const {
+        if (hasRequired_) {
+            context.error_handler.StartMissingProperties();
+            for (SizeType index = 0; index < propertyCount_; index++)
+                if (properties_[index].required && !context.propertyExist[index])
+                    if (properties_[index].schema->defaultValueLength_ == 0 )
+                        context.error_handler.AddMissingProperty(properties_[index].name);
+            if (context.error_handler.EndMissingProperties())
+                RAPIDJSON_INVALID_KEYWORD_RETURN(GetRequiredString());
+        }
+
+        if (memberCount < minProperties_) {
+            context.error_handler.TooFewProperties(memberCount, minProperties_);
+            RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinPropertiesString());
+        }
+
+        if (memberCount > maxProperties_) {
+            context.error_handler.TooManyProperties(memberCount, maxProperties_);
+            RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaxPropertiesString());
+        }
+
+        if (hasDependencies_) {
+            context.error_handler.StartDependencyErrors();
+            for (SizeType sourceIndex = 0; sourceIndex < propertyCount_; sourceIndex++) {
+                const Property& source = properties_[sourceIndex];
+                // Dependencies only trigger when the source property appeared.
+                if (context.propertyExist[sourceIndex]) {
+                    if (source.dependencies) {
+                        // Array form: every listed target property must exist.
+                        context.error_handler.StartMissingDependentProperties();
+                        for (SizeType targetIndex = 0; targetIndex < propertyCount_; targetIndex++)
+                            if (source.dependencies[targetIndex] && !context.propertyExist[targetIndex])
+                                context.error_handler.AddMissingDependentProperty(properties_[targetIndex].name);
+                        context.error_handler.EndMissingDependentProperties(source.name);
+                    }
+                    else if (source.dependenciesSchema) {
+                        // Schema form: the parallel validator must have passed.
+                        ISchemaValidator* dependenciesValidator = context.validators[source.dependenciesValidatorIndex];
+                        if (!dependenciesValidator->IsValid())
+                            context.error_handler.AddDependencySchemaError(source.name, dependenciesValidator);
+                    }
+                }
+            }
+            if (context.error_handler.EndDependencyErrors())
+                RAPIDJSON_INVALID_KEYWORD_RETURN(GetDependenciesString());
+        }
+
+        return true;
+    }
+
+    // Handler for array start: checks "type" and resets per-array state
+    // consumed by BeginValue (element index, inArray flag).
+    bool StartArray(Context& context) const {
+        if (!(type_ & (1 << kArraySchemaType))) {
+            DisallowedType(context, GetArrayString());
+            RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString());
+        }
+
+        context.arrayElementIndex = 0;
+        context.inArray = true;
+
+        return CreateParallelValidator(context);
+    }
+
+    // Handler for array end: enforces minItems/maxItems.
+    bool EndArray(Context& context, SizeType elementCount) const {
+        context.inArray = false;
+
+        if (elementCount < minItems_) {
+            context.error_handler.TooFewItems(elementCount, minItems_);
+            RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinItemsString());
+        }
+
+        if (elementCount > maxItems_) {
+            context.error_handler.TooManyItems(elementCount, maxItems_);
+            RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaxItemsString());
+        }
+
+        return true;
+    }
+
+    // Generate functions for string literal according to Ch.
+    // Each Get<Name>String() returns a function-local static ValueType holding
+    // the JSON Schema keyword, spelled character-by-character so the literal
+    // works for any character type Ch (char, wchar_t, UTF-16/32 units, ...).
+#define RAPIDJSON_STRING_(name, ...) \
+    static const ValueType& Get##name##String() {\
+        static const Ch s[] = { __VA_ARGS__, '\0' };\
+        static const ValueType v(s, static_cast<SizeType>(sizeof(s) / sizeof(Ch) - 1));\
+        return v;\
+    }
+
+    RAPIDJSON_STRING_(Null, 'n', 'u', 'l', 'l')
+    RAPIDJSON_STRING_(Boolean, 'b', 'o', 'o', 'l', 'e', 'a', 'n')
+    RAPIDJSON_STRING_(Object, 'o', 'b', 'j', 'e', 'c', 't')
+    RAPIDJSON_STRING_(Array, 'a', 'r', 'r', 'a', 'y')
+    RAPIDJSON_STRING_(String, 's', 't', 'r', 'i', 'n', 'g')
+    RAPIDJSON_STRING_(Number, 'n', 'u', 'm', 'b', 'e', 'r')
+    RAPIDJSON_STRING_(Integer, 'i', 'n', 't', 'e', 'g', 'e', 'r')
+    RAPIDJSON_STRING_(Type, 't', 'y', 'p', 'e')
+    RAPIDJSON_STRING_(Enum, 'e', 'n', 'u', 'm')
+    RAPIDJSON_STRING_(AllOf, 'a', 'l', 'l', 'O', 'f')
+    RAPIDJSON_STRING_(AnyOf, 'a', 'n', 'y', 'O', 'f')
+    RAPIDJSON_STRING_(OneOf, 'o', 'n', 'e', 'O', 'f')
+    RAPIDJSON_STRING_(Not, 'n', 'o', 't')
+    RAPIDJSON_STRING_(Properties, 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's')
+    RAPIDJSON_STRING_(Required, 'r', 'e', 'q', 'u', 'i', 'r', 'e', 'd')
+    RAPIDJSON_STRING_(Dependencies, 'd', 'e', 'p', 'e', 'n', 'd', 'e', 'n', 'c', 'i', 'e', 's')
+    RAPIDJSON_STRING_(PatternProperties, 'p', 'a', 't', 't', 'e', 'r', 'n', 'P', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's')
+    RAPIDJSON_STRING_(AdditionalProperties, 'a', 'd', 'd', 'i', 't', 'i', 'o', 'n', 'a', 'l', 'P', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's')
+    RAPIDJSON_STRING_(MinProperties, 'm', 'i', 'n', 'P', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's')
+    RAPIDJSON_STRING_(MaxProperties, 'm', 'a', 'x', 'P', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's')
+    RAPIDJSON_STRING_(Items, 'i', 't', 'e', 'm', 's')
+    RAPIDJSON_STRING_(MinItems, 'm', 'i', 'n', 'I', 't', 'e', 'm', 's')
+    RAPIDJSON_STRING_(MaxItems, 'm', 'a', 'x', 'I', 't', 'e', 'm', 's')
+    RAPIDJSON_STRING_(AdditionalItems, 'a', 'd', 'd', 'i', 't', 'i', 'o', 'n', 'a', 'l', 'I', 't', 'e', 'm', 's')
+    RAPIDJSON_STRING_(UniqueItems, 'u', 'n', 'i', 'q', 'u', 'e', 'I', 't', 'e', 'm', 's')
+    RAPIDJSON_STRING_(MinLength, 'm', 'i', 'n', 'L', 'e', 'n', 'g', 't', 'h')
+    RAPIDJSON_STRING_(MaxLength, 'm', 'a', 'x', 'L', 'e', 'n', 'g', 't', 'h')
+    RAPIDJSON_STRING_(Pattern, 'p', 'a', 't', 't', 'e', 'r', 'n')
+    RAPIDJSON_STRING_(Minimum, 'm', 'i', 'n', 'i', 'm', 'u', 'm')
+    RAPIDJSON_STRING_(Maximum, 'm', 'a', 'x', 'i', 'm', 'u', 'm')
+    RAPIDJSON_STRING_(ExclusiveMinimum, 'e', 'x', 'c', 'l', 'u', 's', 'i', 'v', 'e', 'M', 'i', 'n', 'i', 'm', 'u', 'm')
+    RAPIDJSON_STRING_(ExclusiveMaximum, 'e', 'x', 'c', 'l', 'u', 's', 'i', 'v', 'e', 'M', 'a', 'x', 'i', 'm', 'u', 'm')
+    RAPIDJSON_STRING_(MultipleOf, 'm', 'u', 'l', 't', 'i', 'p', 'l', 'e', 'O', 'f')
+    RAPIDJSON_STRING_(DefaultValue, 'd', 'e', 'f', 'a', 'u', 'l', 't')
+
+#undef RAPIDJSON_STRING_
+
+private:
+    // Bit positions for the "type" keyword bitmask stored in type_.
+    enum SchemaValueType {
+        kNullSchemaType,
+        kBooleanSchemaType,
+        kObjectSchemaType,
+        kArraySchemaType,
+        kStringSchemaType,
+        kNumberSchemaType,
+        kIntegerSchemaType,
+        kTotalSchemaType
+    };
+
+    // Regex backend selected at compile time; char is a dummy placeholder
+    // when no regex support is compiled in.
+#if RAPIDJSON_SCHEMA_USE_INTERNALREGEX
+    typedef internal::GenericRegex<EncodingType, AllocatorType> RegexType;
+#elif RAPIDJSON_SCHEMA_USE_STDREGEX
+    typedef std::basic_regex<Ch> RegexType;
+#else
+    typedef char RegexType;
+#endif
+
+    // Contiguous run of subschemas (allOf/anyOf/oneOf) plus the index range
+    // of their parallel validators in context.validators.
+    struct SchemaArray {
+        SchemaArray() : schemas(), count() {}
+        ~SchemaArray() { AllocatorType::Free(schemas); }
+        const SchemaType** schemas;
+        SizeType begin; // begin index of context.validators
+        SizeType count;
+    };
+
+    // Appends v to array a unless an equal element is already present
+    // (used to build the union of properties/required/dependencies names).
+    template <typename V1, typename V2>
+    void AddUniqueElement(V1& a, const V2& v) {
+        for (typename V1::ConstValueIterator itr = a.Begin(); itr != a.End(); ++itr)
+            if (*itr == v)
+                return;
+        V1 c(v, *allocator_);
+        a.PushBack(c, *allocator_);
+    }
+
+    // Returns a pointer to the named member's value, or 0 if absent.
+    static const ValueType* GetMember(const ValueType& value, const ValueType& name) {
+        typename ValueType::ConstMemberIterator itr = value.FindMember(name);
+        return itr != value.MemberEnd() ? &(itr->value) : 0;
+    }
+
+    // Copies the named member into out only when present and a bool.
+    static void AssignIfExist(bool& out, const ValueType& value, const ValueType& name) {
+        if (const ValueType* v = GetMember(value, name))
+            if (v->IsBool())
+                out = v->GetBool();
+    }
+
+    // Copies the named member into out only when present, unsigned, and
+    // representable as SizeType.
+    static void AssignIfExist(SizeType& out, const ValueType& value, const ValueType& name) {
+        if (const ValueType* v = GetMember(value, name))
+            if (v->IsUint64() && v->GetUint64() <= SizeType(~0))
+                out = static_cast<SizeType>(v->GetUint64());
+    }
+
+    // Compiles a non-empty schema array keyword (allOf/anyOf/oneOf) into out
+    // and reserves a contiguous validator index range for its subschemas.
+    void AssignIfExist(SchemaArray& out, SchemaDocumentType& schemaDocument, const PointerType& p, const ValueType& value, const ValueType& name, const ValueType& document) {
+        if (const ValueType* v = GetMember(value, name)) {
+            if (v->IsArray() && v->Size() > 0) {
+                PointerType q = p.Append(name, allocator_);
+                out.count = v->Size();
+                out.schemas = static_cast<const Schema**>(allocator_->Malloc(out.count * sizeof(const Schema*)));
+                memset(out.schemas, 0, sizeof(Schema*)* out.count);
+                for (SizeType i = 0; i < out.count; i++)
+                    schemaDocument.CreateSchema(&out.schemas[i], q.Append(i, allocator_), (*v)[i], document);
+                out.begin = validatorCount_;
+                validatorCount_ += out.count;
+            }
+        }
+    }
+
+    // CreatePattern/IsPatternMatch come in three compile-time flavors:
+    // internal regex, std::regex, or a stub that matches everything.
+    // CreatePattern returns 0 (no pattern) for non-string or invalid input.
+#if RAPIDJSON_SCHEMA_USE_INTERNALREGEX
+    template <typename ValueType>
+    RegexType* CreatePattern(const ValueType& value) {
+        if (value.IsString()) {
+            RegexType* r = new (allocator_->Malloc(sizeof(RegexType))) RegexType(value.GetString(), allocator_);
+            if (!r->IsValid()) {
+                // Invalid pattern: destroy and treat as "no pattern".
+                r->~RegexType();
+                AllocatorType::Free(r);
+                r = 0;
+            }
+            return r;
+        }
+        return 0;
+    }
+
+    static bool IsPatternMatch(const RegexType* pattern, const Ch *str, SizeType) {
+        GenericRegexSearch<RegexType> rs(*pattern);
+        return rs.Search(str);
+    }
+#elif RAPIDJSON_SCHEMA_USE_STDREGEX
+    template <typename ValueType>
+    RegexType* CreatePattern(const ValueType& value) {
+        if (value.IsString()) {
+            RegexType *r = static_cast<RegexType*>(allocator_->Malloc(sizeof(RegexType)));
+            try {
+                return new (r) RegexType(value.GetString(), std::size_t(value.GetStringLength()), std::regex_constants::ECMAScript);
+            }
+            catch (const std::regex_error&) {
+                // Compilation failure: free the raw storage, no pattern.
+                AllocatorType::Free(r);
+            }
+        }
+        return 0;
+    }
+
+    static bool IsPatternMatch(const RegexType* pattern, const Ch *str, SizeType length) {
+        std::match_results<const Ch*> r;
+        return std::regex_search(str, str + length, r, *pattern);
+    }
+#else
+    // No regex backend: patterns are never created and always "match".
+    template <typename ValueType>
+    RegexType* CreatePattern(const ValueType&) { return 0; }
+
+    static bool IsPatternMatch(const RegexType*, const Ch *, SizeType) { return true; }
+#endif // RAPIDJSON_SCHEMA_USE_STDREGEX
+
+    // Sets the bit for one "type" keyword value; "number" also admits
+    // integers, so it sets both the number and integer bits.
+    void AddType(const ValueType& type) {
+        if (type == GetNullString() ) type_ |= 1 << kNullSchemaType;
+        else if (type == GetBooleanString()) type_ |= 1 << kBooleanSchemaType;
+        else if (type == GetObjectString() ) type_ |= 1 << kObjectSchemaType;
+        else if (type == GetArrayString() ) type_ |= 1 << kArraySchemaType;
+        else if (type == GetStringString() ) type_ |= 1 << kStringSchemaType;
+        else if (type == GetIntegerString()) type_ |= 1 << kIntegerSchemaType;
+        else if (type == GetNumberString() ) type_ |= (1 << kNumberSchemaType) | (1 << kIntegerSchemaType);
+    }
+
+    // Instantiates the parallel validators needed for this value: a hasher
+    // for enum/uniqueItems, plus one sub-validator per allOf/anyOf/oneOf
+    // element, the "not" schema, and each schema-form dependency. Their
+    // results are consumed later in EndValue/EndObject.
+    bool CreateParallelValidator(Context& context) const {
+        if (enum_ || context.arrayUniqueness)
+            context.hasher = context.factory.CreateHasher();
+
+        if (validatorCount_) {
+            RAPIDJSON_ASSERT(context.validators == 0);
+            context.validators = static_cast<ISchemaValidator**>(context.factory.MallocState(sizeof(ISchemaValidator*) * validatorCount_));
+            context.validatorCount = validatorCount_;
+
+            if (allOf_.schemas)
+                CreateSchemaValidators(context, allOf_);
+
+            if (anyOf_.schemas)
+                CreateSchemaValidators(context, anyOf_);
+
+            if (oneOf_.schemas)
+                CreateSchemaValidators(context, oneOf_);
+
+            if (not_)
+                context.validators[notValidatorIndex_] = context.factory.CreateSchemaValidator(*not_);
+
+            if (hasSchemaDependencies_) {
+                for (SizeType i = 0; i < propertyCount_; i++)
+                    if (properties_[i].dependenciesSchema)
+                        context.validators[properties_[i].dependenciesValidatorIndex] = context.factory.CreateSchemaValidator(*properties_[i].dependenciesSchema);
+            }
+        }
+
+        return true;
+    }
+
+    // Fills the validator slots [schemas.begin, schemas.begin + schemas.count).
+    void CreateSchemaValidators(Context& context, const SchemaArray& schemas) const {
+        for (SizeType i = 0; i < schemas.count; i++)
+            context.validators[schemas.begin + i] = context.factory.CreateSchemaValidator(*schemas.schemas[i]);
+    }
+
+    // Looks up a declared property by name with a linear scan (O(n) in the
+    // number of properties). On success stores the index in *outIndex and
+    // returns true; otherwise returns false and leaves *outIndex untouched.
+    bool FindPropertyIndex(const ValueType& name, SizeType* outIndex) const {
+        const Ch* str = name.GetString();
+        const SizeType len = name.GetStringLength();
+        for (SizeType index = 0; index < propertyCount_; index++) {
+            const SValue& candidate = properties_[index].name;
+            if (candidate.GetStringLength() != len)
+                continue;
+            if (std::memcmp(candidate.GetString(), str, sizeof(Ch) * len) != 0)
+                continue;
+            *outIndex = index;
+            return true;
+        }
+        return false;
+    }
+
+    // Numeric keyword checks for a signed integer. The IsInt64/IsUint64
+    // branches avoid signed/unsigned comparison pitfalls: a uint64 minimum
+    // above int64 range can never be satisfied by i, and a uint64 maximum
+    // can never be violated by it.
+    bool CheckInt(Context& context, int64_t i) const {
+        if (!(type_ & ((1 << kIntegerSchemaType) | (1 << kNumberSchemaType)))) {
+            DisallowedType(context, GetIntegerString());
+            RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString());
+        }
+
+        if (!minimum_.IsNull()) {
+            if (minimum_.IsInt64()) {
+                if (exclusiveMinimum_ ? i <= minimum_.GetInt64() : i < minimum_.GetInt64()) {
+                    context.error_handler.BelowMinimum(i, minimum_, exclusiveMinimum_);
+                    RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinimumString());
+                }
+            }
+            else if (minimum_.IsUint64()) {
+                context.error_handler.BelowMinimum(i, minimum_, exclusiveMinimum_);
+                RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinimumString()); // i <= max(int64_t) < minimum.GetUint64()
+            }
+            else if (!CheckDoubleMinimum(context, static_cast<double>(i)))
+                return false;
+        }
+
+        if (!maximum_.IsNull()) {
+            if (maximum_.IsInt64()) {
+                if (exclusiveMaximum_ ? i >= maximum_.GetInt64() : i > maximum_.GetInt64()) {
+                    context.error_handler.AboveMaximum(i, maximum_, exclusiveMaximum_);
+                    RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaximumString());
+                }
+            }
+            else if (maximum_.IsUint64()) { }
+                /* do nothing */ // i <= max(int64_t) < maximum_.GetUint64()
+            else if (!CheckDoubleMaximum(context, static_cast<double>(i)))
+                return false;
+        }
+
+        if (!multipleOf_.IsNull()) {
+            if (multipleOf_.IsUint64()) {
+                // Divisibility is tested on |i| so negative multiples pass.
+                if (static_cast<uint64_t>(i >= 0 ? i : -i) % multipleOf_.GetUint64() != 0) {
+                    context.error_handler.NotMultipleOf(i, multipleOf_);
+                    RAPIDJSON_INVALID_KEYWORD_RETURN(GetMultipleOfString());
+                }
+            }
+            else if (!CheckDoubleMultipleOf(context, static_cast<double>(i)))
+                return false;
+        }
+
+        return true;
+    }
+
+    // Numeric keyword checks for an unsigned integer, mirroring CheckInt:
+    // an int64 (hence negative) minimum is always satisfied by i >= 0, and
+    // an int64 maximum that isn't a uint64 is always violated.
+    bool CheckUint(Context& context, uint64_t i) const {
+        if (!(type_ & ((1 << kIntegerSchemaType) | (1 << kNumberSchemaType)))) {
+            DisallowedType(context, GetIntegerString());
+            RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString());
+        }
+
+        if (!minimum_.IsNull()) {
+            if (minimum_.IsUint64()) {
+                if (exclusiveMinimum_ ? i <= minimum_.GetUint64() : i < minimum_.GetUint64()) {
+                    context.error_handler.BelowMinimum(i, minimum_, exclusiveMinimum_);
+                    RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinimumString());
+                }
+            }
+            else if (minimum_.IsInt64())
+                /* do nothing */; // i >= 0 > minimum.Getint64()
+            else if (!CheckDoubleMinimum(context, static_cast<double>(i)))
+                return false;
+        }
+
+        if (!maximum_.IsNull()) {
+            if (maximum_.IsUint64()) {
+                if (exclusiveMaximum_ ? i >= maximum_.GetUint64() : i > maximum_.GetUint64()) {
+                    context.error_handler.AboveMaximum(i, maximum_, exclusiveMaximum_);
+                    RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaximumString());
+                }
+            }
+            else if (maximum_.IsInt64()) {
+                context.error_handler.AboveMaximum(i, maximum_, exclusiveMaximum_);
+                RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaximumString()); // i >= 0 > maximum_
+            }
+            else if (!CheckDoubleMaximum(context, static_cast<double>(i)))
+                return false;
+        }
+
+        if (!multipleOf_.IsNull()) {
+            if (multipleOf_.IsUint64()) {
+                if (i % multipleOf_.GetUint64() != 0) {
+                    context.error_handler.NotMultipleOf(i, multipleOf_);
+                    RAPIDJSON_INVALID_KEYWORD_RETURN(GetMultipleOfString());
+                }
+            }
+            else if (!CheckDoubleMultipleOf(context, static_cast<double>(i)))
+                return false;
+        }
+
+        return true;
+    }
+
+    // True iff d satisfies the "minimum"/"exclusiveMinimum" constraint.
+    bool CheckDoubleMinimum(Context& context, double d) const {
+        const double bound = minimum_.GetDouble();
+        const bool violated = exclusiveMinimum_ ? (d <= bound) : (d < bound);
+        if (violated) {
+            context.error_handler.BelowMinimum(d, minimum_, exclusiveMinimum_);
+            RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinimumString());
+        }
+        return true;
+    }
+
+    // True iff d satisfies the "maximum"/"exclusiveMaximum" constraint.
+    bool CheckDoubleMaximum(Context& context, double d) const {
+        const double bound = maximum_.GetDouble();
+        const bool violated = exclusiveMaximum_ ? (d >= bound) : (d > bound);
+        if (violated) {
+            context.error_handler.AboveMaximum(d, maximum_, exclusiveMaximum_);
+            RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaximumString());
+        }
+        return true;
+    }
+
+    // True iff d is an exact multiple of "multipleOf" (computed on absolute
+    // values; any non-zero remainder fails).
+    bool CheckDoubleMultipleOf(Context& context, double d) const {
+        const double a = std::abs(d);
+        const double b = std::abs(multipleOf_.GetDouble());
+        const double q = std::floor(a / b);
+        const double r = a - q * b;
+        if (r > 0.0) {
+            context.error_handler.NotMultipleOf(d, multipleOf_);
+            RAPIDJSON_INVALID_KEYWORD_RETURN(GetMultipleOfString());
+        }
+        return true;
+    }
+
+    // Reports a "type" keyword violation: lists every type this schema
+    // accepts (number subsumes integer, so integer is only listed alone).
+    void DisallowedType(Context& context, const ValueType& actualType) const {
+        ErrorHandler& eh = context.error_handler;
+        eh.StartDisallowedType();
+
+        if (type_ & (1 << kNullSchemaType)) eh.AddExpectedType(GetNullString());
+        if (type_ & (1 << kBooleanSchemaType)) eh.AddExpectedType(GetBooleanString());
+        if (type_ & (1 << kObjectSchemaType)) eh.AddExpectedType(GetObjectString());
+        if (type_ & (1 << kArraySchemaType)) eh.AddExpectedType(GetArrayString());
+        if (type_ & (1 << kStringSchemaType)) eh.AddExpectedType(GetStringString());
+
+        if (type_ & (1 << kNumberSchemaType)) eh.AddExpectedType(GetNumberString());
+        else if (type_ & (1 << kIntegerSchemaType)) eh.AddExpectedType(GetIntegerString());
+
+        eh.EndDisallowedType(actualType);
+    }
+
+    // One declared object property: its schema plus per-property "required"
+    // and "dependencies" bookkeeping. dependencies (array form) is an
+    // allocator-owned bool array indexed like properties_.
+    struct Property {
+        Property() : schema(), dependenciesSchema(), dependenciesValidatorIndex(), dependencies(), required(false) {}
+        ~Property() { AllocatorType::Free(dependencies); }
+        SValue name;
+        const SchemaType* schema;
+        const SchemaType* dependenciesSchema;
+        SizeType dependenciesValidatorIndex;
+        bool* dependencies;
+        bool required;
+    };
+
+    // One "patternProperties" entry: a compiled regex and its subschema.
+    // Owns (and destroys) the pattern; the schema is owned by the document.
+    struct PatternProperty {
+        PatternProperty() : schema(), pattern() {}
+        ~PatternProperty() {
+            if (pattern) {
+                pattern->~RegexType();
+                AllocatorType::Free(pattern);
+            }
+        }
+        const SchemaType* schema;
+        RegexType* pattern;
+    };
+
+    // --- Identity and composition keywords ---
+    AllocatorType* allocator_;
+    SValue uri_;
+    PointerType pointer_;
+    const SchemaType* typeless_;      // shared "accept anything" schema
+    uint64_t* enum_;                  // hashes of "enum" values (0 if absent)
+    SizeType enumCount_;
+    SchemaArray allOf_;
+    SchemaArray anyOf_;
+    SchemaArray oneOf_;
+    const SchemaType* not_;
+    unsigned type_; // bitmask of kSchemaType
+    SizeType validatorCount_;         // total parallel validator slots needed
+    SizeType notValidatorIndex_;
+
+    // --- Object keywords ---
+    Property* properties_;
+    const SchemaType* additionalPropertiesSchema_;
+    PatternProperty* patternProperties_;
+    SizeType patternPropertyCount_;
+    SizeType propertyCount_;
+    SizeType minProperties_;
+    SizeType maxProperties_;
+    bool additionalProperties_;
+    bool hasDependencies_;
+    bool hasRequired_;
+    bool hasSchemaDependencies_;
+
+    // --- Array keywords ---
+    const SchemaType* additionalItemsSchema_;
+    const SchemaType* itemsList_;     // "items" as a single schema
+    const SchemaType** itemsTuple_;   // "items" as a positional tuple
+    SizeType itemsTupleCount_;
+    SizeType minItems_;
+    SizeType maxItems_;
+    bool additionalItems_;
+    bool uniqueItems_;
+
+    // --- String keywords ---
+    RegexType* pattern_;
+    SizeType minLength_;
+    SizeType maxLength_;
+
+    // --- Numeric keywords (IsNull() means keyword absent) ---
+    SValue minimum_;
+    SValue maximum_;
+    SValue multipleOf_;
+    bool exclusiveMinimum_;
+    bool exclusiveMaximum_;
+
+    // Length of this schema's string "default" (used to excuse "required").
+    SizeType defaultValueLength_;
+};
+
+// Appends a JSON Pointer array-index token ("/<index>") onto the document
+// stack, converting each digit of the decimal representation to Ch.
+template<typename Stack, typename Ch>
+struct TokenHelper {
+    RAPIDJSON_FORCEINLINE static void AppendIndexToken(Stack& documentStack, SizeType index) {
+        *documentStack.template Push<Ch>() = '/';
+        char buffer[21];
+        size_t length = static_cast<size_t>((sizeof(SizeType) == 4 ? u32toa(index, buffer) : u64toa(index, buffer)) - buffer);
+        for (size_t i = 0; i < length; i++)
+            *documentStack.template Push<Ch>() = static_cast<Ch>(buffer[i]);
+    }
+};
+
+// Partial specialized version for char to prevent buffer copying.
+// Writes '/' plus the digits straight into stack storage reserved up front,
+// then pops the unused tail of the reservation.
+template <typename Stack>
+struct TokenHelper<Stack, char> {
+    RAPIDJSON_FORCEINLINE static void AppendIndexToken(Stack& documentStack, SizeType index) {
+        if (sizeof(SizeType) == 4) {
+            char *buffer = documentStack.template Push<char>(1 + 10); // '/' + uint
+            *buffer++ = '/';
+            const char* end = internal::u32toa(index, buffer);
+            documentStack.template Pop<char>(static_cast<size_t>(10 - (end - buffer)));
+        }
+        else {
+            char *buffer = documentStack.template Push<char>(1 + 20); // '/' + uint64
+            *buffer++ = '/';
+            const char* end = internal::u64toa(index, buffer);
+            documentStack.template Pop<char>(static_cast<size_t>(20 - (end - buffer)));
+        }
+    }
+};
+
+} // namespace internal
+
+///////////////////////////////////////////////////////////////////////////////
+// IGenericRemoteSchemaDocumentProvider
+
+//! Interface for resolving remote (non-local) $ref targets during schema
+//! compilation. Implementations return a compiled schema document for the
+//! given URI, or null if it cannot be resolved.
+template <typename SchemaDocumentType>
+class IGenericRemoteSchemaDocumentProvider {
+public:
+    typedef typename SchemaDocumentType::Ch Ch;
+
+    virtual ~IGenericRemoteSchemaDocumentProvider() {}
+    //! \param uri URI of the remote document (the part before '#'). \param length Length of \c uri in code units.
+    virtual const SchemaDocumentType* GetRemoteDocument(const Ch* uri, SizeType length) = 0;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// GenericSchemaDocument
+
+//! JSON schema document.
+/*!
+ A JSON schema document is a compiled version of a JSON schema.
+ It is basically a tree of internal::Schema.
+
+ \note This is an immutable class (i.e. its instance cannot be modified after construction).
+ \tparam ValueT Type of JSON value (e.g. \c Value ), which also determine the encoding.
+ \tparam Allocator Allocator type for allocating memory of this document.
+*/
+template <typename ValueT, typename Allocator = CrtAllocator>
+class GenericSchemaDocument {
+public:
+    typedef ValueT ValueType;
+    typedef IGenericRemoteSchemaDocumentProvider<GenericSchemaDocument> IRemoteSchemaDocumentProviderType;
+    typedef Allocator AllocatorType;
+    typedef typename ValueType::EncodingType EncodingType;
+    typedef typename EncodingType::Ch Ch;
+    typedef internal::Schema<GenericSchemaDocument> SchemaType;
+    typedef GenericPointer<ValueType, Allocator> PointerType;
+    typedef GenericValue<EncodingType, Allocator> URIType;
+    friend class internal::Schema<GenericSchemaDocument>;
+    template <typename, typename, typename>
+    friend class GenericSchemaValidator;
+
+    //! Constructor.
+    /*!
+        Compile a JSON document into schema document.
+
+        \param document A JSON document as source.
+        \param uri The base URI of this schema document for purposes of violation reporting.
+        \param uriLength Length of \c uri, in code points.
+        \param remoteProvider An optional remote schema document provider for resolving remote reference. Can be null.
+        \param allocator An optional allocator instance for allocating memory. Can be null.
+    */
+    explicit GenericSchemaDocument(const ValueType& document, const Ch* uri = 0, SizeType uriLength = 0,
+        IRemoteSchemaDocumentProviderType* remoteProvider = 0, Allocator* allocator = 0) :
+        remoteProvider_(remoteProvider),
+        allocator_(allocator),
+        ownAllocator_(),
+        root_(),
+        typeless_(),
+        schemaMap_(allocator, kInitialSchemaMapSize),
+        schemaRef_(allocator, kInitialSchemaRefSize)
+    {
+        if (!allocator_)
+            ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator)();
+
+        Ch noUri[1] = {0};
+        uri_.SetString(uri ? uri : noUri, uriLength, *allocator_);
+
+        // The "typeless" schema accepts any value; it is the fallback target
+        // for $refs that cannot be resolved below.
+        typeless_ = static_cast<SchemaType*>(allocator_->Malloc(sizeof(SchemaType)));
+        new (typeless_) SchemaType(this, PointerType(), ValueType(kObjectType).Move(), ValueType(kObjectType).Move(), allocator_);
+
+        // Generate root schema, it will call CreateSchema() to create sub-schemas,
+        // And call AddRefSchema() if there are $ref.
+        CreateSchemaRecursive(&root_, PointerType(), document, document);
+
+        // Resolve $ref
+        while (!schemaRef_.Empty()) {
+            SchemaRefEntry* refEntry = schemaRef_.template Pop<SchemaRefEntry>(1);
+            if (const SchemaType* s = GetSchema(refEntry->target)) {
+                if (refEntry->schema)
+                    *refEntry->schema = s;
+
+                // Create entry in map if not exist
+                if (!GetSchema(refEntry->source)) {
+                    new (schemaMap_.template Push<SchemaEntry>()) SchemaEntry(refEntry->source, const_cast<SchemaType*>(s), false, allocator_);
+                }
+            }
+            else if (refEntry->schema)
+                *refEntry->schema = typeless_; // unresolved $ref: fall back to match-anything
+
+            refEntry->~SchemaRefEntry();
+        }
+
+        RAPIDJSON_ASSERT(root_ != 0);
+
+        schemaRef_.ShrinkToFit(); // Deallocate all memory for ref
+    }
+
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+    //! Move constructor in C++11
+    GenericSchemaDocument(GenericSchemaDocument&& rhs) RAPIDJSON_NOEXCEPT :
+        remoteProvider_(rhs.remoteProvider_),
+        allocator_(rhs.allocator_),
+        ownAllocator_(rhs.ownAllocator_),
+        root_(rhs.root_),
+        typeless_(rhs.typeless_),
+        schemaMap_(std::move(rhs.schemaMap_)),
+        schemaRef_(std::move(rhs.schemaRef_)),
+        uri_(std::move(rhs.uri_))
+    {
+        // Null out rhs so its destructor does not free what we now own.
+        rhs.remoteProvider_ = 0;
+        rhs.allocator_ = 0;
+        rhs.ownAllocator_ = 0;
+        rhs.typeless_ = 0;
+    }
+#endif
+
+    //! Destructor
+    ~GenericSchemaDocument() {
+        // Destroy map entries first (owned schemas are freed by ~SchemaEntry).
+        while (!schemaMap_.Empty())
+            schemaMap_.template Pop<SchemaEntry>(1)->~SchemaEntry();
+
+        if (typeless_) {
+            typeless_->~SchemaType();
+            Allocator::Free(typeless_);
+        }
+
+        RAPIDJSON_DELETE(ownAllocator_);
+    }
+
+    const URIType& GetURI() const { return uri_; }
+
+    //! Get the root schema.
+    const SchemaType& GetRoot() const { return *root_; }
+
+private:
+    //! Prohibit copying
+    GenericSchemaDocument(const GenericSchemaDocument&);
+    //! Prohibit assignment
+    GenericSchemaDocument& operator=(const GenericSchemaDocument&);
+
+    //! Pending $ref: remembers where the resolved schema pointer must be written.
+    struct SchemaRefEntry {
+        SchemaRefEntry(const PointerType& s, const PointerType& t, const SchemaType** outSchema, Allocator *allocator) : source(s, allocator), target(t, allocator), schema(outSchema) {}
+        PointerType source;
+        PointerType target;
+        const SchemaType** schema;
+    };
+
+    //! Pointer -> Schema map entry; frees the schema only when \c owned is true.
+    struct SchemaEntry {
+        SchemaEntry(const PointerType& p, SchemaType* s, bool o, Allocator* allocator) : pointer(p, allocator), schema(s), owned(o) {}
+        ~SchemaEntry() {
+            if (owned) {
+                schema->~SchemaType();
+                Allocator::Free(schema);
+            }
+        }
+        PointerType pointer;
+        SchemaType* schema;
+        bool owned;
+    };
+
+    //! Walks the whole document, creating a Schema for every object value.
+    void CreateSchemaRecursive(const SchemaType** schema, const PointerType& pointer, const ValueType& v, const ValueType& document) {
+        if (schema)
+            *schema = typeless_;
+
+        if (v.GetType() == kObjectType) {
+            const SchemaType* s = GetSchema(pointer);
+            if (!s)
+                CreateSchema(schema, pointer, v, document);
+
+            for (typename ValueType::ConstMemberIterator itr = v.MemberBegin(); itr != v.MemberEnd(); ++itr)
+                CreateSchemaRecursive(0, pointer.Append(itr->name, allocator_), itr->value, document);
+        }
+        else if (v.GetType() == kArrayType)
+            for (SizeType i = 0; i < v.Size(); i++)
+                CreateSchemaRecursive(0, pointer.Append(i, allocator_), v[i], document);
+    }
+
+    //! Creates one Schema at \c pointer unless the value is a $ref object.
+    void CreateSchema(const SchemaType** schema, const PointerType& pointer, const ValueType& v, const ValueType& document) {
+        RAPIDJSON_ASSERT(pointer.IsValid());
+        if (v.IsObject()) {
+            if (!HandleRefSchema(pointer, schema, v, document)) {
+                SchemaType* s = new (allocator_->Malloc(sizeof(SchemaType))) SchemaType(this, pointer, v, document, allocator_);
+                new (schemaMap_.template Push<SchemaEntry>()) SchemaEntry(pointer, s, true, allocator_);
+                if (schema)
+                    *schema = s;
+            }
+        }
+    }
+
+    //! Returns true when \c v is a {"$ref": ...} object that was handled:
+    //! remote refs resolve immediately via remoteProvider_, local ("#...") refs
+    //! are queued on schemaRef_ for resolution after the whole tree is built.
+    bool HandleRefSchema(const PointerType& source, const SchemaType** schema, const ValueType& v, const ValueType& document) {
+        static const Ch kRefString[] = { '$', 'r', 'e', 'f', '\0' };
+        static const ValueType kRefValue(kRefString, 4);
+
+        typename ValueType::ConstMemberIterator itr = v.FindMember(kRefValue);
+        if (itr == v.MemberEnd())
+            return false;
+
+        if (itr->value.IsString()) {
+            SizeType len = itr->value.GetStringLength();
+            if (len > 0) {
+                const Ch* s = itr->value.GetString();
+                SizeType i = 0;
+                while (i < len && s[i] != '#') // Find the first #
+                    i++;
+
+                if (i > 0) { // Remote reference, resolve immediately
+                    if (remoteProvider_) {
+                        if (const GenericSchemaDocument* remoteDocument = remoteProvider_->GetRemoteDocument(s, i)) {
+                            PointerType pointer(&s[i], len - i, allocator_);
+                            if (pointer.IsValid()) {
+                                if (const SchemaType* sc = remoteDocument->GetSchema(pointer)) {
+                                    if (schema)
+                                        *schema = sc;
+                                    new (schemaMap_.template Push<SchemaEntry>()) SchemaEntry(source, const_cast<SchemaType*>(sc), false, allocator_);
+                                    return true;
+                                }
+                            }
+                        }
+                    }
+                }
+                else if (s[i] == '#') { // Local reference, defer resolution
+                    PointerType pointer(&s[i], len - i, allocator_);
+                    if (pointer.IsValid()) {
+                        if (const ValueType* nv = pointer.Get(document))
+                            if (HandleRefSchema(source, schema, *nv, document))
+                                return true; // chained $ref handled recursively
+
+                        new (schemaRef_.template Push<SchemaRefEntry>()) SchemaRefEntry(source, pointer, schema, allocator_);
+                        return true;
+                    }
+                }
+            }
+        }
+        return false;
+    }
+
+    //! Linear scan of schemaMap_; returns 0 when no schema exists at \c pointer.
+    const SchemaType* GetSchema(const PointerType& pointer) const {
+        for (const SchemaEntry* target = schemaMap_.template Bottom<SchemaEntry>(); target != schemaMap_.template End<SchemaEntry>(); ++target)
+            if (pointer == target->pointer)
+                return target->schema;
+        return 0;
+    }
+
+    //! Reverse lookup of GetSchema(); used for violation reporting.
+    PointerType GetPointer(const SchemaType* schema) const {
+        for (const SchemaEntry* target = schemaMap_.template Bottom<SchemaEntry>(); target != schemaMap_.template End<SchemaEntry>(); ++target)
+            if (schema == target->schema)
+                return target->pointer;
+        return PointerType();
+    }
+
+    const SchemaType* GetTypeless() const { return typeless_; }
+
+    static const size_t kInitialSchemaMapSize = 64;
+    static const size_t kInitialSchemaRefSize = 64;
+
+    IRemoteSchemaDocumentProviderType* remoteProvider_;
+    Allocator *allocator_;
+    Allocator *ownAllocator_;
+    const SchemaType* root_;                //!< Root schema.
+    SchemaType* typeless_;
+    internal::Stack<Allocator> schemaMap_;  // Stores created Pointer -> Schemas
+    internal::Stack<Allocator> schemaRef_;  // Stores Pointer from $ref and schema which holds the $ref
+    URIType uri_;
+};
+
+//! GenericSchemaDocument using Value type. This is the typedef most callers use.
+typedef GenericSchemaDocument<Value> SchemaDocument;
+//! IGenericRemoteSchemaDocumentProvider using SchemaDocument.
+typedef IGenericRemoteSchemaDocumentProvider<SchemaDocument> IRemoteSchemaDocumentProvider;
+
+///////////////////////////////////////////////////////////////////////////////
+// GenericSchemaValidator
+
+//! JSON Schema Validator.
+/*!
+ A SAX style JSON schema validator.
+ It uses a \c GenericSchemaDocument to validate SAX events.
+ It delegates the incoming SAX events to an output handler.
+ The default output handler does nothing.
+ It can be reused multiple times by calling \c Reset().
+
+ \tparam SchemaDocumentType Type of schema document.
+ \tparam OutputHandler Type of output handler. Default handler does nothing.
+ \tparam StateAllocator Allocator for storing the internal validation states.
+*/
+template <
+ typename SchemaDocumentType,
+ typename OutputHandler = BaseReaderHandler<typename SchemaDocumentType::SchemaType::EncodingType>,
+ typename StateAllocator = CrtAllocator>
+class GenericSchemaValidator :
+ public internal::ISchemaStateFactory<typename SchemaDocumentType::SchemaType>,
+ public internal::ISchemaValidator,
+ public internal::IValidationErrorHandler<typename SchemaDocumentType::SchemaType>
+{
+public:
+ typedef typename SchemaDocumentType::SchemaType SchemaType;
+ typedef typename SchemaDocumentType::PointerType PointerType;
+ typedef typename SchemaType::EncodingType EncodingType;
+ typedef typename SchemaType::SValue SValue;
+ typedef typename EncodingType::Ch Ch;
+ typedef GenericStringRef<Ch> StringRefType;
+ typedef GenericValue<EncodingType, StateAllocator> ValueType;
+
+ //! Constructor without output handler.
+ /*!
+ \param schemaDocument The schema document to conform to.
+ \param allocator Optional allocator for storing internal validation states.
+ \param schemaStackCapacity Optional initial capacity of schema path stack.
+ \param documentStackCapacity Optional initial capacity of document path stack.
+ */
+    GenericSchemaValidator(
+        const SchemaDocumentType& schemaDocument,
+        StateAllocator* allocator = 0,
+        size_t schemaStackCapacity = kDefaultSchemaStackCapacity,
+        size_t documentStackCapacity = kDefaultDocumentStackCapacity)
+        :
+        schemaDocument_(&schemaDocument),
+        root_(schemaDocument.GetRoot()),
+        stateAllocator_(allocator),
+        ownStateAllocator_(0),
+        schemaStack_(allocator, schemaStackCapacity),
+        documentStack_(allocator, documentStackCapacity),
+        outputHandler_(0), // no output handler: SAX events are validated but not forwarded
+        error_(kObjectType),
+        currentError_(),
+        missingDependents_(),
+        valid_(true)
+#if RAPIDJSON_SCHEMA_VERBOSE
+        , depth_(0)
+#endif
+    {
+    }
+
+ //! Constructor with output handler.
+ /*!
+ \param schemaDocument The schema document to conform to.
+ \param allocator Optional allocator for storing internal validation states.
+ \param schemaStackCapacity Optional initial capacity of schema path stack.
+ \param documentStackCapacity Optional initial capacity of document path stack.
+ */
+    GenericSchemaValidator(
+        const SchemaDocumentType& schemaDocument,
+        OutputHandler& outputHandler,
+        StateAllocator* allocator = 0,
+        size_t schemaStackCapacity = kDefaultSchemaStackCapacity,
+        size_t documentStackCapacity = kDefaultDocumentStackCapacity)
+        :
+        schemaDocument_(&schemaDocument),
+        root_(schemaDocument.GetRoot()),
+        stateAllocator_(allocator),
+        ownStateAllocator_(0),
+        schemaStack_(allocator, schemaStackCapacity),
+        documentStack_(allocator, documentStackCapacity),
+        outputHandler_(&outputHandler), // SAX events are forwarded here after validation
+        error_(kObjectType),
+        currentError_(),
+        missingDependents_(),
+        valid_(true)
+#if RAPIDJSON_SCHEMA_VERBOSE
+        , depth_(0)
+#endif
+    {
+    }
+
+ //! Destructor.
+    ~GenericSchemaValidator() {
+        // Reset() pops all remaining schema contexts, freeing their sub-validators/hashers.
+        Reset();
+        RAPIDJSON_DELETE(ownStateAllocator_);
+    }
+
+ //! Reset the internal states.
+    void Reset() {
+        // Returns the validator to its freshly-constructed state so it can be reused.
+        while (!schemaStack_.Empty())
+            PopSchema();
+        documentStack_.Clear();
+        error_.SetObject();
+        currentError_.SetNull();
+        missingDependents_.SetNull();
+        valid_ = true;
+    }
+
+    //! Checks whether the current state is valid.
+    // Implementation of ISchemaValidator
+    virtual bool IsValid() const { return valid_; }
+
+    //! Gets the error object (accumulated violations keyed by schema keyword).
+    ValueType& GetError() { return error_; }
+    const ValueType& GetError() const { return error_; }
+
+    //! Gets the JSON pointer pointed to the invalid schema.
+    PointerType GetInvalidSchemaPointer() const {
+        return schemaStack_.Empty() ? PointerType() : CurrentSchema().GetPointer();
+    }
+
+    //! Gets the keyword of invalid schema.
+    const Ch* GetInvalidSchemaKeyword() const {
+        return schemaStack_.Empty() ? 0 : CurrentContext().invalidKeyword;
+    }
+
+    //! Gets the JSON pointer pointed to the invalid value.
+    // The document path stack holds the pointer's textual tokens ('/'-separated).
+    PointerType GetInvalidDocumentPointer() const {
+        if (documentStack_.Empty()) {
+            return PointerType();
+        }
+        else {
+            return PointerType(documentStack_.template Bottom<Ch>(), documentStack_.GetSize() / sizeof(Ch));
+        }
+    }
+
+    // Violation callbacks (IValidationErrorHandler implementation): each records
+    // one violation under the corresponding schema keyword via AddNumberError /
+    // AddCurrentError. Numeric callbacks are overloaded per representation.
+    void NotMultipleOf(int64_t actual, const SValue& expected) {
+        AddNumberError(SchemaType::GetMultipleOfString(), ValueType(actual).Move(), expected);
+    }
+    void NotMultipleOf(uint64_t actual, const SValue& expected) {
+        AddNumberError(SchemaType::GetMultipleOfString(), ValueType(actual).Move(), expected);
+    }
+    void NotMultipleOf(double actual, const SValue& expected) {
+        AddNumberError(SchemaType::GetMultipleOfString(), ValueType(actual).Move(), expected);
+    }
+    void AboveMaximum(int64_t actual, const SValue& expected, bool exclusive) {
+        AddNumberError(SchemaType::GetMaximumString(), ValueType(actual).Move(), expected,
+            exclusive ? &SchemaType::GetExclusiveMaximumString : 0);
+    }
+    void AboveMaximum(uint64_t actual, const SValue& expected, bool exclusive) {
+        AddNumberError(SchemaType::GetMaximumString(), ValueType(actual).Move(), expected,
+            exclusive ? &SchemaType::GetExclusiveMaximumString : 0);
+    }
+    void AboveMaximum(double actual, const SValue& expected, bool exclusive) {
+        AddNumberError(SchemaType::GetMaximumString(), ValueType(actual).Move(), expected,
+            exclusive ? &SchemaType::GetExclusiveMaximumString : 0);
+    }
+    void BelowMinimum(int64_t actual, const SValue& expected, bool exclusive) {
+        AddNumberError(SchemaType::GetMinimumString(), ValueType(actual).Move(), expected,
+            exclusive ? &SchemaType::GetExclusiveMinimumString : 0);
+    }
+    void BelowMinimum(uint64_t actual, const SValue& expected, bool exclusive) {
+        AddNumberError(SchemaType::GetMinimumString(), ValueType(actual).Move(), expected,
+            exclusive ? &SchemaType::GetExclusiveMinimumString : 0);
+    }
+    void BelowMinimum(double actual, const SValue& expected, bool exclusive) {
+        AddNumberError(SchemaType::GetMinimumString(), ValueType(actual).Move(), expected,
+            exclusive ? &SchemaType::GetExclusiveMinimumString : 0);
+    }
+
+    void TooLong(const Ch* str, SizeType length, SizeType expected) {
+        AddNumberError(SchemaType::GetMaxLengthString(),
+            ValueType(str, length, GetStateAllocator()).Move(), SValue(expected).Move());
+    }
+    void TooShort(const Ch* str, SizeType length, SizeType expected) {
+        AddNumberError(SchemaType::GetMinLengthString(),
+            ValueType(str, length, GetStateAllocator()).Move(), SValue(expected).Move());
+    }
+    void DoesNotMatch(const Ch* str, SizeType length) {
+        currentError_.SetObject();
+        currentError_.AddMember(GetActualString(), ValueType(str, length, GetStateAllocator()).Move(), GetStateAllocator());
+        AddCurrentError(SchemaType::GetPatternString());
+    }
+
+    // Array-related violation callbacks.
+    void DisallowedItem(SizeType index) {
+        currentError_.SetObject();
+        currentError_.AddMember(GetDisallowedString(), ValueType(index).Move(), GetStateAllocator());
+        AddCurrentError(SchemaType::GetAdditionalItemsString(), true);
+    }
+    void TooFewItems(SizeType actualCount, SizeType expectedCount) {
+        AddNumberError(SchemaType::GetMinItemsString(),
+            ValueType(actualCount).Move(), SValue(expectedCount).Move());
+    }
+    void TooManyItems(SizeType actualCount, SizeType expectedCount) {
+        AddNumberError(SchemaType::GetMaxItemsString(),
+            ValueType(actualCount).Move(), SValue(expectedCount).Move());
+    }
+    void DuplicateItems(SizeType index1, SizeType index2) {
+        // Records both offending indices under "duplicates".
+        ValueType duplicates(kArrayType);
+        duplicates.PushBack(index1, GetStateAllocator());
+        duplicates.PushBack(index2, GetStateAllocator());
+        currentError_.SetObject();
+        currentError_.AddMember(GetDuplicatesString(), duplicates, GetStateAllocator());
+        AddCurrentError(SchemaType::GetUniqueItemsString(), true);
+    }
+
+    // Object-related violation callbacks.
+    void TooManyProperties(SizeType actualCount, SizeType expectedCount) {
+        AddNumberError(SchemaType::GetMaxPropertiesString(),
+            ValueType(actualCount).Move(), SValue(expectedCount).Move());
+    }
+    void TooFewProperties(SizeType actualCount, SizeType expectedCount) {
+        AddNumberError(SchemaType::GetMinPropertiesString(),
+            ValueType(actualCount).Move(), SValue(expectedCount).Move());
+    }
+    // Missing required properties are collected between StartMissingProperties()
+    // and EndMissingProperties(); the latter returns false when nothing was missing.
+    void StartMissingProperties() {
+        currentError_.SetArray();
+    }
+    void AddMissingProperty(const SValue& name) {
+        currentError_.PushBack(ValueType(name, GetStateAllocator()).Move(), GetStateAllocator());
+    }
+    bool EndMissingProperties() {
+        if (currentError_.Empty())
+            return false;
+        ValueType error(kObjectType);
+        error.AddMember(GetMissingString(), currentError_, GetStateAllocator());
+        currentError_ = error;
+        AddCurrentError(SchemaType::GetRequiredString());
+        return true;
+    }
+    void PropertyViolations(ISchemaValidator** subvalidators, SizeType count) {
+        for (SizeType i = 0; i < count; ++i)
+            MergeError(static_cast<GenericSchemaValidator*>(subvalidators[i])->GetError());
+    }
+    void DisallowedProperty(const Ch* name, SizeType length) {
+        currentError_.SetObject();
+        currentError_.AddMember(GetDisallowedString(), ValueType(name, length, GetStateAllocator()).Move(), GetStateAllocator());
+        AddCurrentError(SchemaType::GetAdditionalPropertiesString(), true);
+    }
+
+    // "dependencies" keyword callbacks: property dependencies accumulate in
+    // missingDependents_, schema dependencies merge a sub-validator's error.
+    void StartDependencyErrors() {
+        currentError_.SetObject();
+    }
+    void StartMissingDependentProperties() {
+        missingDependents_.SetArray();
+    }
+    void AddMissingDependentProperty(const SValue& targetName) {
+        missingDependents_.PushBack(ValueType(targetName, GetStateAllocator()).Move(), GetStateAllocator());
+    }
+    void EndMissingDependentProperties(const SValue& sourceName) {
+        if (!missingDependents_.Empty())
+            currentError_.AddMember(ValueType(sourceName, GetStateAllocator()).Move(),
+                missingDependents_, GetStateAllocator());
+    }
+    void AddDependencySchemaError(const SValue& sourceName, ISchemaValidator* subvalidator) {
+        currentError_.AddMember(ValueType(sourceName, GetStateAllocator()).Move(),
+            static_cast<GenericSchemaValidator*>(subvalidator)->GetError(), GetStateAllocator());
+    }
+    bool EndDependencyErrors() {
+        if (currentError_.ObjectEmpty())
+            return false; // no dependency violations were recorded
+        ValueType error(kObjectType);
+        error.AddMember(GetErrorsString(), currentError_, GetStateAllocator());
+        currentError_ = error;
+        AddCurrentError(SchemaType::GetDependenciesString());
+        return true;
+    }
+
+    // enum / type / combinator violation callbacks.
+    void DisallowedValue() {
+        currentError_.SetObject();
+        AddCurrentError(SchemaType::GetEnumString());
+    }
+    void StartDisallowedType() {
+        currentError_.SetArray();
+    }
+    void AddExpectedType(const typename SchemaType::ValueType& expectedType) {
+        currentError_.PushBack(ValueType(expectedType, GetStateAllocator()).Move(), GetStateAllocator());
+    }
+    void EndDisallowedType(const typename SchemaType::ValueType& actualType) {
+        ValueType error(kObjectType);
+        error.AddMember(GetExpectedString(), currentError_, GetStateAllocator());
+        error.AddMember(GetActualString(), ValueType(actualType, GetStateAllocator()).Move(), GetStateAllocator());
+        currentError_ = error;
+        AddCurrentError(SchemaType::GetTypeString());
+    }
+    void NotAllOf(ISchemaValidator** subvalidators, SizeType count) {
+        for (SizeType i = 0; i < count; ++i) {
+            MergeError(static_cast<GenericSchemaValidator*>(subvalidators[i])->GetError());
+        }
+    }
+    // NOTE(review): NoneOf reports under the "anyOf" keyword — this matches
+    // upstream RapidJSON (a value matching none of the schemas violates anyOf).
+    void NoneOf(ISchemaValidator** subvalidators, SizeType count) {
+        AddErrorArray(SchemaType::GetAnyOfString(), subvalidators, count);
+    }
+    void NotOneOf(ISchemaValidator** subvalidators, SizeType count) {
+        AddErrorArray(SchemaType::GetOneOfString(), subvalidators, count);
+    }
+    void Disallowed() {
+        currentError_.SetObject();
+        AddCurrentError(SchemaType::GetNotString());
+    }
+
+// Generates static accessors (e.g. GetInstanceRefString()) returning interned
+// keyword strings used as member names in the error object.
+#define RAPIDJSON_STRING_(name, ...) \
+    static const StringRefType& Get##name##String() {\
+        static const Ch s[] = { __VA_ARGS__, '\0' };\
+        static const StringRefType v(s, static_cast<SizeType>(sizeof(s) / sizeof(Ch) - 1)); \
+        return v;\
+    }
+
+    RAPIDJSON_STRING_(InstanceRef, 'i', 'n', 's', 't', 'a', 'n', 'c', 'e', 'R', 'e', 'f')
+    RAPIDJSON_STRING_(SchemaRef, 's', 'c', 'h', 'e', 'm', 'a', 'R', 'e', 'f')
+    RAPIDJSON_STRING_(Expected, 'e', 'x', 'p', 'e', 'c', 't', 'e', 'd')
+    RAPIDJSON_STRING_(Actual, 'a', 'c', 't', 'u', 'a', 'l')
+    RAPIDJSON_STRING_(Disallowed, 'd', 'i', 's', 'a', 'l', 'l', 'o', 'w', 'e', 'd')
+    RAPIDJSON_STRING_(Missing, 'm', 'i', 's', 's', 'i', 'n', 'g')
+    RAPIDJSON_STRING_(Errors, 'e', 'r', 'r', 'o', 'r', 's')
+    RAPIDJSON_STRING_(Duplicates, 'd', 'u', 'p', 'l', 'i', 'c', 'a', 't', 'e', 's')
+
+#undef RAPIDJSON_STRING_
+
+#if RAPIDJSON_SCHEMA_VERBOSE
+#define RAPIDJSON_SCHEMA_HANDLE_BEGIN_VERBOSE_() \
+RAPIDJSON_MULTILINEMACRO_BEGIN\
+    *documentStack_.template Push<Ch>() = '\0';\
+    documentStack_.template Pop<Ch>(1);\
+    internal::PrintInvalidDocument(documentStack_.template Bottom<Ch>());\
+RAPIDJSON_MULTILINEMACRO_END
+#else
+#define RAPIDJSON_SCHEMA_HANDLE_BEGIN_VERBOSE_()
+#endif
+
+// Validate the event against the current schema; on failure latch valid_ = false.
+#define RAPIDJSON_SCHEMA_HANDLE_BEGIN_(method, arg1)\
+    if (!valid_) return false; \
+    if (!BeginValue() || !CurrentSchema().method arg1) {\
+        RAPIDJSON_SCHEMA_HANDLE_BEGIN_VERBOSE_();\
+        return valid_ = false;\
+    }
+
+// Fan the event out to every context's hasher and parallel sub-validators
+// (allOf/anyOf/oneOf/not and patternProperties).
+#define RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(method, arg2)\
+    for (Context* context = schemaStack_.template Bottom<Context>(); context != schemaStack_.template End<Context>(); context++) {\
+        if (context->hasher)\
+            static_cast<HasherType*>(context->hasher)->method arg2;\
+        if (context->validators)\
+            for (SizeType i_ = 0; i_ < context->validatorCount; i_++)\
+                static_cast<GenericSchemaValidator*>(context->validators[i_])->method arg2;\
+        if (context->patternPropertiesValidators)\
+            for (SizeType i_ = 0; i_ < context->patternPropertiesValidatorCount; i_++)\
+                static_cast<GenericSchemaValidator*>(context->patternPropertiesValidators[i_])->method arg2;\
+    }
+
+// Finish the value, then forward the event to the output handler (if any).
+#define RAPIDJSON_SCHEMA_HANDLE_END_(method, arg2)\
+    return valid_ = EndValue() && (!outputHandler_ || outputHandler_->method arg2)
+
+#define RAPIDJSON_SCHEMA_HANDLE_VALUE_(method, arg1, arg2) \
+    RAPIDJSON_SCHEMA_HANDLE_BEGIN_   (method, arg1);\
+    RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(method, arg2);\
+    RAPIDJSON_SCHEMA_HANDLE_END_     (method, arg2)
+
+    // SAX event handlers: validate, fan out, then forward.
+    bool Null()             { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Null,   (CurrentContext()), ( )); }
+    bool Bool(bool b)       { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Bool,   (CurrentContext(), b), (b)); }
+    bool Int(int i)         { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Int,    (CurrentContext(), i), (i)); }
+    bool Uint(unsigned u)   { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Uint,   (CurrentContext(), u), (u)); }
+    bool Int64(int64_t i)   { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Int64,  (CurrentContext(), i), (i)); }
+    bool Uint64(uint64_t u) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Uint64, (CurrentContext(), u), (u)); }
+    bool Double(double d)   { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Double, (CurrentContext(), d), (d)); }
+    // RawNumber is validated as a String against the schema (see upstream rapidjson).
+    bool RawNumber(const Ch* str, SizeType length, bool copy)
+                            { RAPIDJSON_SCHEMA_HANDLE_VALUE_(String, (CurrentContext(), str, length, copy), (str, length, copy)); }
+    bool String(const Ch* str, SizeType length, bool copy)
+                            { RAPIDJSON_SCHEMA_HANDLE_VALUE_(String, (CurrentContext(), str, length, copy), (str, length, copy)); }
+
+    bool StartObject() {
+        RAPIDJSON_SCHEMA_HANDLE_BEGIN_(StartObject, (CurrentContext()));
+        RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(StartObject, ());
+        return valid_ = !outputHandler_ || outputHandler_->StartObject();
+    }
+
+    bool Key(const Ch* str, SizeType len, bool copy) {
+        if (!valid_) return false;
+        AppendToken(str, len); // extend the document pointer with this member name
+        if (!CurrentSchema().Key(CurrentContext(), str, len, copy)) return valid_ = false;
+        RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(Key, (str, len, copy));
+        return valid_ = !outputHandler_ || outputHandler_->Key(str, len, copy);
+    }
+
+    bool EndObject(SizeType memberCount) {
+        if (!valid_) return false;
+        RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(EndObject, (memberCount));
+        if (!CurrentSchema().EndObject(CurrentContext(), memberCount)) return valid_ = false;
+        RAPIDJSON_SCHEMA_HANDLE_END_(EndObject, (memberCount));
+    }
+
+    bool StartArray() {
+        RAPIDJSON_SCHEMA_HANDLE_BEGIN_(StartArray, (CurrentContext()));
+        RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(StartArray, ());
+        return valid_ = !outputHandler_ || outputHandler_->StartArray();
+    }
+
+    bool EndArray(SizeType elementCount) {
+        if (!valid_) return false;
+        RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(EndArray, (elementCount));
+        if (!CurrentSchema().EndArray(CurrentContext(), elementCount)) return valid_ = false;
+        RAPIDJSON_SCHEMA_HANDLE_END_(EndArray, (elementCount));
+    }
+
+#undef RAPIDJSON_SCHEMA_HANDLE_BEGIN_VERBOSE_
+#undef RAPIDJSON_SCHEMA_HANDLE_BEGIN_
+#undef RAPIDJSON_SCHEMA_HANDLE_PARALLEL_
+#undef RAPIDJSON_SCHEMA_HANDLE_VALUE_
+
+    // Implementation of ISchemaStateFactory<SchemaType>: factory hooks the Schema
+    // uses to spawn sub-validators/hashers for combinator keywords. All objects
+    // are placement-new'd in the state allocator and destroyed explicitly.
+    virtual ISchemaValidator* CreateSchemaValidator(const SchemaType& root) {
+        return new (GetStateAllocator().Malloc(sizeof(GenericSchemaValidator))) GenericSchemaValidator(*schemaDocument_, root, documentStack_.template Bottom<char>(), documentStack_.GetSize(),
+#if RAPIDJSON_SCHEMA_VERBOSE
+        depth_ + 1,
+#endif
+        &GetStateAllocator());
+    }
+
+    virtual void DestroySchemaValidator(ISchemaValidator* validator) {
+        GenericSchemaValidator* v = static_cast<GenericSchemaValidator*>(validator);
+        v->~GenericSchemaValidator();
+        StateAllocator::Free(v);
+    }
+
+    virtual void* CreateHasher() {
+        return new (GetStateAllocator().Malloc(sizeof(HasherType))) HasherType(&GetStateAllocator());
+    }
+
+    virtual uint64_t GetHashCode(void* hasher) {
+        return static_cast<HasherType*>(hasher)->GetHashCode();
+    }
+
+    // (sic) "Destrory" misspelling is part of the upstream ISchemaStateFactory
+    // interface; renaming it here would break the override.
+    virtual void DestroryHasher(void* hasher) {
+        HasherType* h = static_cast<HasherType*>(hasher);
+        h->~HasherType();
+        StateAllocator::Free(h);
+    }
+
+    virtual void* MallocState(size_t size) {
+        return GetStateAllocator().Malloc(size);
+    }
+
+    virtual void FreeState(void* p) {
+        StateAllocator::Free(p);
+    }
+
+private:
+ typedef typename SchemaType::Context Context;
+ typedef GenericValue<UTF8<>, StateAllocator> HashCodeArray;
+ typedef internal::Hasher<EncodingType, StateAllocator> HasherType;
+
+    // Private constructor used by CreateSchemaValidator() for sub-validators:
+    // starts validation at \c root and seeds the document path stack with the
+    // parent's current path so reported pointers are absolute.
+    GenericSchemaValidator(
+        const SchemaDocumentType& schemaDocument,
+        const SchemaType& root,
+        const char* basePath, size_t basePathSize,
+#if RAPIDJSON_SCHEMA_VERBOSE
+        unsigned depth,
+#endif
+        StateAllocator* allocator = 0,
+        size_t schemaStackCapacity = kDefaultSchemaStackCapacity,
+        size_t documentStackCapacity = kDefaultDocumentStackCapacity)
+        :
+        schemaDocument_(&schemaDocument),
+        root_(root),
+        stateAllocator_(allocator),
+        ownStateAllocator_(0),
+        schemaStack_(allocator, schemaStackCapacity),
+        documentStack_(allocator, documentStackCapacity),
+        outputHandler_(0),
+        error_(kObjectType),
+        currentError_(),
+        missingDependents_(),
+        valid_(true)
+#if RAPIDJSON_SCHEMA_VERBOSE
+        , depth_(depth)
+#endif
+    {
+        if (basePath && basePathSize)
+            memcpy(documentStack_.template Push<char>(basePathSize), basePath, basePathSize);
+    }
+
+    // Lazily creates an owned state allocator when none was supplied;
+    // the owned one is released in the destructor via RAPIDJSON_DELETE.
+    StateAllocator& GetStateAllocator() {
+        if (!stateAllocator_)
+            stateAllocator_ = ownStateAllocator_ = RAPIDJSON_NEW(StateAllocator)();
+        return *stateAllocator_;
+    }
+
+    // Called before each SAX value event: pushes the schema context for the
+    // value about to be read and spins up patternProperties sub-validators
+    // carried over from the parent context. Returns false on violation.
+    bool BeginValue() {
+        if (schemaStack_.Empty())
+            PushSchema(root_); // first event: enter the root schema
+        else {
+            if (CurrentContext().inArray)
+                internal::TokenHelper<internal::Stack<StateAllocator>, Ch>::AppendIndexToken(documentStack_, CurrentContext().arrayElementIndex);
+
+            if (!CurrentSchema().BeginValue(CurrentContext()))
+                return false;
+
+            // Capture parent-context state before PushSchema invalidates CurrentContext().
+            SizeType count = CurrentContext().patternPropertiesSchemaCount;
+            const SchemaType** sa = CurrentContext().patternPropertiesSchemas;
+            typename Context::PatternValidatorType patternValidatorType = CurrentContext().valuePatternValidatorType;
+            bool valueUniqueness = CurrentContext().valueUniqueness;
+            RAPIDJSON_ASSERT(CurrentContext().valueSchema);
+            PushSchema(*CurrentContext().valueSchema);
+
+            if (count > 0) {
+                CurrentContext().objectPatternValidatorType = patternValidatorType;
+                ISchemaValidator**& va = CurrentContext().patternPropertiesValidators;
+                SizeType& validatorCount = CurrentContext().patternPropertiesValidatorCount;
+                va = static_cast<ISchemaValidator**>(MallocState(sizeof(ISchemaValidator*) * count));
+                for (SizeType i = 0; i < count; i++)
+                    va[validatorCount++] = CreateSchemaValidator(*sa[i]);
+            }
+
+            CurrentContext().arrayUniqueness = valueUniqueness;
+        }
+        return true;
+    }
+
+    // Called after each SAX value event: finishes keyword checks, pops the
+    // schema context, performs the uniqueItems duplicate check against the
+    // parent's accumulated hash codes, and trims the document pointer token.
+    bool EndValue() {
+        if (!CurrentSchema().EndValue(CurrentContext()))
+            return false;
+
+#if RAPIDJSON_SCHEMA_VERBOSE
+        GenericStringBuffer<EncodingType> sb;
+        schemaDocument_->GetPointer(&CurrentSchema()).Stringify(sb);
+
+        *documentStack_.template Push<Ch>() = '\0';
+        documentStack_.template Pop<Ch>(1);
+        internal::PrintValidatorPointers(depth_, sb.GetString(), documentStack_.template Bottom<Ch>());
+#endif
+
+        // Grab this value's hash before its context (and hasher) is destroyed.
+        uint64_t h = CurrentContext().arrayUniqueness ? static_cast<HasherType*>(CurrentContext().hasher)->GetHashCode() : 0;
+
+        PopSchema();
+
+        if (!schemaStack_.Empty()) {
+            Context& context = CurrentContext();
+            if (context.valueUniqueness) {
+                HashCodeArray* a = static_cast<HashCodeArray*>(context.arrayElementHashCodes);
+                if (!a)
+                    CurrentContext().arrayElementHashCodes = a = new (GetStateAllocator().Malloc(sizeof(HashCodeArray))) HashCodeArray(kArrayType);
+                for (typename HashCodeArray::ConstValueIterator itr = a->Begin(); itr != a->End(); ++itr)
+                    if (itr->GetUint64() == h) {
+                        DuplicateItems(static_cast<SizeType>(itr - a->Begin()), a->Size());
+                        RAPIDJSON_INVALID_KEYWORD_RETURN(SchemaType::GetUniqueItemsString());
+                    }
+                a->PushBack(h, GetStateAllocator());
+            }
+        }
+
+        // Remove the last token of document pointer
+        while (!documentStack_.Empty() && *documentStack_.template Pop<Ch>(1) != '/')
+            ;
+
+        return true;
+    }
+
+    // Appends "/<name>" to the document pointer, escaping '~' as "~0" and
+    // '/' as "~1" per RFC 6901 (JSON Pointer).
+    void AppendToken(const Ch* str, SizeType len) {
+        documentStack_.template Reserve<Ch>(1 + len * 2); // worst case all characters are escaped as two characters
+        *documentStack_.template PushUnsafe<Ch>() = '/';
+        for (SizeType i = 0; i < len; i++) {
+            if (str[i] == '~') {
+                *documentStack_.template PushUnsafe<Ch>() = '~';
+                *documentStack_.template PushUnsafe<Ch>() = '0';
+            }
+            else if (str[i] == '/') {
+                *documentStack_.template PushUnsafe<Ch>() = '~';
+                *documentStack_.template PushUnsafe<Ch>() = '1';
+            }
+            else
+                *documentStack_.template PushUnsafe<Ch>() = str[i];
+        }
+    }
+
+ RAPIDJSON_FORCEINLINE void PushSchema(const SchemaType& schema) { new (schemaStack_.template Push<Context>()) Context(*this, *this, &schema); }
+
+ RAPIDJSON_FORCEINLINE void PopSchema() {
+ Context* c = schemaStack_.template Pop<Context>(1);
+ if (HashCodeArray* a = static_cast<HashCodeArray*>(c->arrayElementHashCodes)) {
+ a->~HashCodeArray();
+ StateAllocator::Free(a);
+ }
+ c->~Context();
+ }
+
+ void AddErrorLocation(ValueType& result, bool parent) {
+ GenericStringBuffer<EncodingType> sb;
+ PointerType instancePointer = GetInvalidDocumentPointer();
+ ((parent && instancePointer.GetTokenCount() > 0)
+ ? PointerType(instancePointer.GetTokens(), instancePointer.GetTokenCount() - 1)
+ : instancePointer).StringifyUriFragment(sb);
+ ValueType instanceRef(sb.GetString(), static_cast<SizeType>(sb.GetSize() / sizeof(Ch)),
+ GetStateAllocator());
+ result.AddMember(GetInstanceRefString(), instanceRef, GetStateAllocator());
+ sb.Clear();
+ memcpy(sb.Push(CurrentSchema().GetURI().GetStringLength()),
+ CurrentSchema().GetURI().GetString(),
+ CurrentSchema().GetURI().GetStringLength() * sizeof(Ch));
+ GetInvalidSchemaPointer().StringifyUriFragment(sb);
+ ValueType schemaRef(sb.GetString(), static_cast<SizeType>(sb.GetSize() / sizeof(Ch)),
+ GetStateAllocator());
+ result.AddMember(GetSchemaRefString(), schemaRef, GetStateAllocator());
+ }
+
+ void AddError(ValueType& keyword, ValueType& error) {
+ typename ValueType::MemberIterator member = error_.FindMember(keyword);
+ if (member == error_.MemberEnd())
+ error_.AddMember(keyword, error, GetStateAllocator());
+ else {
+ if (member->value.IsObject()) {
+ ValueType errors(kArrayType);
+ errors.PushBack(member->value, GetStateAllocator());
+ member->value = errors;
+ }
+ member->value.PushBack(error, GetStateAllocator());
+ }
+ }
+
+ void AddCurrentError(const typename SchemaType::ValueType& keyword, bool parent = false) {
+ AddErrorLocation(currentError_, parent);
+ AddError(ValueType(keyword, GetStateAllocator(), false).Move(), currentError_);
+ }
+
+ void MergeError(ValueType& other) {
+ for (typename ValueType::MemberIterator it = other.MemberBegin(), end = other.MemberEnd(); it != end; ++it) {
+ AddError(it->name, it->value);
+ }
+ }
+
+ void AddNumberError(const typename SchemaType::ValueType& keyword, ValueType& actual, const SValue& expected,
+ const typename SchemaType::ValueType& (*exclusive)() = 0) {
+ currentError_.SetObject();
+ currentError_.AddMember(GetActualString(), actual, GetStateAllocator());
+ currentError_.AddMember(GetExpectedString(), ValueType(expected, GetStateAllocator()).Move(), GetStateAllocator());
+ if (exclusive)
+ currentError_.AddMember(ValueType(exclusive(), GetStateAllocator()).Move(), true, GetStateAllocator());
+ AddCurrentError(keyword);
+ }
+
+ void AddErrorArray(const typename SchemaType::ValueType& keyword,
+ ISchemaValidator** subvalidators, SizeType count) {
+ ValueType errors(kArrayType);
+ for (SizeType i = 0; i < count; ++i)
+ errors.PushBack(static_cast<GenericSchemaValidator*>(subvalidators[i])->GetError(), GetStateAllocator());
+ currentError_.SetObject();
+ currentError_.AddMember(GetErrorsString(), errors, GetStateAllocator());
+ AddCurrentError(keyword);
+ }
+
+ const SchemaType& CurrentSchema() const { return *schemaStack_.template Top<Context>()->schema; }
+ Context& CurrentContext() { return *schemaStack_.template Top<Context>(); }
+ const Context& CurrentContext() const { return *schemaStack_.template Top<Context>(); }
+
+ static const size_t kDefaultSchemaStackCapacity = 1024;
+ static const size_t kDefaultDocumentStackCapacity = 256;
+ const SchemaDocumentType* schemaDocument_;
+ const SchemaType& root_;
+ StateAllocator* stateAllocator_;
+ StateAllocator* ownStateAllocator_;
+ internal::Stack<StateAllocator> schemaStack_; //!< stack to store the current path of schema (BaseSchemaType *)
+ internal::Stack<StateAllocator> documentStack_; //!< stack to store the current path of validating document (Ch)
+ OutputHandler* outputHandler_;
+ ValueType error_;
+ ValueType currentError_;
+ ValueType missingDependents_;
+ bool valid_;
+#if RAPIDJSON_SCHEMA_VERBOSE
+ unsigned depth_;
+#endif
+};
+
+typedef GenericSchemaValidator<SchemaDocument> SchemaValidator;
+
+///////////////////////////////////////////////////////////////////////////////
+// SchemaValidatingReader
+
+//! A helper class for parsing with validation.
+/*!
+ This helper class is a functor, designed as a parameter of \ref GenericDocument::Populate().
+
+ \tparam parseFlags Combination of \ref ParseFlag.
+ \tparam InputStream Type of input stream, implementing Stream concept.
+ \tparam SourceEncoding Encoding of the input stream.
+ \tparam SchemaDocumentType Type of schema document.
+ \tparam StackAllocator Allocator type for stack.
+*/
+template <
+ unsigned parseFlags,
+ typename InputStream,
+ typename SourceEncoding,
+ typename SchemaDocumentType = SchemaDocument,
+ typename StackAllocator = CrtAllocator>
+class SchemaValidatingReader {
+public:
+ typedef typename SchemaDocumentType::PointerType PointerType;
+ typedef typename InputStream::Ch Ch;
+ typedef GenericValue<SourceEncoding, StackAllocator> ValueType;
+
+ //! Constructor
+ /*!
+ \param is Input stream.
+ \param sd Schema document.
+ */
+ SchemaValidatingReader(InputStream& is, const SchemaDocumentType& sd) : is_(is), sd_(sd), invalidSchemaKeyword_(), error_(kObjectType), isValid_(true) {}
+
+ template <typename Handler>
+ bool operator()(Handler& handler) {
+ GenericReader<SourceEncoding, typename SchemaDocumentType::EncodingType, StackAllocator> reader;
+ GenericSchemaValidator<SchemaDocumentType, Handler> validator(sd_, handler);
+ parseResult_ = reader.template Parse<parseFlags>(is_, validator);
+
+ isValid_ = validator.IsValid();
+ if (isValid_) {
+ invalidSchemaPointer_ = PointerType();
+ invalidSchemaKeyword_ = 0;
+ invalidDocumentPointer_ = PointerType();
+ error_.SetObject();
+ }
+ else {
+ invalidSchemaPointer_ = validator.GetInvalidSchemaPointer();
+ invalidSchemaKeyword_ = validator.GetInvalidSchemaKeyword();
+ invalidDocumentPointer_ = validator.GetInvalidDocumentPointer();
+ error_.CopyFrom(validator.GetError(), allocator_);
+ }
+
+ return parseResult_;
+ }
+
+ const ParseResult& GetParseResult() const { return parseResult_; }
+ bool IsValid() const { return isValid_; }
+ const PointerType& GetInvalidSchemaPointer() const { return invalidSchemaPointer_; }
+ const Ch* GetInvalidSchemaKeyword() const { return invalidSchemaKeyword_; }
+ const PointerType& GetInvalidDocumentPointer() const { return invalidDocumentPointer_; }
+ const ValueType& GetError() const { return error_; }
+
+private:
+ InputStream& is_;
+ const SchemaDocumentType& sd_;
+
+ ParseResult parseResult_;
+ PointerType invalidSchemaPointer_;
+ const Ch* invalidSchemaKeyword_;
+ PointerType invalidDocumentPointer_;
+ StackAllocator allocator_;
+ ValueType error_;
+ bool isValid_;
+};
+
+RAPIDJSON_NAMESPACE_END
+RAPIDJSON_DIAG_POP
+
+#endif // RAPIDJSON_SCHEMA_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_stream.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_stream.h
new file mode 100644
index 00000000..da28a998
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_stream.h
@@ -0,0 +1,223 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#include "lottie_rapidjson_rapidjson.h"
+
+#ifndef RAPIDJSON_STREAM_H_
+#define RAPIDJSON_STREAM_H_
+
+#include "lottie_rapidjson_encodings.h"
+
+RAPIDJSON_NAMESPACE_BEGIN
+
+///////////////////////////////////////////////////////////////////////////////
+// Stream
+
+/*! \class rapidjson::Stream
+ \brief Concept for reading and writing characters.
+
+ For read-only stream, no need to implement PutBegin(), Put(), Flush() and PutEnd().
+
+ For write-only stream, only need to implement Put() and Flush().
+
+\code
+concept Stream {
+ typename Ch; //!< Character type of the stream.
+
+ //! Read the current character from stream without moving the read cursor.
+ Ch Peek() const;
+
+ //! Read the current character from stream and moving the read cursor to next character.
+ Ch Take();
+
+ //! Get the current read cursor.
+ //! \return Number of characters read from start.
+ size_t Tell();
+
+ //! Begin writing operation at the current read pointer.
+ //! \return The begin writer pointer.
+ Ch* PutBegin();
+
+ //! Write a character.
+ void Put(Ch c);
+
+ //! Flush the buffer.
+ void Flush();
+
+ //! End the writing operation.
+ //! \param begin The begin write pointer returned by PutBegin().
+ //! \return Number of characters written.
+ size_t PutEnd(Ch* begin);
+}
+\endcode
+*/
+
+//! Provides additional information for stream.
+/*!
+ By using traits pattern, this type provides a default configuration for stream.
+ For custom stream, this type can be specialized for other configuration.
+ See TEST(Reader, CustomStringStream) in readertest.cpp for example.
+*/
+template<typename Stream>
+struct StreamTraits {
+ //! Whether to make local copy of stream for optimization during parsing.
+ /*!
+ By default, for safety, streams do not use local copy optimization.
+ Stream that can be copied fast should specialize this, like StreamTraits<StringStream>.
+ */
+ enum { copyOptimization = 0 };
+};
+
+//! Reserve n characters for writing to a stream.
+template<typename Stream>
+inline void PutReserve(Stream& stream, size_t count) {
+ (void)stream;
+ (void)count;
+}
+
+//! Write character to a stream, presuming buffer is reserved.
+template<typename Stream>
+inline void PutUnsafe(Stream& stream, typename Stream::Ch c) {
+ stream.Put(c);
+}
+
+//! Put N copies of a character to a stream.
+template<typename Stream, typename Ch>
+inline void PutN(Stream& stream, Ch c, size_t n) {
+ PutReserve(stream, n);
+ for (size_t i = 0; i < n; i++)
+ PutUnsafe(stream, c);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// GenericStreamWrapper
+
+//! A Stream Wrapper
+/*! \tThis string stream is a wrapper for any stream by just forwarding any
+ \treceived message to the origin stream.
+ \note implements Stream concept
+*/
+
+#if defined(_MSC_VER) && _MSC_VER <= 1800
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(4702) // unreachable code
+RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated
+#endif
+
+template <typename InputStream, typename Encoding = UTF8<> >
+class GenericStreamWrapper {
+public:
+ typedef typename Encoding::Ch Ch;
+ GenericStreamWrapper(InputStream& is): is_(is) {}
+
+ Ch Peek() const { return is_.Peek(); }
+ Ch Take() { return is_.Take(); }
+ size_t Tell() { return is_.Tell(); }
+ Ch* PutBegin() { return is_.PutBegin(); }
+ void Put(Ch ch) { is_.Put(ch); }
+ void Flush() { is_.Flush(); }
+ size_t PutEnd(Ch* ch) { return is_.PutEnd(ch); }
+
+ // wrapper for MemoryStream
+ const Ch* Peek4() const { return is_.Peek4(); }
+
+ // wrapper for AutoUTFInputStream
+ UTFType GetType() const { return is_.GetType(); }
+ bool HasBOM() const { return is_.HasBOM(); }
+
+protected:
+ InputStream& is_;
+};
+
+#if defined(_MSC_VER) && _MSC_VER <= 1800
+RAPIDJSON_DIAG_POP
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// StringStream
+
+//! Read-only string stream.
+/*! \note implements Stream concept
+*/
+template <typename Encoding>
+struct GenericStringStream {
+ typedef typename Encoding::Ch Ch;
+
+ GenericStringStream(const Ch *src) : src_(src), head_(src) {}
+
+ Ch Peek() const { return *src_; }
+ Ch Take() { return *src_++; }
+ size_t Tell() const { return static_cast<size_t>(src_ - head_); }
+
+ Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
+ void Put(Ch) { RAPIDJSON_ASSERT(false); }
+ void Flush() { RAPIDJSON_ASSERT(false); }
+ size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }
+
+ const Ch* src_; //!< Current read position.
+ const Ch* head_; //!< Original head of the string.
+};
+
+template <typename Encoding>
+struct StreamTraits<GenericStringStream<Encoding> > {
+ enum { copyOptimization = 1 };
+};
+
+//! String stream with UTF8 encoding.
+typedef GenericStringStream<UTF8<> > StringStream;
+
+///////////////////////////////////////////////////////////////////////////////
+// InsituStringStream
+
+//! A read-write string stream.
+/*! This string stream is particularly designed for in-situ parsing.
+ \note implements Stream concept
+*/
+template <typename Encoding>
+struct GenericInsituStringStream {
+ typedef typename Encoding::Ch Ch;
+
+ GenericInsituStringStream(Ch *src) : src_(src), dst_(0), head_(src) {}
+
+ // Read
+ Ch Peek() { return *src_; }
+ Ch Take() { return *src_++; }
+ size_t Tell() { return static_cast<size_t>(src_ - head_); }
+
+ // Write
+ void Put(Ch c) { RAPIDJSON_ASSERT(dst_ != 0); *dst_++ = c; }
+
+ Ch* PutBegin() { return dst_ = src_; }
+ size_t PutEnd(Ch* begin) { return static_cast<size_t>(dst_ - begin); }
+ void Flush() {}
+
+ Ch* Push(size_t count) { Ch* begin = dst_; dst_ += count; return begin; }
+ void Pop(size_t count) { dst_ -= count; }
+
+ Ch* src_;
+ Ch* dst_;
+ Ch* head_;
+};
+
+template <typename Encoding>
+struct StreamTraits<GenericInsituStringStream<Encoding> > {
+ enum { copyOptimization = 1 };
+};
+
+//! Insitu string stream with UTF8 encoding.
+typedef GenericInsituStringStream<UTF8<> > InsituStringStream;
+
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_STREAM_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_stringbuffer.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_stringbuffer.h
new file mode 100644
index 00000000..295d6a2f
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_stringbuffer.h
@@ -0,0 +1,121 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_STRINGBUFFER_H_
+#define RAPIDJSON_STRINGBUFFER_H_
+
+#include "lottie_rapidjson_stream.h"
+#include "lottie_rapidjson_internal_stack.h"
+
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+#include <utility> // std::move
+#endif
+
+#include "lottie_rapidjson_internal_stack.h"
+
+#if defined(__clang__)
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(c++98-compat)
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+
+//! Represents an in-memory output stream.
+/*!
+ \tparam Encoding Encoding of the stream.
+ \tparam Allocator type for allocating memory buffer.
+ \note implements Stream concept
+*/
+template <typename Encoding, typename Allocator = CrtAllocator>
+class GenericStringBuffer {
+public:
+ typedef typename Encoding::Ch Ch;
+
+ GenericStringBuffer(Allocator* allocator = 0, size_t capacity = kDefaultCapacity) : stack_(allocator, capacity) {}
+
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+ GenericStringBuffer(GenericStringBuffer&& rhs) : stack_(std::move(rhs.stack_)) {}
+ GenericStringBuffer& operator=(GenericStringBuffer&& rhs) {
+ if (&rhs != this)
+ stack_ = std::move(rhs.stack_);
+ return *this;
+ }
+#endif
+
+ void Put(Ch c) { *stack_.template Push<Ch>() = c; }
+ void PutUnsafe(Ch c) { *stack_.template PushUnsafe<Ch>() = c; }
+ void Flush() {}
+
+ void Clear() { stack_.Clear(); }
+ void ShrinkToFit() {
+ // Push and pop a null terminator. This is safe.
+ *stack_.template Push<Ch>() = '\0';
+ stack_.ShrinkToFit();
+ stack_.template Pop<Ch>(1);
+ }
+
+ void Reserve(size_t count) { stack_.template Reserve<Ch>(count); }
+ Ch* Push(size_t count) { return stack_.template Push<Ch>(count); }
+ Ch* PushUnsafe(size_t count) { return stack_.template PushUnsafe<Ch>(count); }
+ void Pop(size_t count) { stack_.template Pop<Ch>(count); }
+
+ const Ch* GetString() const {
+ // Push and pop a null terminator. This is safe.
+ *stack_.template Push<Ch>() = '\0';
+ stack_.template Pop<Ch>(1);
+
+ return stack_.template Bottom<Ch>();
+ }
+
+ //! Get the size of string in bytes in the string buffer.
+ size_t GetSize() const { return stack_.GetSize(); }
+
+ //! Get the length of string in Ch in the string buffer.
+ size_t GetLength() const { return stack_.GetSize() / sizeof(Ch); }
+
+ static const size_t kDefaultCapacity = 256;
+ mutable internal::Stack<Allocator> stack_;
+
+private:
+ // Prohibit copy constructor & assignment operator.
+ GenericStringBuffer(const GenericStringBuffer&);
+ GenericStringBuffer& operator=(const GenericStringBuffer&);
+};
+
+//! String buffer with UTF8 encoding
+typedef GenericStringBuffer<UTF8<> > StringBuffer;
+
+template<typename Encoding, typename Allocator>
+inline void PutReserve(GenericStringBuffer<Encoding, Allocator>& stream, size_t count) {
+ stream.Reserve(count);
+}
+
+template<typename Encoding, typename Allocator>
+inline void PutUnsafe(GenericStringBuffer<Encoding, Allocator>& stream, typename Encoding::Ch c) {
+ stream.PutUnsafe(c);
+}
+
+//! Implement specialized version of PutN() with memset() for better performance.
+template<>
+inline void PutN(GenericStringBuffer<UTF8<> >& stream, char c, size_t n) {
+ std::memset(stream.stack_.Push<char>(n), c, n * sizeof(c));
+}
+
+RAPIDJSON_NAMESPACE_END
+
+#if defined(__clang__)
+RAPIDJSON_DIAG_POP
+#endif
+
+#endif // RAPIDJSON_STRINGBUFFER_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_writer.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_writer.h
new file mode 100644
index 00000000..d494cd47
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_writer.h
@@ -0,0 +1,710 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_WRITER_H_
+#define RAPIDJSON_WRITER_H_
+
+#include "lottie_rapidjson_stream.h"
+#include "lottie_rapidjson_internal_clzll.h"
+#include "lottie_rapidjson_internal_meta.h"
+#include "lottie_rapidjson_internal_stack.h"
+#include "lottie_rapidjson_internal_strfunc.h"
+#include "lottie_rapidjson_internal_dtoa.h"
+#include "lottie_rapidjson_internal_itoa.h"
+#include "lottie_rapidjson_stringbuffer.h"
+#include <new> // placement new
+
+#if defined(RAPIDJSON_SIMD) && defined(_MSC_VER)
+#include <intrin.h>
+#pragma intrinsic(_BitScanForward)
+#endif
+#ifdef RAPIDJSON_SSE42
+#include <nmmintrin.h>
+#elif defined(RAPIDJSON_SSE2)
+#include <emmintrin.h>
+#elif defined(RAPIDJSON_NEON)
+#include <arm_neon.h>
+#endif
+
+#ifdef __clang__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(padded)
+RAPIDJSON_DIAG_OFF(unreachable-code)
+RAPIDJSON_DIAG_OFF(c++98-compat)
+#elif defined(_MSC_VER)
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(4127) // conditional expression is constant
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+
+///////////////////////////////////////////////////////////////////////////////
+// WriteFlag
+
+/*! \def RAPIDJSON_WRITE_DEFAULT_FLAGS
+ \ingroup RAPIDJSON_CONFIG
+ \brief User-defined kWriteDefaultFlags definition.
+
+ User can define this as any \c WriteFlag combinations.
+*/
+#ifndef RAPIDJSON_WRITE_DEFAULT_FLAGS
+#define RAPIDJSON_WRITE_DEFAULT_FLAGS kWriteNoFlags
+#endif
+
+//! Combination of writeFlags
+enum WriteFlag {
+ kWriteNoFlags = 0, //!< No flags are set.
+ kWriteValidateEncodingFlag = 1, //!< Validate encoding of JSON strings.
+ kWriteNanAndInfFlag = 2, //!< Allow writing of Infinity, -Infinity and NaN.
+ kWriteDefaultFlags = RAPIDJSON_WRITE_DEFAULT_FLAGS //!< Default write flags. Can be customized by defining RAPIDJSON_WRITE_DEFAULT_FLAGS
+};
+
+//! JSON writer
+/*! Writer implements the concept Handler.
+ It generates JSON text by events to an output os.
+
+ User may programmatically calls the functions of a writer to generate JSON text.
+
+ On the other side, a writer can also be passed to objects that generates events,
+
+ for example Reader::Parse() and Document::Accept().
+
+ \tparam OutputStream Type of output stream.
+ \tparam SourceEncoding Encoding of source string.
+ \tparam TargetEncoding Encoding of output stream.
+ \tparam StackAllocator Type of allocator for allocating memory of stack.
+ \note implements Handler concept
+*/
+template<typename OutputStream, typename SourceEncoding = UTF8<>, typename TargetEncoding = UTF8<>, typename StackAllocator = CrtAllocator, unsigned writeFlags = kWriteDefaultFlags>
+class Writer {
+public:
+ typedef typename SourceEncoding::Ch Ch;
+
+ static const int kDefaultMaxDecimalPlaces = 324;
+
+ //! Constructor
+ /*! \param os Output stream.
+ \param stackAllocator User supplied allocator. If it is null, it will create a private one.
+ \param levelDepth Initial capacity of stack.
+ */
+ explicit
+ Writer(OutputStream& os, StackAllocator* stackAllocator = 0, size_t levelDepth = kDefaultLevelDepth) :
+ os_(&os), level_stack_(stackAllocator, levelDepth * sizeof(Level)), maxDecimalPlaces_(kDefaultMaxDecimalPlaces), hasRoot_(false) {}
+
+ explicit
+ Writer(StackAllocator* allocator = 0, size_t levelDepth = kDefaultLevelDepth) :
+ os_(0), level_stack_(allocator, levelDepth * sizeof(Level)), maxDecimalPlaces_(kDefaultMaxDecimalPlaces), hasRoot_(false) {}
+
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+ Writer(Writer&& rhs) :
+ os_(rhs.os_), level_stack_(std::move(rhs.level_stack_)), maxDecimalPlaces_(rhs.maxDecimalPlaces_), hasRoot_(rhs.hasRoot_) {
+ rhs.os_ = 0;
+ }
+#endif
+
+ //! Reset the writer with a new stream.
+ /*!
+ This function reset the writer with a new stream and default settings,
+ in order to make a Writer object reusable for output multiple JSONs.
+
+ \param os New output stream.
+ \code
+ Writer<OutputStream> writer(os1);
+ writer.StartObject();
+ // ...
+ writer.EndObject();
+
+ writer.Reset(os2);
+ writer.StartObject();
+ // ...
+ writer.EndObject();
+ \endcode
+ */
+ void Reset(OutputStream& os) {
+ os_ = &os;
+ hasRoot_ = false;
+ level_stack_.Clear();
+ }
+
+ //! Checks whether the output is a complete JSON.
+ /*!
+ A complete JSON has a complete root object or array.
+ */
+ bool IsComplete() const {
+ return hasRoot_ && level_stack_.Empty();
+ }
+
+ int GetMaxDecimalPlaces() const {
+ return maxDecimalPlaces_;
+ }
+
+ //! Sets the maximum number of decimal places for double output.
+ /*!
+ This setting truncates the output with specified number of decimal places.
+
+ For example,
+
+ \code
+ writer.SetMaxDecimalPlaces(3);
+ writer.StartArray();
+ writer.Double(0.12345); // "0.123"
+ writer.Double(0.0001); // "0.0"
+ writer.Double(1.234567890123456e30); // "1.234567890123456e30" (do not truncate significand for positive exponent)
+ writer.Double(1.23e-4); // "0.0" (do truncate significand for negative exponent)
+ writer.EndArray();
+ \endcode
+
+ The default setting does not truncate any decimal places. You can restore to this setting by calling
+ \code
+ writer.SetMaxDecimalPlaces(Writer::kDefaultMaxDecimalPlaces);
+ \endcode
+ */
+ void SetMaxDecimalPlaces(int maxDecimalPlaces) {
+ maxDecimalPlaces_ = maxDecimalPlaces;
+ }
+
+ /*!@name Implementation of Handler
+ \see Handler
+ */
+ //@{
+
+ bool Null() { Prefix(kNullType); return EndValue(WriteNull()); }
+ bool Bool(bool b) { Prefix(b ? kTrueType : kFalseType); return EndValue(WriteBool(b)); }
+ bool Int(int i) { Prefix(kNumberType); return EndValue(WriteInt(i)); }
+ bool Uint(unsigned u) { Prefix(kNumberType); return EndValue(WriteUint(u)); }
+ bool Int64(int64_t i64) { Prefix(kNumberType); return EndValue(WriteInt64(i64)); }
+ bool Uint64(uint64_t u64) { Prefix(kNumberType); return EndValue(WriteUint64(u64)); }
+
+ //! Writes the given \c double value to the stream
+ /*!
+ \param d The value to be written.
+ \return Whether it is succeed.
+ */
+ bool Double(double d) { Prefix(kNumberType); return EndValue(WriteDouble(d)); }
+
+ bool RawNumber(const Ch* str, SizeType length, bool copy = false) {
+ RAPIDJSON_ASSERT(str != 0);
+ (void)copy;
+ Prefix(kNumberType);
+ return EndValue(WriteString(str, length));
+ }
+
+ bool String(const Ch* str, SizeType length, bool copy = false) {
+ RAPIDJSON_ASSERT(str != 0);
+ (void)copy;
+ Prefix(kStringType);
+ return EndValue(WriteString(str, length));
+ }
+
+#if RAPIDJSON_HAS_STDSTRING
+ bool String(const std::basic_string<Ch>& str) {
+ return String(str.data(), SizeType(str.size()));
+ }
+#endif
+
+ bool StartObject() {
+ Prefix(kObjectType);
+ new (level_stack_.template Push<Level>()) Level(false);
+ return WriteStartObject();
+ }
+
+ bool Key(const Ch* str, SizeType length, bool copy = false) { return String(str, length, copy); }
+
+#if RAPIDJSON_HAS_STDSTRING
+ bool Key(const std::basic_string<Ch>& str)
+ {
+ return Key(str.data(), SizeType(str.size()));
+ }
+#endif
+
+ bool EndObject(SizeType memberCount = 0) {
+ (void)memberCount;
+ RAPIDJSON_ASSERT(level_stack_.GetSize() >= sizeof(Level)); // not inside an Object
+ RAPIDJSON_ASSERT(!level_stack_.template Top<Level>()->inArray); // currently inside an Array, not Object
+ RAPIDJSON_ASSERT(0 == level_stack_.template Top<Level>()->valueCount % 2); // Object has a Key without a Value
+ level_stack_.template Pop<Level>(1);
+ return EndValue(WriteEndObject());
+ }
+
+ bool StartArray() {
+ Prefix(kArrayType);
+ new (level_stack_.template Push<Level>()) Level(true);
+ return WriteStartArray();
+ }
+
+ bool EndArray(SizeType elementCount = 0) {
+ (void)elementCount;
+ RAPIDJSON_ASSERT(level_stack_.GetSize() >= sizeof(Level));
+ RAPIDJSON_ASSERT(level_stack_.template Top<Level>()->inArray);
+ level_stack_.template Pop<Level>(1);
+ return EndValue(WriteEndArray());
+ }
+ //@}
+
+ /*! @name Convenience extensions */
+ //@{
+
+ //! Simpler but slower overload.
+ bool String(const Ch* const& str) { return String(str, internal::StrLen(str)); }
+ bool Key(const Ch* const& str) { return Key(str, internal::StrLen(str)); }
+
+ //@}
+
+ //! Write a raw JSON value.
+ /*!
+ For user to write a stringified JSON as a value.
+
+ \param json A well-formed JSON value. It should not contain null character within [0, length - 1] range.
+ \param length Length of the json.
+ \param type Type of the root of json.
+ */
+ bool RawValue(const Ch* json, size_t length, Type type) {
+ RAPIDJSON_ASSERT(json != 0);
+ Prefix(type);
+ return EndValue(WriteRawValue(json, length));
+ }
+
+ //! Flush the output stream.
+ /*!
+ Allows the user to flush the output stream immediately.
+ */
+ void Flush() {
+ os_->Flush();
+ }
+
+ static const size_t kDefaultLevelDepth = 32;
+
+protected:
+ //! Information for each nested level
+ struct Level {
+ Level(bool inArray_) : valueCount(0), inArray(inArray_) {}
+ size_t valueCount; //!< number of values in this level
+ bool inArray; //!< true if in array, otherwise in object
+ };
+
+ bool WriteNull() {
+ PutReserve(*os_, 4);
+ PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'u'); PutUnsafe(*os_, 'l'); PutUnsafe(*os_, 'l'); return true;
+ }
+
+ bool WriteBool(bool b) {
+ if (b) {
+ PutReserve(*os_, 4);
+ PutUnsafe(*os_, 't'); PutUnsafe(*os_, 'r'); PutUnsafe(*os_, 'u'); PutUnsafe(*os_, 'e');
+ }
+ else {
+ PutReserve(*os_, 5);
+ PutUnsafe(*os_, 'f'); PutUnsafe(*os_, 'a'); PutUnsafe(*os_, 'l'); PutUnsafe(*os_, 's'); PutUnsafe(*os_, 'e');
+ }
+ return true;
+ }
+
+ bool WriteInt(int i) {
+ char buffer[11];
+ const char* end = internal::i32toa(i, buffer);
+ PutReserve(*os_, static_cast<size_t>(end - buffer));
+ for (const char* p = buffer; p != end; ++p)
+ PutUnsafe(*os_, static_cast<typename OutputStream::Ch>(*p));
+ return true;
+ }
+
+ bool WriteUint(unsigned u) {
+ char buffer[10];
+ const char* end = internal::u32toa(u, buffer);
+ PutReserve(*os_, static_cast<size_t>(end - buffer));
+ for (const char* p = buffer; p != end; ++p)
+ PutUnsafe(*os_, static_cast<typename OutputStream::Ch>(*p));
+ return true;
+ }
+
+ bool WriteInt64(int64_t i64) {
+ char buffer[21];
+ const char* end = internal::i64toa(i64, buffer);
+ PutReserve(*os_, static_cast<size_t>(end - buffer));
+ for (const char* p = buffer; p != end; ++p)
+ PutUnsafe(*os_, static_cast<typename OutputStream::Ch>(*p));
+ return true;
+ }
+
+ bool WriteUint64(uint64_t u64) {
+ char buffer[20];
+ char* end = internal::u64toa(u64, buffer);
+ PutReserve(*os_, static_cast<size_t>(end - buffer));
+ for (char* p = buffer; p != end; ++p)
+ PutUnsafe(*os_, static_cast<typename OutputStream::Ch>(*p));
+ return true;
+ }
+
+ bool WriteDouble(double d) {
+ if (internal::Double(d).IsNanOrInf()) {
+ if (!(writeFlags & kWriteNanAndInfFlag))
+ return false;
+ if (internal::Double(d).IsNan()) {
+ PutReserve(*os_, 3);
+ PutUnsafe(*os_, 'N'); PutUnsafe(*os_, 'a'); PutUnsafe(*os_, 'N');
+ return true;
+ }
+ if (internal::Double(d).Sign()) {
+ PutReserve(*os_, 9);
+ PutUnsafe(*os_, '-');
+ }
+ else
+ PutReserve(*os_, 8);
+ PutUnsafe(*os_, 'I'); PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'f');
+ PutUnsafe(*os_, 'i'); PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'i'); PutUnsafe(*os_, 't'); PutUnsafe(*os_, 'y');
+ return true;
+ }
+
+ char buffer[25];
+ char* end = internal::dtoa(d, buffer, maxDecimalPlaces_);
+ PutReserve(*os_, static_cast<size_t>(end - buffer));
+ for (char* p = buffer; p != end; ++p)
+ PutUnsafe(*os_, static_cast<typename OutputStream::Ch>(*p));
+ return true;
+ }
+
+ bool WriteString(const Ch* str, SizeType length) {
+ static const typename OutputStream::Ch hexDigits[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' };
+ static const char escape[256] = {
+#define Z16 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+ //0 1 2 3 4 5 6 7 8 9 A B C D E F
+ 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'b', 't', 'n', 'u', 'f', 'r', 'u', 'u', // 00
+ 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', // 10
+ 0, 0, '"', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20
+ Z16, Z16, // 30~4F
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,'\\', 0, 0, 0, // 50
+ Z16, Z16, Z16, Z16, Z16, Z16, Z16, Z16, Z16, Z16 // 60~FF
+#undef Z16
+ };
+
+ if (TargetEncoding::supportUnicode)
+ PutReserve(*os_, 2 + length * 6); // "\uxxxx..."
+ else
+ PutReserve(*os_, 2 + length * 12); // "\uxxxx\uyyyy..."
+
+ PutUnsafe(*os_, '\"');
+ GenericStringStream<SourceEncoding> is(str);
+ while (ScanWriteUnescapedString(is, length)) {
+ const Ch c = is.Peek();
+ if (!TargetEncoding::supportUnicode && static_cast<unsigned>(c) >= 0x80) {
+ // Unicode escaping
+ unsigned codepoint;
+ if (RAPIDJSON_UNLIKELY(!SourceEncoding::Decode(is, &codepoint)))
+ return false;
+ PutUnsafe(*os_, '\\');
+ PutUnsafe(*os_, 'u');
+ if (codepoint <= 0xD7FF || (codepoint >= 0xE000 && codepoint <= 0xFFFF)) {
+ PutUnsafe(*os_, hexDigits[(codepoint >> 12) & 15]);
+ PutUnsafe(*os_, hexDigits[(codepoint >> 8) & 15]);
+ PutUnsafe(*os_, hexDigits[(codepoint >> 4) & 15]);
+ PutUnsafe(*os_, hexDigits[(codepoint ) & 15]);
+ }
+ else {
+ RAPIDJSON_ASSERT(codepoint >= 0x010000 && codepoint <= 0x10FFFF);
+ // Surrogate pair
+ unsigned s = codepoint - 0x010000;
+ unsigned lead = (s >> 10) + 0xD800;
+ unsigned trail = (s & 0x3FF) + 0xDC00;
+ PutUnsafe(*os_, hexDigits[(lead >> 12) & 15]);
+ PutUnsafe(*os_, hexDigits[(lead >> 8) & 15]);
+ PutUnsafe(*os_, hexDigits[(lead >> 4) & 15]);
+ PutUnsafe(*os_, hexDigits[(lead ) & 15]);
+ PutUnsafe(*os_, '\\');
+ PutUnsafe(*os_, 'u');
+ PutUnsafe(*os_, hexDigits[(trail >> 12) & 15]);
+ PutUnsafe(*os_, hexDigits[(trail >> 8) & 15]);
+ PutUnsafe(*os_, hexDigits[(trail >> 4) & 15]);
+ PutUnsafe(*os_, hexDigits[(trail ) & 15]);
+ }
+ }
+ else if ((sizeof(Ch) == 1 || static_cast<unsigned>(c) < 256) && RAPIDJSON_UNLIKELY(escape[static_cast<unsigned char>(c)])) {
+ is.Take();
+ PutUnsafe(*os_, '\\');
+ PutUnsafe(*os_, static_cast<typename OutputStream::Ch>(escape[static_cast<unsigned char>(c)]));
+ if (escape[static_cast<unsigned char>(c)] == 'u') {
+ PutUnsafe(*os_, '0');
+ PutUnsafe(*os_, '0');
+ PutUnsafe(*os_, hexDigits[static_cast<unsigned char>(c) >> 4]);
+ PutUnsafe(*os_, hexDigits[static_cast<unsigned char>(c) & 0xF]);
+ }
+ }
+ else if (RAPIDJSON_UNLIKELY(!(writeFlags & kWriteValidateEncodingFlag ?
+ Transcoder<SourceEncoding, TargetEncoding>::Validate(is, *os_) :
+ Transcoder<SourceEncoding, TargetEncoding>::TranscodeUnsafe(is, *os_))))
+ return false;
+ }
+ PutUnsafe(*os_, '\"');
+ return true;
+ }
+
+ bool ScanWriteUnescapedString(GenericStringStream<SourceEncoding>& is, size_t length) {
+ return RAPIDJSON_LIKELY(is.Tell() < length);
+ }
+
+ bool WriteStartObject() { os_->Put('{'); return true; }
+ bool WriteEndObject() { os_->Put('}'); return true; }
+ bool WriteStartArray() { os_->Put('['); return true; }
+ bool WriteEndArray() { os_->Put(']'); return true; }
+
+ bool WriteRawValue(const Ch* json, size_t length) {
+ PutReserve(*os_, length);
+ GenericStringStream<SourceEncoding> is(json);
+ while (RAPIDJSON_LIKELY(is.Tell() < length)) {
+ RAPIDJSON_ASSERT(is.Peek() != '\0');
+ if (RAPIDJSON_UNLIKELY(!(writeFlags & kWriteValidateEncodingFlag ?
+ Transcoder<SourceEncoding, TargetEncoding>::Validate(is, *os_) :
+ Transcoder<SourceEncoding, TargetEncoding>::TranscodeUnsafe(is, *os_))))
+ return false;
+ }
+ return true;
+ }
+
+ void Prefix(Type type) {
+ (void)type;
+ if (RAPIDJSON_LIKELY(level_stack_.GetSize() != 0)) { // this value is not at root
+ Level* level = level_stack_.template Top<Level>();
+ if (level->valueCount > 0) {
+ if (level->inArray)
+ os_->Put(','); // add comma if it is not the first element in array
+ else // in object
+ os_->Put((level->valueCount % 2 == 0) ? ',' : ':');
+ }
+ if (!level->inArray && level->valueCount % 2 == 0)
+ RAPIDJSON_ASSERT(type == kStringType); // if it's in object, then even number should be a name
+ level->valueCount++;
+ }
+ else {
+ RAPIDJSON_ASSERT(!hasRoot_); // Should only has one and only one root.
+ hasRoot_ = true;
+ }
+ }
+
+ // Flush the value if it is the top level one.
+ bool EndValue(bool ret) {
+ if (RAPIDJSON_UNLIKELY(level_stack_.Empty())) // end of json text
+ Flush();
+ return ret;
+ }
+
+ OutputStream* os_;
+ internal::Stack<StackAllocator> level_stack_;
+ int maxDecimalPlaces_;
+ bool hasRoot_;
+
+private:
+ // Prohibit copy constructor & assignment operator.
+ Writer(const Writer&);
+ Writer& operator=(const Writer&);
+};
+
+// Full specialization for StringStream to prevent memory copying
+
+template<>
+inline bool Writer<StringBuffer>::WriteInt(int i) {
+ char *buffer = os_->Push(11);
+ const char* end = internal::i32toa(i, buffer);
+ os_->Pop(static_cast<size_t>(11 - (end - buffer)));
+ return true;
+}
+
+template<>
+inline bool Writer<StringBuffer>::WriteUint(unsigned u) {
+ char *buffer = os_->Push(10);
+ const char* end = internal::u32toa(u, buffer);
+ os_->Pop(static_cast<size_t>(10 - (end - buffer)));
+ return true;
+}
+
+template<>
+inline bool Writer<StringBuffer>::WriteInt64(int64_t i64) {
+ char *buffer = os_->Push(21);
+ const char* end = internal::i64toa(i64, buffer);
+ os_->Pop(static_cast<size_t>(21 - (end - buffer)));
+ return true;
+}
+
+template<>
+inline bool Writer<StringBuffer>::WriteUint64(uint64_t u) {
+ char *buffer = os_->Push(20);
+ const char* end = internal::u64toa(u, buffer);
+ os_->Pop(static_cast<size_t>(20 - (end - buffer)));
+ return true;
+}
+
+template<>
+inline bool Writer<StringBuffer>::WriteDouble(double d) {
+ if (internal::Double(d).IsNanOrInf()) {
+ // Note: This code path can only be reached if (RAPIDJSON_WRITE_DEFAULT_FLAGS & kWriteNanAndInfFlag).
+ if (!(kWriteDefaultFlags & kWriteNanAndInfFlag))
+ return false;
+ if (internal::Double(d).IsNan()) {
+ PutReserve(*os_, 3);
+ PutUnsafe(*os_, 'N'); PutUnsafe(*os_, 'a'); PutUnsafe(*os_, 'N');
+ return true;
+ }
+ if (internal::Double(d).Sign()) {
+ PutReserve(*os_, 9);
+ PutUnsafe(*os_, '-');
+ }
+ else
+ PutReserve(*os_, 8);
+ PutUnsafe(*os_, 'I'); PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'f');
+ PutUnsafe(*os_, 'i'); PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'i'); PutUnsafe(*os_, 't'); PutUnsafe(*os_, 'y');
+ return true;
+ }
+
+ char *buffer = os_->Push(25);
+ char* end = internal::dtoa(d, buffer, maxDecimalPlaces_);
+ os_->Pop(static_cast<size_t>(25 - (end - buffer)));
+ return true;
+}
+
+#if defined(RAPIDJSON_SSE2) || defined(RAPIDJSON_SSE42)
+template<>
+inline bool Writer<StringBuffer>::ScanWriteUnescapedString(StringStream& is, size_t length) {
+ if (length < 16)
+ return RAPIDJSON_LIKELY(is.Tell() < length);
+
+ if (!RAPIDJSON_LIKELY(is.Tell() < length))
+ return false;
+
+ const char* p = is.src_;
+ const char* end = is.head_ + length;
+ const char* nextAligned = reinterpret_cast<const char*>((reinterpret_cast<size_t>(p) + 15) & static_cast<size_t>(~15));
+ const char* endAligned = reinterpret_cast<const char*>(reinterpret_cast<size_t>(end) & static_cast<size_t>(~15));
+ if (nextAligned > end)
+ return true;
+
+ while (p != nextAligned)
+ if (*p < 0x20 || *p == '\"' || *p == '\\') {
+ is.src_ = p;
+ return RAPIDJSON_LIKELY(is.Tell() < length);
+ }
+ else
+ os_->PutUnsafe(*p++);
+
+ // The rest of string using SIMD
+ static const char dquote[16] = { '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"' };
+ static const char bslash[16] = { '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\' };
+ static const char space[16] = { 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F };
+ const __m128i dq = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&dquote[0]));
+ const __m128i bs = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&bslash[0]));
+ const __m128i sp = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&space[0]));
+
+ for (; p != endAligned; p += 16) {
+ const __m128i s = _mm_load_si128(reinterpret_cast<const __m128i *>(p));
+ const __m128i t1 = _mm_cmpeq_epi8(s, dq);
+ const __m128i t2 = _mm_cmpeq_epi8(s, bs);
+ const __m128i t3 = _mm_cmpeq_epi8(_mm_max_epu8(s, sp), sp); // s < 0x20 <=> max(s, 0x1F) == 0x1F
+ const __m128i x = _mm_or_si128(_mm_or_si128(t1, t2), t3);
+ unsigned short r = static_cast<unsigned short>(_mm_movemask_epi8(x));
+ if (RAPIDJSON_UNLIKELY(r != 0)) { // some of characters is escaped
+ SizeType len;
+#ifdef _MSC_VER // Find the index of first escaped
+ unsigned long offset;
+ _BitScanForward(&offset, r);
+ len = offset;
+#else
+ len = static_cast<SizeType>(__builtin_ffs(r) - 1);
+#endif
+ char* q = reinterpret_cast<char*>(os_->PushUnsafe(len));
+ for (size_t i = 0; i < len; i++)
+ q[i] = p[i];
+
+ p += len;
+ break;
+ }
+ _mm_storeu_si128(reinterpret_cast<__m128i *>(os_->PushUnsafe(16)), s);
+ }
+
+ is.src_ = p;
+ return RAPIDJSON_LIKELY(is.Tell() < length);
+}
+#elif defined(RAPIDJSON_NEON)
+template<>
+inline bool Writer<StringBuffer>::ScanWriteUnescapedString(StringStream& is, size_t length) {
+ if (length < 16)
+ return RAPIDJSON_LIKELY(is.Tell() < length);
+
+ if (!RAPIDJSON_LIKELY(is.Tell() < length))
+ return false;
+
+ const char* p = is.src_;
+ const char* end = is.head_ + length;
+ const char* nextAligned = reinterpret_cast<const char*>((reinterpret_cast<size_t>(p) + 15) & static_cast<size_t>(~15));
+ const char* endAligned = reinterpret_cast<const char*>(reinterpret_cast<size_t>(end) & static_cast<size_t>(~15));
+ if (nextAligned > end)
+ return true;
+
+ while (p != nextAligned)
+ if (*p < 0x20 || *p == '\"' || *p == '\\') {
+ is.src_ = p;
+ return RAPIDJSON_LIKELY(is.Tell() < length);
+ }
+ else
+ os_->PutUnsafe(*p++);
+
+ // The rest of string using SIMD
+ const uint8x16_t s0 = vmovq_n_u8('"');
+ const uint8x16_t s1 = vmovq_n_u8('\\');
+ const uint8x16_t s2 = vmovq_n_u8('\b');
+ const uint8x16_t s3 = vmovq_n_u8(32);
+
+ for (; p != endAligned; p += 16) {
+ const uint8x16_t s = vld1q_u8(reinterpret_cast<const uint8_t *>(p));
+ uint8x16_t x = vceqq_u8(s, s0);
+ x = vorrq_u8(x, vceqq_u8(s, s1));
+ x = vorrq_u8(x, vceqq_u8(s, s2));
+ x = vorrq_u8(x, vcltq_u8(s, s3));
+
+ x = vrev64q_u8(x); // Rev in 64
+ uint64_t low = vgetq_lane_u64(vreinterpretq_u64_u8(x), 0); // extract
+ uint64_t high = vgetq_lane_u64(vreinterpretq_u64_u8(x), 1); // extract
+
+ SizeType len = 0;
+ bool escaped = false;
+ if (low == 0) {
+ if (high != 0) {
+ uint32_t lz = internal::clzll(high);
+ len = 8 + (lz >> 3);
+ escaped = true;
+ }
+ } else {
+ uint32_t lz = internal::clzll(low);
+ len = lz >> 3;
+ escaped = true;
+ }
+ if (RAPIDJSON_UNLIKELY(escaped)) { // some of characters is escaped
+ char* q = reinterpret_cast<char*>(os_->PushUnsafe(len));
+ for (size_t i = 0; i < len; i++)
+ q[i] = p[i];
+
+ p += len;
+ break;
+ }
+ vst1q_u8(reinterpret_cast<uint8_t *>(os_->PushUnsafe(16)), s);
+ }
+
+ is.src_ = p;
+ return RAPIDJSON_LIKELY(is.Tell() < length);
+}
+#endif // RAPIDJSON_NEON
+
+RAPIDJSON_NAMESPACE_END
+
+#if defined(_MSC_VER) || defined(__clang__)
+RAPIDJSON_DIAG_POP
+#endif
+
+#endif // RAPIDJSON_RAPIDJSON_H_
diff --git a/vendor/github.com/Benau/go_rlottie/rlottie.h b/vendor/github.com/Benau/go_rlottie/rlottie.h
new file mode 100644
index 00000000..67565691
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/rlottie.h
@@ -0,0 +1,525 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _RLOTTIE_H_
+#define _RLOTTIE_H_
+
+#include <future>
+#include <vector>
+#include <memory>
+
+#if defined _WIN32 || defined __CYGWIN__
+ #ifdef RLOTTIE_BUILD
+ #define RLOTTIE_API __declspec(dllexport)
+ #else
+ #define RLOTTIE_API __declspec(dllimport)
+ #endif
+#else
+ #ifdef RLOTTIE_BUILD
+ #define RLOTTIE_API __attribute__ ((visibility ("default")))
+ #else
+ #define RLOTTIE_API
+ #endif
+#endif
+
+class AnimationImpl;
+struct LOTNode;
+struct LOTLayerNode;
+
+namespace rlottie {
+
+/**
+ * @brief Configures rlottie model cache policy.
+ *
+ * Provides Library level control to configure model cache
+ * policy. Setting it to 0 will disable
+ * the cache as well as flush all the previously cached content.
+ *
+ * @param[in] cacheSize Maximum Model Cache size.
+ *
+ * @note to disable Caching configure with 0 size.
+ * @note to flush the current Cache content configure it with 0 and
+ * then reconfigure with the new size.
+ *
+ * @internal
+ */
+RLOTTIE_API void configureModelCacheSize(size_t cacheSize);
+
+struct Color {
+ Color() = default;
+ Color(float r, float g , float b):_r(r), _g(g), _b(b){}
+ float r() const {return _r;}
+ float g() const {return _g;}
+ float b() const {return _b;}
+private:
+ float _r{0};
+ float _g{0};
+ float _b{0};
+};
+
+struct Size {
+ Size() = default;
+ Size(float w, float h):_w(w), _h(h){}
+ float w() const {return _w;}
+ float h() const {return _h;}
+private:
+ float _w{0};
+ float _h{0};
+};
+
+struct Point {
+ Point() = default;
+ Point(float x, float y):_x(x), _y(y){}
+ float x() const {return _x;}
+ float y() const {return _y;}
+private:
+ float _x{0};
+ float _y{0};
+};
+
+struct FrameInfo {
+ explicit FrameInfo(uint32_t frame): _frameNo(frame){}
+ uint32_t curFrame() const {return _frameNo;}
+private:
+ uint32_t _frameNo;
+};
+
+enum class Property {
+ FillColor, /*!< Color property of Fill object , value type is rlottie::Color */
+ FillOpacity, /*!< Opacity property of Fill object , value type is float [ 0 .. 100] */
+ StrokeColor, /*!< Color property of Stroke object , value type is rlottie::Color */
+ StrokeOpacity, /*!< Opacity property of Stroke object , value type is float [ 0 .. 100] */
+ StrokeWidth, /*!< stroke width property of Stroke object , value type is float */
+ TrAnchor, /*!< Transform Anchor property of Layer and Group object , value type is rlottie::Point */
+ TrPosition, /*!< Transform Position property of Layer and Group object , value type is rlottie::Point */
+ TrScale, /*!< Transform Scale property of Layer and Group object , value type is rlottie::Size. range[0 ..100] */
+ TrRotation, /*!< Transform Rotation property of Layer and Group object , value type is float. range[0 .. 360] in degrees*/
+ TrOpacity /*!< Transform Opacity property of Layer and Group object , value type is float [ 0 .. 100] */
+};
+
+struct Color_Type{};
+struct Point_Type{};
+struct Size_Type{};
+struct Float_Type{};
+template <typename T> struct MapType;
+
+class RLOTTIE_API Surface {
+public:
+ /**
+ * @brief Surface object constructor.
+ *
+ * @param[in] buffer surface buffer.
+ * @param[in] width surface width.
+ * @param[in] height surface height.
+ * @param[in] bytesPerLine number of bytes in a surface scanline.
+ *
+ * @note Default surface format is ARGB32_Premultiplied.
+ *
+ * @internal
+ */
+ Surface(uint32_t *buffer, size_t width, size_t height, size_t bytesPerLine);
+
+ /**
+ * @brief Sets the Draw Area available on the Surface.
+ *
+ * Lottie will use the draw region size to generate frame image
+ * and will update only the draw rgion of the surface.
+ *
+ * @param[in] x region area x position.
+ * @param[in] y region area y position.
+ * @param[in] width region area width.
+ * @param[in] height region area height.
+ *
+ * @note Default surface format is ARGB32_Premultiplied.
+ * @note Default draw region area is [ 0 , 0, surface width , surface height]
+ *
+ * @internal
+ */
+ void setDrawRegion(size_t x, size_t y, size_t width, size_t height);
+
+ /**
+ * @brief Returns width of the surface.
+ *
+ * @return surface width
+ *
+ * @internal
+ *
+ */
+ size_t width() const {return mWidth;}
+
+ /**
+ * @brief Returns height of the surface.
+ *
+ * @return surface height
+ *
+ * @internal
+ */
+ size_t height() const {return mHeight;}
+
+ /**
+ * @brief Returns number of bytes in the surface scanline.
+ *
+ * @return number of bytes in scanline.
+ *
+ * @internal
+ */
+ size_t bytesPerLine() const {return mBytesPerLine;}
+
+ /**
+ * @brief Returns buffer attached tp the surface.
+ *
+ * @return buffer attaced to the Surface.
+ *
+ * @internal
+ */
+ uint32_t *buffer() const {return mBuffer;}
+
+ /**
+ * @brief Returns drawable area width of the surface.
+ *
+ * @return drawable area width
+ *
+ * @note Default value is width() of the surface
+ *
+ * @internal
+ *
+ */
+ size_t drawRegionWidth() const {return mDrawArea.w;}
+
+ /**
+ * @brief Returns drawable area height of the surface.
+ *
+ * @return drawable area height
+ *
+ * @note Default value is height() of the surface
+ *
+ * @internal
+ */
+ size_t drawRegionHeight() const {return mDrawArea.h;}
+
+ /**
+ * @brief Returns drawable area's x position of the surface.
+ *
+ * @return drawable area's x potition.
+ *
+ * @note Default value is 0
+ *
+ * @internal
+ */
+ size_t drawRegionPosX() const {return mDrawArea.x;}
+
+ /**
+ * @brief Returns drawable area's y position of the surface.
+ *
+ * @return drawable area's y potition.
+ *
+ * @note Default value is 0
+ *
+ * @internal
+ */
+ size_t drawRegionPosY() const {return mDrawArea.y;}
+
+ /**
+ * @brief Default constructor.
+ */
+ Surface() = default;
+private:
+ uint32_t *mBuffer{nullptr};
+ size_t mWidth{0};
+ size_t mHeight{0};
+ size_t mBytesPerLine{0};
+ struct {
+ size_t x{0};
+ size_t y{0};
+ size_t w{0};
+ size_t h{0};
+ }mDrawArea;
+};
+
+using MarkerList = std::vector<std::tuple<std::string, int , int>>;
+/**
+ * @brief https://helpx.adobe.com/after-effects/using/layer-markers-composition-markers.html
+ * Markers exported form AE are used to describe a segmnet of an animation {comment/tag , startFrame, endFrame}
+ * Marker can be use to devide a resource in to separate animations by tagging the segment with comment string ,
+ * start frame and duration of that segment.
+ */
+
+using LayerInfoList = std::vector<std::tuple<std::string, int , int>>;
+
+
+using ColorFilter = std::function<void(float &r , float &g, float &b)>;
+
+class RLOTTIE_API Animation {
+public:
+
+ /**
+ * @brief Constructs an animation object from file path.
+ *
+ * @param[in] path Lottie resource file path
+ * @param[in] cachePolicy whether to cache or not the model data.
+ * use only when need to explicit disabl caching for a
+ * particular resource. To disable caching at library level
+ * use @see configureModelCacheSize() instead.
+ *
+ * @return Animation object that can render the contents of the
+ * Lottie resource represented by file path.
+ *
+ * @internal
+ */
+ static std::unique_ptr<Animation>
+ loadFromFile(const std::string &path, bool cachePolicy=true);
+
+ /**
+ * @brief Constructs an animation object from JSON string data.
+ *
+ * @param[in] jsonData The JSON string data.
+ * @param[in] key the string that will be used to cache the JSON string data.
+ * @param[in] resourcePath the path will be used to search for external resource.
+ * @param[in] cachePolicy whether to cache or not the model data.
+ * use only when need to explicit disabl caching for a
+ * particular resource. To disable caching at library level
+ * use @see configureModelCacheSize() instead.
+ *
+ * @return Animation object that can render the contents of the
+ * Lottie resource represented by JSON string data.
+ *
+ * @internal
+ */
+ static std::unique_ptr<Animation>
+ loadFromData(std::string jsonData, const std::string &key,
+ const std::string &resourcePath="", bool cachePolicy=true);
+
+ /**
+ * @brief Constructs an animation object from JSON string data and update.
+ * the color properties using ColorFilter.
+
+ * @param[in] jsonData The JSON string data.
+ * @param[in] resourcePath the path will be used to search for external resource.
+ * @param[in] filter The color filter that will be applied for each color property
+ * found during parsing.
+
+ * @return Animation object that can render the contents of the
+ * Lottie resource represented by JSON string data.
+ *
+ * @internal
+ */
+ static std::unique_ptr<Animation>
+ loadFromData(std::string jsonData, std::string resourcePath, ColorFilter filter);
+
+ /**
+ * @brief Returns default framerate of the Lottie resource.
+ *
+ * @return framerate of the Lottie resource
+ *
+ * @internal
+ *
+ */
+ double frameRate() const;
+
+ /**
+ * @brief Returns total number of frames present in the Lottie resource.
+ *
+ * @return frame count of the Lottie resource.
+ *
+ * @note frame number starts with 0.
+ *
+ * @internal
+ */
+ size_t totalFrame() const;
+
+ /**
+ * @brief Returns default viewport size of the Lottie resource.
+ *
+ * @param[out] width default width of the viewport.
+ * @param[out] height default height of the viewport.
+ *
+ * @internal
+ *
+ */
+ void size(size_t &width, size_t &height) const;
+
+ /**
+ * @brief Returns total animation duration of Lottie resource in second.
+ * it uses totalFrame() and frameRate() to calculate the duration.
+ * duration = totalFrame() / frameRate().
+ *
+ * @return total animation duration in second.
+ * @retval 0 if the Lottie resource has no animation.
+ *
+ * @see totalFrame()
+ * @see frameRate()
+ *
+ * @internal
+ */
+ double duration() const;
+
+ /**
+ * @brief Returns frame number for a given position.
+ * this function helps to map the position value retuned
+ * by the animator to a frame number in side the Lottie resource.
+ * frame_number = lerp(start_frame, endframe, pos);
+ *
+ * @param[in] pos normalized position value [0 ... 1]
+ *
+ * @return frame numer maps to the position value [startFrame .... endFrame]
+ *
+ * @internal
+ */
+ size_t frameAtPos(double pos);
+
+ /**
+ * @brief Renders the content to surface Asynchronously.
+ * it gives a future in return to get the result of the
+ * rendering at a future point.
+ * To get best performance user has to start rendering as soon as
+ * it finds that content at {frameNo} has to be rendered and get the
+ * result from the future at the last moment when the surface is needed
+ * to draw into the screen.
+ *
+ *
+ * @param[in] frameNo Content corresponds to the @p frameNo needs to be drawn
+ * @param[in] surface Surface in which content will be drawn
+ * @param[in] keepAspectRatio whether to keep the aspect ratio while scaling the content.
+ *
+ * @return future that will hold the result when rendering finished.
+ *
+ * for Synchronus rendering @see renderSync
+ *
+ * @see Surface
+ * @internal
+ */
+ std::future<Surface> render(size_t frameNo, Surface surface, bool keepAspectRatio=true);
+
+ /**
+ * @brief Renders the content to surface synchronously.
+ * for performance use the async rendering @see render
+ *
+ * @param[in] frameNo Content corresponds to the @p frameNo needs to be drawn
+ * @param[in] surface Surface in which content will be drawn
+ * @param[in] keepAspectRatio whether to keep the aspect ratio while scaling the content.
+ *
+ * @internal
+ */
+ void renderSync(size_t frameNo, Surface surface, bool keepAspectRatio=true);
+
+ /**
+ * @brief Returns root layer of the composition updated with
+ * content of the Lottie resource at frame number @p frameNo.
+ *
+ * @param[in] frameNo Content corresponds to the @p frameNo needs to be extracted.
+ * @param[in] width content viewbox width
+ * @param[in] height content viewbox height
+ *
+ * @return Root layer node.
+ *
+ * @internal
+ */
+ const LOTLayerNode * renderTree(size_t frameNo, size_t width, size_t height) const;
+
+ /**
+ * @brief Returns Composition Markers.
+ *
+ *
+ * @return returns MarkerList of the Composition.
+ *
+ * @see MarkerList
+ * @internal
+ */
+ const MarkerList& markers() const;
+
+ /**
+ * @brief Returns Layer information{name, inFrame, outFrame} of all the child layers of the composition.
+ *
+ *
+ * @return List of Layer Information of the Composition.
+ *
+ * @see LayerInfoList
+ * @internal
+ */
+ const LayerInfoList& layers() const;
+
+ /**
+ * @brief Sets property value for the specified {@link KeyPath}. This {@link KeyPath} can resolve
+ * to multiple contents. In that case, the callback's value will apply to all of them.
+ *
+ * Keypath should conatin object names separated by (.) and can handle globe(**) or wildchar(*).
+ *
+ * @usage
+ * To change fillcolor property of fill1 object in the layer1->group1->fill1 hirarchy to RED color
+ *
+ * player->setValue<rlottie::Property::FillColor>("layer1.group1.fill1", rlottie::Color(1, 0, 0);
+ *
+ * if all the color property inside group1 needs to be changed to GREEN color
+ *
+ * player->setValue<rlottie::Property::FillColor>("**.group1.**", rlottie::Color(0, 1, 0);
+ *
+ * @internal
+ */
+ template<Property prop, typename AnyValue>
+ void setValue(const std::string &keypath, AnyValue value)
+ {
+ setValue(MapType<std::integral_constant<Property, prop>>{}, prop, keypath, value);
+ }
+
+ /**
+ * @brief default destructor
+ *
+ * @internal
+ */
+ ~Animation();
+
+private:
+ void setValue(Color_Type, Property, const std::string &, Color);
+ void setValue(Float_Type, Property, const std::string &, float);
+ void setValue(Size_Type, Property, const std::string &, Size);
+ void setValue(Point_Type, Property, const std::string &, Point);
+
+ void setValue(Color_Type, Property, const std::string &, std::function<Color(const FrameInfo &)> &&);
+ void setValue(Float_Type, Property, const std::string &, std::function<float(const FrameInfo &)> &&);
+ void setValue(Size_Type, Property, const std::string &, std::function<Size(const FrameInfo &)> &&);
+ void setValue(Point_Type, Property, const std::string &, std::function<Point(const FrameInfo &)> &&);
+ /**
+ * @brief default constructor
+ *
+ * @internal
+ */
+ Animation();
+
+ std::unique_ptr<AnimationImpl> d;
+};
+
+//Map Property to Value type
+template<> struct MapType<std::integral_constant<Property, Property::FillColor>>: Color_Type{};
+template<> struct MapType<std::integral_constant<Property, Property::StrokeColor>>: Color_Type{};
+template<> struct MapType<std::integral_constant<Property, Property::FillOpacity>>: Float_Type{};
+template<> struct MapType<std::integral_constant<Property, Property::StrokeOpacity>>: Float_Type{};
+template<> struct MapType<std::integral_constant<Property, Property::StrokeWidth>>: Float_Type{};
+template<> struct MapType<std::integral_constant<Property, Property::TrRotation>>: Float_Type{};
+template<> struct MapType<std::integral_constant<Property, Property::TrOpacity>>: Float_Type{};
+template<> struct MapType<std::integral_constant<Property, Property::TrAnchor>>: Point_Type{};
+template<> struct MapType<std::integral_constant<Property, Property::TrPosition>>: Point_Type{};
+template<> struct MapType<std::integral_constant<Property, Property::TrScale>>: Size_Type{};
+
+
+} // namespace lotplayer
+
+#endif // _RLOTTIE_H_
diff --git a/vendor/github.com/Benau/go_rlottie/rlottie_capi.h b/vendor/github.com/Benau/go_rlottie/rlottie_capi.h
new file mode 100644
index 00000000..7c883fac
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/rlottie_capi.h
@@ -0,0 +1,299 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _RLOTTIE_CAPI_H_
+#define _RLOTTIE_CAPI_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include "rlottiecommon.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum {
+ LOTTIE_ANIMATION_PROPERTY_FILLCOLOR, /*!< Color property of Fill object , value type is float [0 ... 1] */
+ LOTTIE_ANIMATION_PROPERTY_FILLOPACITY, /*!< Opacity property of Fill object , value type is float [ 0 .. 100] */
+ LOTTIE_ANIMATION_PROPERTY_STROKECOLOR, /*!< Color property of Stroke object , value type is float [0 ... 1] */
+ LOTTIE_ANIMATION_PROPERTY_STROKEOPACITY, /*!< Opacity property of Stroke object , value type is float [ 0 .. 100] */
+ LOTTIE_ANIMATION_PROPERTY_STROKEWIDTH, /*!< stroke width property of Stroke object , value type is float */
+ LOTTIE_ANIMATION_PROPERTY_TR_ANCHOR, /*!< Transform Anchor property of Layer and Group object , value type is int */
+ LOTTIE_ANIMATION_PROPERTY_TR_POSITION, /*!< Transform Position property of Layer and Group object , value type is int */
+ LOTTIE_ANIMATION_PROPERTY_TR_SCALE, /*!< Transform Scale property of Layer and Group object , value type is float range[0 ..100] */
+ LOTTIE_ANIMATION_PROPERTY_TR_ROTATION, /*!< Transform Scale property of Layer and Group object , value type is float. range[0 .. 360] in degrees*/
+ LOTTIE_ANIMATION_PROPERTY_TR_OPACITY /*!< Transform Opacity property of Layer and Group object , value type is float [ 0 .. 100] */
+}Lottie_Animation_Property;
+
+typedef struct Lottie_Animation_S Lottie_Animation;
+
+/**
+ * @brief Constructs an animation object from file path.
+ *
+ * @param[in] path Lottie resource file path
+ *
+ * @return Animation object that can build the contents of the
+ * Lottie resource represented by file path.
+ *
+ * @see lottie_animation_destroy()
+ *
+ * @ingroup Lottie_Animation
+ * @internal
+ */
+RLOTTIE_API Lottie_Animation *lottie_animation_from_file(const char *path);
+
+/**
+ * @brief Constructs an animation object from JSON string data.
+ *
+ * @param[in] data The JSON string data.
+ * @param[in] key the string that will be used to cache the JSON string data.
+ * @param[in] resource_path the path that will be used to load external resource needed by the JSON data.
+ *
+ * @return Animation object that can build the contents of the
+ * Lottie resource represented by JSON string data.
+ *
+ * @ingroup Lottie_Animation
+ * @internal
+ */
+RLOTTIE_API Lottie_Animation *lottie_animation_from_data(const char *data, const char *key, const char *resource_path);
+
+/**
+ * @brief Free given Animation object resource.
+ *
+ * @param[in] animation Animation object to free.
+ *
+ * @see lottie_animation_from_file()
+ * @see lottie_animation_from_data()
+ *
+ * @ingroup Lottie_Animation
+ * @internal
+ */
+RLOTTIE_API void lottie_animation_destroy(Lottie_Animation *animation);
+
+/**
+ * @brief Returns default viewport size of the Lottie resource.
+ *
+ * @param[in] animation Animation object.
+ * @param[out] w default width of the viewport.
+ * @param[out] h default height of the viewport.
+ *
+ * @ingroup Lottie_Animation
+ * @internal
+ */
+RLOTTIE_API void lottie_animation_get_size(const Lottie_Animation *animation, size_t *width, size_t *height);
+
+/**
+ * @brief Returns total animation duration of Lottie resource in second.
+ * it uses totalFrame() and frameRate() to calculate the duration.
+ * duration = totalFrame() / frameRate().
+ *
+ * @param[in] animation Animation object.
+ *
+ * @return total animation duration in second.
+ * @c 0 if the Lottie resource has no animation.
+ *
+ * @see lottie_animation_get_totalframe()
+ * @see lottie_animation_get_framerate()
+ *
+ * @ingroup Lottie_Animation
+ * @internal
+ */
+RLOTTIE_API double lottie_animation_get_duration(const Lottie_Animation *animation);
+
+/**
+ * @brief Returns total number of frames present in the Lottie resource.
+ *
+ * @param[in] animation Animation object.
+ *
+ * @return frame count of the Lottie resource.*
+ *
+ * @note frame number starts with 0.
+ *
+ * @see lottie_animation_get_duration()
+ * @see lottie_animation_get_framerate()
+ *
+ * @ingroup Lottie_Animation
+ * @internal
+ */
+RLOTTIE_API size_t lottie_animation_get_totalframe(const Lottie_Animation *animation);
+
+/**
+ * @brief Returns default framerate of the Lottie resource.
+ *
+ * @param[in] animation Animation object.
+ *
+ * @return framerate of the Lottie resource
+ *
+ * @ingroup Lottie_Animation
+ * @internal
+ *
+ */
+RLOTTIE_API double lottie_animation_get_framerate(const Lottie_Animation *animation);
+
+/**
+ * @brief Get the render tree which contains the snapshot of the animation object
+ * at frame = @c frame_num, the content of the animation in that frame number.
+ *
+ * @param[in] animation Animation object.
+ * @param[in] frame_num Content corresponds to the @p frame_num needs to be drawn
+ * @param[in] width requested snapshot viewport width.
+ * @param[in] height requested snapshot viewport height.
+ *
+ * @return Animation snapshot tree.
+ *
+ * @note: User has to traverse the tree for rendering.
+ *
+ * @see LOTLayerNode
+ * @see LOTNode
+ *
+ * @ingroup Lottie_Animation
+ * @internal
+ */
+RLOTTIE_API const LOTLayerNode *lottie_animation_render_tree(Lottie_Animation *animation, size_t frame_num, size_t width, size_t height);
+
+/**
+ * @brief Maps position to frame number and returns it.
+ *
+ * @param[in] animation Animation object.
+ * @param[in] pos position in the range [ 0.0 .. 1.0 ].
+ *
+ * @return mapped frame number in the range [ start_frame .. end_frame ].
+ * @c 0 if the Lottie resource has no animation.
+ *
+ *
+ * @ingroup Lottie_Animation
+ * @internal
+ */
+RLOTTIE_API size_t lottie_animation_get_frame_at_pos(const Lottie_Animation *animation, float pos);
+
+/**
+ * @brief Request to render the content of the frame @p frame_num to buffer @p buffer.
+ *
+ * @param[in] animation Animation object.
+ * @param[in] frame_num the frame number needs to be rendered.
+ * @param[in] buffer surface buffer use for rendering.
+ * @param[in] width width of the surface
+ * @param[in] height height of the surface
+ * @param[in] bytes_per_line stride of the surface in bytes.
+ *
+ *
+ * @ingroup Lottie_Animation
+ * @internal
+ */
+RLOTTIE_API void lottie_animation_render(Lottie_Animation *animation, size_t frame_num, uint32_t *buffer, size_t width, size_t height, size_t bytes_per_line);
+
+/**
+ * @brief Request to render the content of the frame @p frame_num to buffer @p buffer asynchronously.
+ *
+ * @param[in] animation Animation object.
+ * @param[in] frame_num the frame number needs to be rendered.
+ * @param[in] buffer surface buffer use for rendering.
+ * @param[in] width width of the surface
+ * @param[in] height height of the surface
+ * @param[in] bytes_per_line stride of the surface in bytes.
+ *
+ * @note user must call lottie_animation_render_flush() to make sure render is finished.
+ *
+ * @ingroup Lottie_Animation
+ * @internal
+ */
+RLOTTIE_API void lottie_animation_render_async(Lottie_Animation *animation, size_t frame_num, uint32_t *buffer, size_t width, size_t height, size_t bytes_per_line);
+
+/**
+ * @brief Request to finish the current async renderer job for this animation object.
+ * If render is finished then this call returns immediately.
+ * If not, it waits till render job finish and then return.
+ *
+ * @param[in] animation Animation object.
+ *
+ * @warning User must call lottie_animation_render_async() and lottie_animation_render_flush()
+ * in pair to get the benefit of async rendering.
+ *
+ * @return the pixel buffer it finished rendering.
+ *
+ * @ingroup Lottie_Animation
+ * @internal
+ */
+RLOTTIE_API uint32_t *lottie_animation_render_flush(Lottie_Animation *animation);
+
+
+/**
+ * @brief Request to change the properties of this animation object.
+ * Keypath should contain object names separated by (.) and can handle glob(**) or wildcard(*)
+ *
+ * @usage
+ * To change fillcolor property of fill1 object in the layer1->group1->fill1 hierarchy to RED color
+ *
+ * lottie_animation_property_override(animation, LOTTIE_ANIMATION_PROPERTY_FILLCOLOR, "layer1.group1.fill1", 1.0, 0.0, 0.0);
+ *
+ * if all the color property inside group1 needs to be changed to GREEN color
+ *
+ * lottie_animation_property_override(animation, LOTTIE_ANIMATION_PROPERTY_FILLCOLOR, "**.group1.**", 1.0, 0.0, 0.0);
+ *
+ * @param[in] animation Animation object.
+ * @param[in] type Property type. (@p Lottie_Animation_Property)
+ * @param[in] keypath Specific content of target.
+ * @param[in] ... Property values.
+ *
+ * @ingroup Lottie_Animation
+ * @internal
+ * */
+RLOTTIE_API void lottie_animation_property_override(Lottie_Animation *animation, const Lottie_Animation_Property type, const char *keypath, ...);
+
+
+/**
+ * @brief Returns list of markers in the Lottie resource
+ * @p LOTMarkerList has a @p LOTMarker list and size of list
+ * @p LOTMarker has the marker's name, start frame, and end frame.
+ *
+ * @param[in] animation Animation object.
+ *
+ * @return The list of marker. If there is no marker, return null.
+ *
+ * @ingroup Lottie_Animation
+ * @internal
+ * */
+RLOTTIE_API const LOTMarkerList* lottie_animation_get_markerlist(Lottie_Animation *animation);
+
+/**
+ * @brief Configures rlottie model cache policy.
+ *
+ * Provides Library level control to configure model cache
+ * policy. Setting it to 0 will disable
+ * the cache as well as flush all the previously cached content.
+ *
+ * @param[in] cacheSize Maximum Model Cache size.
+ *
+ * @note to disable Caching configure with 0 size.
+ * @note to flush the current Cache content configure it with 0 and
+ * then reconfigure with the new size.
+ *
+ * @internal
+ */
+RLOTTIE_API void lottie_configure_model_cache_size(size_t cacheSize);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //_RLOTTIE_CAPI_H_
+
diff --git a/vendor/github.com/Benau/go_rlottie/rlottiecommon.h b/vendor/github.com/Benau/go_rlottie/rlottiecommon.h
new file mode 100644
index 00000000..784fbe28
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/rlottiecommon.h
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _RLOTTIE_COMMON_H_
+#define _RLOTTIE_COMMON_H_
+
+#if defined _WIN32 || defined __CYGWIN__
+ #ifdef RLOTTIE_BUILD
+ #define RLOTTIE_API __declspec(dllexport)
+ #else
+ #define RLOTTIE_API __declspec(dllimport)
+ #endif
+#else
+ #ifdef RLOTTIE_BUILD
+ #define RLOTTIE_API __attribute__ ((visibility ("default")))
+ #else
+ #define RLOTTIE_API
+ #endif
+#endif
+
+
+/**
+ * @defgroup Lottie_Animation Lottie_Animation
+ *
+ * Lottie Animation is a modern style vector based animation design. Its animation
+ * resource(within json format) could be generated by Adobe After Effect using
+ * bodymovin plugin. You can find a good examples in Lottie Community which
+ * shares many free resources(see: www.lottiefiles.com).
+ *
+ * This Lottie_Animation is a common engine to manipulate, control Lottie
+ * Animation from the Lottie resource - json file. It provides a scene-graph
+ * node tree per frames by user demand as well as rasterized frame images.
+ *
+ */
+
+/**
+ * @ingroup Lottie_Animation
+ */
+
+typedef enum
+{
+ BrushSolid = 0,
+ BrushGradient
+} LOTBrushType;
+
+typedef enum
+{
+ FillEvenOdd = 0,
+ FillWinding
+} LOTFillRule;
+
+typedef enum
+{
+ JoinMiter = 0,
+ JoinBevel,
+ JoinRound
+} LOTJoinStyle;
+
+typedef enum
+{
+ CapFlat = 0,
+ CapSquare,
+ CapRound
+} LOTCapStyle;
+
+typedef enum
+{
+ GradientLinear = 0,
+ GradientRadial
+} LOTGradientType;
+
+typedef struct LOTGradientStop
+{
+ float pos;
+ unsigned char r, g, b, a;
+} LOTGradientStop;
+
+typedef enum
+{
+ MaskAdd = 0,
+ MaskSubstract,
+ MaskIntersect,
+ MaskDifference
+} LOTMaskType;
+
+typedef struct LOTMask {
+ struct {
+ const float *ptPtr;
+ size_t ptCount;
+ const char* elmPtr;
+ size_t elmCount;
+ } mPath;
+ LOTMaskType mMode;
+ unsigned char mAlpha;
+}LOTMask;
+
+typedef enum
+{
+ MatteNone = 0,
+ MatteAlpha,
+ MatteAlphaInv,
+ MatteLuma,
+ MatteLumaInv
+} LOTMatteType;
+
+typedef struct LOTMarker {
+ char *name;
+ size_t startframe;
+ size_t endframe;
+} LOTMarker;
+
+typedef struct LOTMarkerList {
+ LOTMarker *ptr;
+ size_t size;
+} LOTMarkerList;
+
+typedef struct LOTNode {
+
+#define ChangeFlagNone 0x0000
+#define ChangeFlagPath 0x0001
+#define ChangeFlagPaint 0x0010
+#define ChangeFlagAll (ChangeFlagPath | ChangeFlagPaint) /* was '&': 0x0001 & 0x0010 == 0 == ChangeFlagNone */
+
+ struct {
+ const float *ptPtr;
+ size_t ptCount;
+ const char *elmPtr;
+ size_t elmCount;
+ } mPath;
+
+ struct {
+ unsigned char r, g, b, a;
+ } mColor;
+
+ struct {
+ unsigned char enable;
+ float width;
+ LOTCapStyle cap;
+ LOTJoinStyle join;
+ float miterLimit;
+ float *dashArray;
+ int dashArraySize;
+ } mStroke;
+
+ struct {
+ LOTGradientType type;
+ LOTGradientStop *stopPtr;
+ size_t stopCount;
+ struct {
+ float x, y;
+ } start, end, center, focal;
+ float cradius;
+ float fradius;
+ } mGradient;
+
+ struct {
+ unsigned char *data;
+ size_t width;
+ size_t height;
+ unsigned char mAlpha;
+ struct {
+ float m11; float m12; float m13;
+ float m21; float m22; float m23;
+ float m31; float m32; float m33;
+ } mMatrix;
+ } mImageInfo;
+
+ int mFlag;
+ LOTBrushType mBrushType;
+ LOTFillRule mFillRule;
+
+ const char *keypath;
+} LOTNode;
+
+
+
+typedef struct LOTLayerNode {
+
+ struct {
+ LOTMask *ptr;
+ size_t size;
+ } mMaskList;
+
+ struct {
+ const float *ptPtr;
+ size_t ptCount;
+ const char *elmPtr;
+ size_t elmCount;
+ } mClipPath;
+
+ struct {
+ struct LOTLayerNode **ptr;
+ size_t size;
+ } mLayerList;
+
+ struct {
+ LOTNode **ptr;
+ size_t size;
+ } mNodeList;
+
+ LOTMatteType mMatte;
+ int mVisible;
+ unsigned char mAlpha;
+ const char *keypath;
+
+} LOTLayerNode;
+
+/**
+ * @}
+ */
+
+#endif // _RLOTTIE_COMMON_H_
diff --git a/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_math.cpp b/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_math.cpp
new file mode 100644
index 00000000..932f7ed4
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_math.cpp
@@ -0,0 +1,461 @@
+/***************************************************************************/
+/* */
+/* fttrigon.c */
+/* */
+/* FreeType trigonometric functions (body). */
+/* */
+/* Copyright 2001-2005, 2012-2013 by */
+/* David Turner, Robert Wilhelm, and Werner Lemberg. */
+/* */
+/* This file is part of the FreeType project, and may only be used, */
+/* modified, and distributed under the terms of the FreeType project */
+/* license, LICENSE.TXT. By continuing to use, modify, or distribute */
+/* this file you indicate that you have read the license and */
+/* understand and accept it fully. */
+/* */
+/***************************************************************************/
+
+#include "vector_freetype_v_ft_math.h"
+#include <math.h>
+
+// from https://github.com/chromium/chromium/blob/59afd8336009c9d97c22854c52e0382b62b3aa5e/third_party/abseil-cpp/absl/base/internal/bits.h
+
+#if defined(_MSC_VER)
+#include <intrin.h>
+static unsigned int __inline clz(unsigned int x) {
+ unsigned long r = 0;
+ if (x != 0)
+ {
+ _BitScanReverse(&r, x);
+ }
+ return r;
+}
+#define SW_FT_MSB(x) (clz(x))
+#elif defined(__GNUC__)
+#define SW_FT_MSB(x) (31 - __builtin_clz(x))
+#else
+static unsigned int __inline clz(unsigned int x) {
+    /* Highest set-bit index, matching _BitScanReverse / 31 - __builtin_clz. */
+    unsigned int c = 0;
+    if (x & 0xFFFF0000) { c += 16; x >>= 16; }
+    if (x & 0x0000FF00) { c += 8; x >>= 8; }
+    if (x & 0x000000F0) { c += 4; x >>= 4; }
+    if (x & 0x0000000C) { c += 2; x >>= 2; }
+    if (x & 0x00000002) { c += 1; }
+    return c;
+}
+#define SW_FT_MSB(x) (clz(x))
+#endif
+
+
+
+
+
+#define SW_FT_PAD_FLOOR(x, n) ((x) & ~((n)-1))
+#define SW_FT_PAD_ROUND(x, n) SW_FT_PAD_FLOOR((x) + ((n) / 2), n)
+#define SW_FT_PAD_CEIL(x, n) SW_FT_PAD_FLOOR((x) + ((n)-1), n)
+
+#define SW_FT_BEGIN_STMNT do {
+#define SW_FT_END_STMNT \
+ } \
+ while (0)
+/* transfer sign leaving a positive number */
+#define SW_FT_MOVE_SIGN(x, s) \
+ SW_FT_BEGIN_STMNT \
+ if (x < 0) { \
+ x = -x; \
+ s = -s; \
+ } \
+ SW_FT_END_STMNT
+
+SW_FT_Long SW_FT_MulFix(SW_FT_Long a, SW_FT_Long b)
+{
+ SW_FT_Int s = 1;
+ SW_FT_Long c;
+
+ SW_FT_MOVE_SIGN(a, s);
+ SW_FT_MOVE_SIGN(b, s);
+
+ c = (SW_FT_Long)(((SW_FT_Int64)a * b + 0x8000L) >> 16);
+
+ return (s > 0) ? c : -c;
+}
+
+SW_FT_Long SW_FT_MulDiv(SW_FT_Long a, SW_FT_Long b, SW_FT_Long c)
+{
+ SW_FT_Int s = 1;
+ SW_FT_Long d;
+
+ SW_FT_MOVE_SIGN(a, s);
+ SW_FT_MOVE_SIGN(b, s);
+ SW_FT_MOVE_SIGN(c, s);
+
+ d = (SW_FT_Long)(c > 0 ? ((SW_FT_Int64)a * b + (c >> 1)) / c : 0x7FFFFFFFL);
+
+ return (s > 0) ? d : -d;
+}
+
+SW_FT_Long SW_FT_DivFix(SW_FT_Long a, SW_FT_Long b)
+{
+ SW_FT_Int s = 1;
+ SW_FT_Long q;
+
+ SW_FT_MOVE_SIGN(a, s);
+ SW_FT_MOVE_SIGN(b, s);
+
+ q = (SW_FT_Long)(b > 0 ? (((SW_FT_UInt64)a << 16) + (b >> 1)) / b
+ : 0x7FFFFFFFL);
+
+ return (s < 0 ? -q : q);
+}
+
+/*************************************************************************/
+/* */
+/* This is a fixed-point CORDIC implementation of trigonometric */
+/* functions as well as transformations between Cartesian and polar */
+/* coordinates. The angles are represented as 16.16 fixed-point values */
+/* in degrees, i.e., the angular resolution is 2^-16 degrees. Note that */
+/* only vectors longer than 2^16*180/pi (or at least 22 bits) on a */
+/* discrete Cartesian grid can have the same or better angular */
+/* resolution. Therefore, to maintain this precision, some functions */
+/* require an interim upscaling of the vectors, whereas others operate */
+/* with 24-bit long vectors directly. */
+/* */
+/*************************************************************************/
+
+/* the Cordic shrink factor 0.858785336480436 * 2^32 */
+#define SW_FT_TRIG_SCALE 0xDBD95B16UL
+
+/* the highest bit in overflow-safe vector components, */
+/* MSB of 0.858785336480436 * sqrt(0.5) * 2^30 */
+#define SW_FT_TRIG_SAFE_MSB 29
+
+/* this table was generated for SW_FT_PI = 180L << 16, i.e. degrees */
+#define SW_FT_TRIG_MAX_ITERS 23
+
+static const SW_FT_Fixed ft_trig_arctan_table[] = {
+ 1740967L, 919879L, 466945L, 234379L, 117304L, 58666L, 29335L, 14668L,
+ 7334L, 3667L, 1833L, 917L, 458L, 229L, 115L, 57L,
+ 29L, 14L, 7L, 4L, 2L, 1L};
+
+/* multiply a given value by the CORDIC shrink factor */
+static SW_FT_Fixed ft_trig_downscale(SW_FT_Fixed val)
+{
+ SW_FT_Fixed s;
+ SW_FT_Int64 v;
+
+ s = val;
+ val = SW_FT_ABS(val);
+
+ v = (val * (SW_FT_Int64)SW_FT_TRIG_SCALE) + 0x100000000UL;
+ val = (SW_FT_Fixed)(v >> 32);
+
+ return (s >= 0) ? val : -val;
+}
+
+/* undefined and never called for zero vector */
+static SW_FT_Int ft_trig_prenorm(SW_FT_Vector* vec)
+{
+ SW_FT_Pos x, y;
+ SW_FT_Int shift;
+
+ x = vec->x;
+ y = vec->y;
+
+ shift = SW_FT_MSB(SW_FT_ABS(x) | SW_FT_ABS(y));
+
+ if (shift <= SW_FT_TRIG_SAFE_MSB) {
+ shift = SW_FT_TRIG_SAFE_MSB - shift;
+ vec->x = (SW_FT_Pos)((SW_FT_ULong)x << shift);
+ vec->y = (SW_FT_Pos)((SW_FT_ULong)y << shift);
+ } else {
+ shift -= SW_FT_TRIG_SAFE_MSB;
+ vec->x = x >> shift;
+ vec->y = y >> shift;
+ shift = -shift;
+ }
+
+ return shift;
+}
+
+static void ft_trig_pseudo_rotate(SW_FT_Vector* vec, SW_FT_Angle theta)
+{
+ SW_FT_Int i;
+ SW_FT_Fixed x, y, xtemp, b;
+ const SW_FT_Fixed* arctanptr;
+
+ x = vec->x;
+ y = vec->y;
+
+ /* Rotate inside [-PI/4,PI/4] sector */
+ while (theta < -SW_FT_ANGLE_PI4) {
+ xtemp = y;
+ y = -x;
+ x = xtemp;
+ theta += SW_FT_ANGLE_PI2;
+ }
+
+ while (theta > SW_FT_ANGLE_PI4) {
+ xtemp = -y;
+ y = x;
+ x = xtemp;
+ theta -= SW_FT_ANGLE_PI2;
+ }
+
+ arctanptr = ft_trig_arctan_table;
+
+ /* Pseudorotations, with right shifts */
+ for (i = 1, b = 1; i < SW_FT_TRIG_MAX_ITERS; b <<= 1, i++) {
+ SW_FT_Fixed v1 = ((y + b) >> i);
+ SW_FT_Fixed v2 = ((x + b) >> i);
+ if (theta < 0) {
+ xtemp = x + v1;
+ y = y - v2;
+ x = xtemp;
+ theta += *arctanptr++;
+ } else {
+ xtemp = x - v1;
+ y = y + v2;
+ x = xtemp;
+ theta -= *arctanptr++;
+ }
+ }
+
+ vec->x = x;
+ vec->y = y;
+}
+
+static void ft_trig_pseudo_polarize(SW_FT_Vector* vec)
+{
+ SW_FT_Angle theta;
+ SW_FT_Int i;
+ SW_FT_Fixed x, y, xtemp, b;
+ const SW_FT_Fixed* arctanptr;
+
+ x = vec->x;
+ y = vec->y;
+
+ /* Get the vector into [-PI/4,PI/4] sector */
+ if (y > x) {
+ if (y > -x) {
+ theta = SW_FT_ANGLE_PI2;
+ xtemp = y;
+ y = -x;
+ x = xtemp;
+ } else {
+ theta = y > 0 ? SW_FT_ANGLE_PI : -SW_FT_ANGLE_PI;
+ x = -x;
+ y = -y;
+ }
+ } else {
+ if (y < -x) {
+ theta = -SW_FT_ANGLE_PI2;
+ xtemp = -y;
+ y = x;
+ x = xtemp;
+ } else {
+ theta = 0;
+ }
+ }
+
+ arctanptr = ft_trig_arctan_table;
+
+ /* Pseudorotations, with right shifts */
+ for (i = 1, b = 1; i < SW_FT_TRIG_MAX_ITERS; b <<= 1, i++) {
+ SW_FT_Fixed v1 = ((y + b) >> i);
+ SW_FT_Fixed v2 = ((x + b) >> i);
+ if (y > 0) {
+ xtemp = x + v1;
+ y = y - v2;
+ x = xtemp;
+ theta += *arctanptr++;
+ } else {
+ xtemp = x - v1;
+ y = y + v2;
+ x = xtemp;
+ theta -= *arctanptr++;
+ }
+ }
+
+ /* round theta */
+ if (theta >= 0)
+ theta = SW_FT_PAD_ROUND(theta, 32);
+ else
+ theta = -SW_FT_PAD_ROUND(-theta, 32);
+
+ vec->x = x;
+ vec->y = theta;
+}
+
+/* documentation is in fttrigon.h */
+
+SW_FT_Fixed SW_FT_Cos(SW_FT_Angle angle)
+{
+ SW_FT_Vector v;
+
+ v.x = SW_FT_TRIG_SCALE >> 8;
+ v.y = 0;
+ ft_trig_pseudo_rotate(&v, angle);
+
+ return (v.x + 0x80L) >> 8;
+}
+
+/* documentation is in fttrigon.h */
+
+SW_FT_Fixed SW_FT_Sin(SW_FT_Angle angle)
+{
+ return SW_FT_Cos(SW_FT_ANGLE_PI2 - angle);
+}
+
+/* documentation is in fttrigon.h */
+
+SW_FT_Fixed SW_FT_Tan(SW_FT_Angle angle)
+{
+ SW_FT_Vector v;
+
+ v.x = SW_FT_TRIG_SCALE >> 8;
+ v.y = 0;
+ ft_trig_pseudo_rotate(&v, angle);
+
+ return SW_FT_DivFix(v.y, v.x);
+}
+
+/* documentation is in fttrigon.h */
+
+SW_FT_Angle SW_FT_Atan2(SW_FT_Fixed dx, SW_FT_Fixed dy)
+{
+ SW_FT_Vector v;
+
+ if (dx == 0 && dy == 0) return 0;
+
+ v.x = dx;
+ v.y = dy;
+ ft_trig_prenorm(&v);
+ ft_trig_pseudo_polarize(&v);
+
+ return v.y;
+}
+
+/* documentation is in fttrigon.h */
+
+void SW_FT_Vector_Unit(SW_FT_Vector* vec, SW_FT_Angle angle)
+{
+ vec->x = SW_FT_TRIG_SCALE >> 8;
+ vec->y = 0;
+ ft_trig_pseudo_rotate(vec, angle);
+ vec->x = (vec->x + 0x80L) >> 8;
+ vec->y = (vec->y + 0x80L) >> 8;
+}
+
+/* these macros return 0 for positive numbers,
+ and -1 for negative ones */
+#define SW_FT_SIGN_LONG(x) ((x) >> (SW_FT_SIZEOF_LONG * 8 - 1))
+#define SW_FT_SIGN_INT(x) ((x) >> (SW_FT_SIZEOF_INT * 8 - 1))
+#define SW_FT_SIGN_INT32(x) ((x) >> 31)
+#define SW_FT_SIGN_INT16(x) ((x) >> 15)
+
+/* documentation is in fttrigon.h */
+
+void SW_FT_Vector_Rotate(SW_FT_Vector* vec, SW_FT_Angle angle)
+{
+ SW_FT_Int shift;
+ SW_FT_Vector v;
+
+ v.x = vec->x;
+ v.y = vec->y;
+
+ if (angle && (v.x != 0 || v.y != 0)) {
+ shift = ft_trig_prenorm(&v);
+ ft_trig_pseudo_rotate(&v, angle);
+ v.x = ft_trig_downscale(v.x);
+ v.y = ft_trig_downscale(v.y);
+
+ if (shift > 0) {
+ SW_FT_Int32 half = (SW_FT_Int32)1L << (shift - 1);
+
+ vec->x = (v.x + half + SW_FT_SIGN_LONG(v.x)) >> shift;
+ vec->y = (v.y + half + SW_FT_SIGN_LONG(v.y)) >> shift;
+ } else {
+ shift = -shift;
+ vec->x = (SW_FT_Pos)((SW_FT_ULong)v.x << shift);
+ vec->y = (SW_FT_Pos)((SW_FT_ULong)v.y << shift);
+ }
+ }
+}
+
+/* documentation is in fttrigon.h */
+
+SW_FT_Fixed SW_FT_Vector_Length(SW_FT_Vector* vec)
+{
+ SW_FT_Int shift;
+ SW_FT_Vector v;
+
+ v = *vec;
+
+ /* handle trivial cases */
+ if (v.x == 0) {
+ return SW_FT_ABS(v.y);
+ } else if (v.y == 0) {
+ return SW_FT_ABS(v.x);
+ }
+
+ /* general case */
+ shift = ft_trig_prenorm(&v);
+ ft_trig_pseudo_polarize(&v);
+
+ v.x = ft_trig_downscale(v.x);
+
+ if (shift > 0) return (v.x + (1 << (shift - 1))) >> shift;
+
+ return (SW_FT_Fixed)((SW_FT_UInt32)v.x << -shift);
+}
+
+/* documentation is in fttrigon.h */
+
+void SW_FT_Vector_Polarize(SW_FT_Vector* vec, SW_FT_Fixed* length,
+ SW_FT_Angle* angle)
+{
+ SW_FT_Int shift;
+ SW_FT_Vector v;
+
+ v = *vec;
+
+ if (v.x == 0 && v.y == 0) return;
+
+ shift = ft_trig_prenorm(&v);
+ ft_trig_pseudo_polarize(&v);
+
+ v.x = ft_trig_downscale(v.x);
+
+ *length = (shift >= 0) ? (v.x >> shift)
+ : (SW_FT_Fixed)((SW_FT_UInt32)v.x << -shift);
+ *angle = v.y;
+}
+
+/* documentation is in fttrigon.h */
+
+void SW_FT_Vector_From_Polar(SW_FT_Vector* vec, SW_FT_Fixed length,
+ SW_FT_Angle angle)
+{
+ vec->x = length;
+ vec->y = 0;
+
+ SW_FT_Vector_Rotate(vec, angle);
+}
+
+/* documentation is in fttrigon.h */
+
+SW_FT_Angle SW_FT_Angle_Diff( SW_FT_Angle angle1, SW_FT_Angle angle2 )
+{
+ SW_FT_Angle delta = angle2 - angle1;
+
+ while ( delta <= -SW_FT_ANGLE_PI )
+ delta += SW_FT_ANGLE_2PI;
+
+ while ( delta > SW_FT_ANGLE_PI )
+ delta -= SW_FT_ANGLE_2PI;
+
+ return delta;
+}
+
+/* END */
diff --git a/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_math.h b/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_math.h
new file mode 100644
index 00000000..0405c05b
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_math.h
@@ -0,0 +1,438 @@
+#ifndef V_FT_MATH_H
+#define V_FT_MATH_H
+
+/***************************************************************************/
+/* */
+/* fttrigon.h */
+/* */
+/* FreeType trigonometric functions (specification). */
+/* */
+/* Copyright 2001, 2003, 2005, 2007, 2013 by */
+/* David Turner, Robert Wilhelm, and Werner Lemberg. */
+/* */
+/* This file is part of the FreeType project, and may only be used, */
+/* modified, and distributed under the terms of the FreeType project */
+/* license, LICENSE.TXT. By continuing to use, modify, or distribute */
+/* this file you indicate that you have read the license and */
+/* understand and accept it fully. */
+/* */
+/***************************************************************************/
+
+#include "vector_freetype_v_ft_types.h"
+
+
+/*************************************************************************/
+/* */
+/* The min and max functions missing in C. As usual, be careful not to */
+/* write things like SW_FT_MIN( a++, b++ ) to avoid side effects. */
+/* */
+#define SW_FT_MIN( a, b ) ( (a) < (b) ? (a) : (b) )
+#define SW_FT_MAX( a, b ) ( (a) > (b) ? (a) : (b) )
+
+#define SW_FT_ABS( a ) ( (a) < 0 ? -(a) : (a) )
+
+/*
+ * Approximate sqrt(x*x+y*y) using the `alpha max plus beta min'
+ * algorithm. We use alpha = 1, beta = 3/8, giving us results with a
+ * largest error less than 7% compared to the exact value.
+ */
+#define SW_FT_HYPOT( x, y ) \
+ ( x = SW_FT_ABS( x ), \
+ y = SW_FT_ABS( y ), \
+ x > y ? x + ( 3 * y >> 3 ) \
+ : y + ( 3 * x >> 3 ) )
+
+/*************************************************************************/
+/* */
+/* <Function> */
+/* SW_FT_MulFix */
+/* */
+/* <Description> */
+/* A very simple function used to perform the computation */
+/* `(a*b)/0x10000' with maximum accuracy. Most of the time this is */
+/* used to multiply a given value by a 16.16 fixed-point factor. */
+/* */
+/* <Input> */
+/* a :: The first multiplier. */
+/* b :: The second multiplier. Use a 16.16 factor here whenever */
+/* possible (see note below). */
+/* */
+/* <Return> */
+/* The result of `(a*b)/0x10000'. */
+/* */
+/* <Note> */
+/* This function has been optimized for the case where the absolute */
+/* value of `a' is less than 2048, and `b' is a 16.16 scaling factor. */
+/* As this happens mainly when scaling from notional units to */
+/* fractional pixels in FreeType, it resulted in noticeable speed */
+/* improvements between versions 2.x and 1.x. */
+/* */
+/* As a conclusion, always try to place a 16.16 factor as the */
+/* _second_ argument of this function; this can make a great */
+/* difference. */
+/* */
+SW_FT_Long
+SW_FT_MulFix( SW_FT_Long a,
+ SW_FT_Long b );
+
+/*************************************************************************/
+/* */
+/* <Function> */
+/* SW_FT_MulDiv */
+/* */
+/* <Description> */
+/* A very simple function used to perform the computation `(a*b)/c' */
+/* with maximum accuracy (it uses a 64-bit intermediate integer */
+/* whenever necessary). */
+/* */
+/* This function isn't necessarily as fast as some processor specific */
+/* operations, but is at least completely portable. */
+/* */
+/* <Input> */
+/* a :: The first multiplier. */
+/* b :: The second multiplier. */
+/* c :: The divisor. */
+/* */
+/* <Return> */
+/* The result of `(a*b)/c'. This function never traps when trying to */
+/* divide by zero; it simply returns `MaxInt' or `MinInt' depending */
+/* on the signs of `a' and `b'. */
+/* */
+SW_FT_Long
+SW_FT_MulDiv( SW_FT_Long a,
+ SW_FT_Long b,
+ SW_FT_Long c );
+
+/*************************************************************************/
+/* */
+/* <Function> */
+/* SW_FT_DivFix */
+/* */
+/* <Description> */
+/* A very simple function used to perform the computation */
+/* `(a*0x10000)/b' with maximum accuracy. Most of the time, this is */
+/* used to divide a given value by a 16.16 fixed-point factor. */
+/* */
+/* <Input> */
+/* a :: The numerator. */
+/* b :: The denominator. Use a 16.16 factor here. */
+/* */
+/* <Return> */
+/* The result of `(a*0x10000)/b'. */
+/* */
+SW_FT_Long
+SW_FT_DivFix( SW_FT_Long a,
+ SW_FT_Long b );
+
+
+
+ /*************************************************************************/
+ /* */
+ /* <Section> */
+ /* computations */
+ /* */
+ /*************************************************************************/
+
+
+ /*************************************************************************
+ *
+ * @type:
+ * SW_FT_Angle
+ *
+ * @description:
+ * This type is used to model angle values in FreeType. Note that the
+ * angle is a 16.16 fixed-point value expressed in degrees.
+ *
+ */
+ typedef SW_FT_Fixed SW_FT_Angle;
+
+
+ /*************************************************************************
+ *
+ * @macro:
+ * SW_FT_ANGLE_PI
+ *
+ * @description:
+ * The angle pi expressed in @SW_FT_Angle units.
+ *
+ */
+#define SW_FT_ANGLE_PI ( 180L << 16 )
+
+
+ /*************************************************************************
+ *
+ * @macro:
+ * SW_FT_ANGLE_2PI
+ *
+ * @description:
+ * The angle 2*pi expressed in @SW_FT_Angle units.
+ *
+ */
+#define SW_FT_ANGLE_2PI ( SW_FT_ANGLE_PI * 2 )
+
+
+ /*************************************************************************
+ *
+ * @macro:
+ * SW_FT_ANGLE_PI2
+ *
+ * @description:
+ * The angle pi/2 expressed in @SW_FT_Angle units.
+ *
+ */
+#define SW_FT_ANGLE_PI2 ( SW_FT_ANGLE_PI / 2 )
+
+
+ /*************************************************************************
+ *
+ * @macro:
+ * SW_FT_ANGLE_PI4
+ *
+ * @description:
+ * The angle pi/4 expressed in @SW_FT_Angle units.
+ *
+ */
+#define SW_FT_ANGLE_PI4 ( SW_FT_ANGLE_PI / 4 )
+
+
+ /*************************************************************************
+ *
+ * @function:
+ * SW_FT_Sin
+ *
+ * @description:
+ * Return the sinus of a given angle in fixed-point format.
+ *
+ * @input:
+ * angle ::
+ * The input angle.
+ *
+ * @return:
+ * The sinus value.
+ *
+ * @note:
+ * If you need both the sinus and cosinus for a given angle, use the
+ * function @SW_FT_Vector_Unit.
+ *
+ */
+ SW_FT_Fixed
+ SW_FT_Sin( SW_FT_Angle angle );
+
+
+ /*************************************************************************
+ *
+ * @function:
+ * SW_FT_Cos
+ *
+ * @description:
+ * Return the cosinus of a given angle in fixed-point format.
+ *
+ * @input:
+ * angle ::
+ * The input angle.
+ *
+ * @return:
+ * The cosinus value.
+ *
+ * @note:
+ * If you need both the sinus and cosinus for a given angle, use the
+ * function @SW_FT_Vector_Unit.
+ *
+ */
+ SW_FT_Fixed
+ SW_FT_Cos( SW_FT_Angle angle );
+
+
+ /*************************************************************************
+ *
+ * @function:
+ * SW_FT_Tan
+ *
+ * @description:
+ * Return the tangent of a given angle in fixed-point format.
+ *
+ * @input:
+ * angle ::
+ * The input angle.
+ *
+ * @return:
+ * The tangent value.
+ *
+ */
+ SW_FT_Fixed
+ SW_FT_Tan( SW_FT_Angle angle );
+
+
+ /*************************************************************************
+ *
+ * @function:
+ * SW_FT_Atan2
+ *
+ * @description:
+ * Return the arc-tangent corresponding to a given vector (x,y) in
+ * the 2d plane.
+ *
+ * @input:
+ * x ::
+ * The horizontal vector coordinate.
+ *
+ * y ::
+ * The vertical vector coordinate.
+ *
+ * @return:
+ * The arc-tangent value (i.e. angle).
+ *
+ */
+ SW_FT_Angle
+ SW_FT_Atan2( SW_FT_Fixed x,
+ SW_FT_Fixed y );
+
+
+ /*************************************************************************
+ *
+ * @function:
+ * SW_FT_Angle_Diff
+ *
+ * @description:
+ * Return the difference between two angles. The result is always
+ * constrained to the ]-PI..PI] interval.
+ *
+ * @input:
+ * angle1 ::
+ * First angle.
+ *
+ * angle2 ::
+ * Second angle.
+ *
+ * @return:
+ * Constrained value of `value2-value1'.
+ *
+ */
+ SW_FT_Angle
+ SW_FT_Angle_Diff( SW_FT_Angle angle1,
+ SW_FT_Angle angle2 );
+
+
+ /*************************************************************************
+ *
+ * @function:
+ * SW_FT_Vector_Unit
+ *
+ * @description:
+ * Return the unit vector corresponding to a given angle. After the
+ * call, the value of `vec.x' will be `sin(angle)', and the value of
+ * `vec.y' will be `cos(angle)'.
+ *
+ * This function is useful to retrieve both the sinus and cosinus of a
+ * given angle quickly.
+ *
+ * @output:
+ * vec ::
+ * The address of target vector.
+ *
+ * @input:
+ * angle ::
+ * The input angle.
+ *
+ */
+ void
+ SW_FT_Vector_Unit( SW_FT_Vector* vec,
+ SW_FT_Angle angle );
+
+
+ /*************************************************************************
+ *
+ * @function:
+ * SW_FT_Vector_Rotate
+ *
+ * @description:
+ * Rotate a vector by a given angle.
+ *
+ * @inout:
+ * vec ::
+ * The address of target vector.
+ *
+ * @input:
+ * angle ::
+ * The input angle.
+ *
+ */
+ void
+ SW_FT_Vector_Rotate( SW_FT_Vector* vec,
+ SW_FT_Angle angle );
+
+
+ /*************************************************************************
+ *
+ * @function:
+ * SW_FT_Vector_Length
+ *
+ * @description:
+ * Return the length of a given vector.
+ *
+ * @input:
+ * vec ::
+ * The address of target vector.
+ *
+ * @return:
+ * The vector length, expressed in the same units that the original
+ * vector coordinates.
+ *
+ */
+ SW_FT_Fixed
+ SW_FT_Vector_Length( SW_FT_Vector* vec );
+
+
+ /*************************************************************************
+ *
+ * @function:
+ * SW_FT_Vector_Polarize
+ *
+ * @description:
+ * Compute both the length and angle of a given vector.
+ *
+ * @input:
+ * vec ::
+ * The address of source vector.
+ *
+ * @output:
+ * length ::
+ * The vector length.
+ *
+ * angle ::
+ * The vector angle.
+ *
+ */
+ void
+ SW_FT_Vector_Polarize( SW_FT_Vector* vec,
+ SW_FT_Fixed *length,
+ SW_FT_Angle *angle );
+
+
+ /*************************************************************************
+ *
+ * @function:
+ * SW_FT_Vector_From_Polar
+ *
+ * @description:
+ * Compute vector coordinates from a length and angle.
+ *
+ * @output:
+ * vec ::
+ * The address of source vector.
+ *
+ * @input:
+ * length ::
+ * The vector length.
+ *
+ * angle ::
+ * The vector angle.
+ *
+ */
+ void
+ SW_FT_Vector_From_Polar( SW_FT_Vector* vec,
+ SW_FT_Fixed length,
+ SW_FT_Angle angle );
+
+
+#endif // V_FT_MATH_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_raster.cpp b/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_raster.cpp
new file mode 100644
index 00000000..01f2f23b
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_raster.cpp
@@ -0,0 +1,1423 @@
+/***************************************************************************/
+/* */
+/* ftgrays.c */
+/* */
+/* A new `perfect' anti-aliasing renderer (body). */
+/* */
+/* Copyright 2000-2003, 2005-2014 by */
+/* David Turner, Robert Wilhelm, and Werner Lemberg. */
+/* */
+/* This file is part of the FreeType project, and may only be used, */
+/* modified, and distributed under the terms of the FreeType project */
+/* license, LICENSE.TXT. By continuing to use, modify, or distribute */
+/* this file you indicate that you have read the license and */
+/* understand and accept it fully. */
+/* */
+/***************************************************************************/
+
+/*************************************************************************/
+/* */
+/* This is a new anti-aliasing scan-converter for FreeType 2. The */
+/* algorithm used here is _very_ different from the one in the standard */
+/* `ftraster' module. Actually, `ftgrays' computes the _exact_ */
+/* coverage of the outline on each pixel cell. */
+/* */
+/* It is based on ideas that I initially found in Raph Levien's */
+/* excellent LibArt graphics library (see http://www.levien.com/libart */
+/* for more information, though the web pages do not tell anything */
+/* about the renderer; you'll have to dive into the source code to */
+/* understand how it works). */
+/* */
+/* Note, however, that this is a _very_ different implementation */
+/* compared to Raph's. Coverage information is stored in a very */
+/* different way, and I don't use sorted vector paths. Also, it doesn't */
+/* use floating point values. */
+/* */
+/* This renderer has the following advantages: */
+/* */
+/* - It doesn't need an intermediate bitmap. Instead, one can supply a */
+/* callback function that will be called by the renderer to draw gray */
+/* spans on any target surface. You can thus do direct composition on */
+/* any kind of bitmap, provided that you give the renderer the right */
+/* callback. */
+/* */
+/* - A perfect anti-aliaser, i.e., it computes the _exact_ coverage on */
+/* each pixel cell. */
+/* */
+/* - It performs a single pass on the outline (the `standard' FT2 */
+/* renderer makes two passes). */
+/* */
+/* - It can easily be modified to render to _any_ number of gray levels */
+/* cheaply. */
+/* */
+/* - For small (< 20) pixel sizes, it is faster than the standard */
+/* renderer. */
+/* */
+/*************************************************************************/
+
+#include "vector_freetype_v_ft_raster.h"
+#include "vector_freetype_v_ft_math.h"
+
+/* Auxiliary macros for token concatenation. */
+#define SW_FT_ERR_XCAT(x, y) x##y
+#define SW_FT_ERR_CAT(x, y) SW_FT_ERR_XCAT(x, y)
+
+#define SW_FT_BEGIN_STMNT do {
+#define SW_FT_END_STMNT \
+ } \
+ while (0)
+
+#include <limits.h>
+#include <setjmp.h>
+#include <stddef.h>
+#include <string.h>
+#define SW_FT_UINT_MAX UINT_MAX
+#define SW_FT_INT_MAX INT_MAX
+#define SW_FT_ULONG_MAX ULONG_MAX
+#define SW_FT_CHAR_BIT CHAR_BIT
+
+#define ft_memset memset
+
+#define ft_setjmp setjmp
+#define ft_longjmp longjmp
+#define ft_jmp_buf jmp_buf
+
+typedef ptrdiff_t SW_FT_PtrDist;
+
+#define ErrRaster_Invalid_Mode -2
+#define ErrRaster_Invalid_Outline -1
+#define ErrRaster_Invalid_Argument -3
+#define ErrRaster_Memory_Overflow -4
+
+#define SW_FT_BEGIN_HEADER
+#define SW_FT_END_HEADER
+
+/* This macro is used to indicate that a function parameter is unused. */
+/* Its purpose is simply to reduce compiler warnings. Note also that */
+/* simply defining it as `(void)x' doesn't avoid warnings with certain */
+/* ANSI compilers (e.g. LCC). */
+#define SW_FT_UNUSED(x) (x) = (x)
+
+#define SW_FT_THROW(e) SW_FT_ERR_CAT(ErrRaster_, e)
+
+/* The size in bytes of the render pool used by the scan-line converter */
+/* to do all of its work. */
+#define SW_FT_RENDER_POOL_SIZE 16384L
+
+typedef int (*SW_FT_Outline_MoveToFunc)(const SW_FT_Vector* to, void* user);
+
+#define SW_FT_Outline_MoveTo_Func SW_FT_Outline_MoveToFunc
+
+typedef int (*SW_FT_Outline_LineToFunc)(const SW_FT_Vector* to, void* user);
+
+#define SW_FT_Outline_LineTo_Func SW_FT_Outline_LineToFunc
+
+typedef int (*SW_FT_Outline_ConicToFunc)(const SW_FT_Vector* control,
+ const SW_FT_Vector* to, void* user);
+
+#define SW_FT_Outline_ConicTo_Func SW_FT_Outline_ConicToFunc
+
+typedef int (*SW_FT_Outline_CubicToFunc)(const SW_FT_Vector* control1,
+ const SW_FT_Vector* control2,
+ const SW_FT_Vector* to, void* user);
+
+#define SW_FT_Outline_CubicTo_Func SW_FT_Outline_CubicToFunc
+
+typedef struct SW_FT_Outline_Funcs_ {
+ SW_FT_Outline_MoveToFunc move_to;
+ SW_FT_Outline_LineToFunc line_to;
+ SW_FT_Outline_ConicToFunc conic_to;
+ SW_FT_Outline_CubicToFunc cubic_to;
+
+ int shift;
+ SW_FT_Pos delta;
+
+} SW_FT_Outline_Funcs;
+
+#define SW_FT_DEFINE_OUTLINE_FUNCS(class_, move_to_, line_to_, conic_to_, \
+ cubic_to_, shift_, delta_) \
+ static const SW_FT_Outline_Funcs class_ = {move_to_, line_to_, conic_to_, \
+ cubic_to_, shift_, delta_};
+
+#define SW_FT_DEFINE_RASTER_FUNCS(class_, raster_new_, raster_reset_, \
+ raster_render_, raster_done_) \
+ const SW_FT_Raster_Funcs class_ = {raster_new_, raster_reset_, \
+ raster_render_, raster_done_};
+
+#ifndef SW_FT_MEM_SET
+#define SW_FT_MEM_SET(d, s, c) ft_memset(d, s, c)
+#endif
+
+#ifndef SW_FT_MEM_ZERO
+#define SW_FT_MEM_ZERO(dest, count) SW_FT_MEM_SET(dest, 0, count)
+#endif
+
+/* as usual, for the speed hungry :-) */
+
+#undef RAS_ARG
+#undef RAS_ARG_
+#undef RAS_VAR
+#undef RAS_VAR_
+
+#ifndef SW_FT_STATIC_RASTER
+
+#define RAS_ARG gray_PWorker worker
+#define RAS_ARG_ gray_PWorker worker,
+
+#define RAS_VAR worker
+#define RAS_VAR_ worker,
+
+#else /* SW_FT_STATIC_RASTER */
+
+#define RAS_ARG /* empty */
+#define RAS_ARG_ /* empty */
+#define RAS_VAR /* empty */
+#define RAS_VAR_ /* empty */
+
+#endif /* SW_FT_STATIC_RASTER */
+
+/* must be at least 6 bits! */
+#define PIXEL_BITS 8
+
+#undef FLOOR
+#undef CEILING
+#undef TRUNC
+#undef SCALED
+
+#define ONE_PIXEL (1L << PIXEL_BITS)
+#define PIXEL_MASK (-1L << PIXEL_BITS)
+#define TRUNC(x) ((TCoord)((x) >> PIXEL_BITS))
+#define SUBPIXELS(x) ((TPos)(x) << PIXEL_BITS)
+#define FLOOR(x) ((x) & -ONE_PIXEL)
+#define CEILING(x) (((x) + ONE_PIXEL - 1) & -ONE_PIXEL)
+#define ROUND(x) (((x) + ONE_PIXEL / 2) & -ONE_PIXEL)
+
+#if PIXEL_BITS >= 6
+#define UPSCALE(x) ((x) << (PIXEL_BITS - 6))
+#define DOWNSCALE(x) ((x) >> (PIXEL_BITS - 6))
+#else
+#define UPSCALE(x) ((x) >> (6 - PIXEL_BITS))
+#define DOWNSCALE(x) ((x) << (6 - PIXEL_BITS))
+#endif
+
+/* Compute `dividend / divisor' and return both its quotient and */
+/* remainder, cast to a specific type. This macro also ensures that */
+/* the remainder is always positive. */
+#define SW_FT_DIV_MOD(type, dividend, divisor, quotient, remainder) \
+ SW_FT_BEGIN_STMNT(quotient) = (type)((dividend) / (divisor)); \
+ (remainder) = (type)((dividend) % (divisor)); \
+ if ((remainder) < 0) { \
+ (quotient)--; \
+ (remainder) += (type)(divisor); \
+ } \
+ SW_FT_END_STMNT
+
+#ifdef __arm__
+/* Work around a bug specific to GCC which make the compiler fail to */
+/* optimize a division and modulo operation on the same parameters */
+/* into a single call to `__aeabi_idivmod'. See */
+/* */
+/* http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43721 */
+#undef SW_FT_DIV_MOD
+#define SW_FT_DIV_MOD(type, dividend, divisor, quotient, remainder) \
+ SW_FT_BEGIN_STMNT(quotient) = (type)((dividend) / (divisor)); \
+ (remainder) = (type)((dividend) - (quotient) * (divisor)); \
+ if ((remainder) < 0) { \
+ (quotient)--; \
+ (remainder) += (type)(divisor); \
+ } \
+ SW_FT_END_STMNT
+#endif /* __arm__ */
+
+/* These macros speed up repetitive divisions by replacing them */
+/* with multiplications and right shifts. */
+#define SW_FT_UDIVPREP(b) \
+ long b##_r = (long)(SW_FT_ULONG_MAX >> PIXEL_BITS) / (b)
+#define SW_FT_UDIV(a, b) \
+ (((unsigned long)(a) * (unsigned long)(b##_r)) >> \
+ (sizeof(long) * SW_FT_CHAR_BIT - PIXEL_BITS))
+
+/*************************************************************************/
+/* */
+/* TYPE DEFINITIONS */
+/* */
+
+/* don't change the following types to SW_FT_Int or SW_FT_Pos, since we might */
+/* need to define them to "float" or "double" when experimenting with */
+/* new algorithms */
+
+typedef long TCoord; /* integer scanline/pixel coordinate */
+typedef long TPos; /* sub-pixel coordinate */
+
+/* determine the type used to store cell areas. This normally takes at */
+/* least PIXEL_BITS*2 + 1 bits. On 16-bit systems, we need to use */
+/* `long' instead of `int', otherwise bad things happen */
+
+#if PIXEL_BITS <= 7
+
+typedef int TArea;
+
+#else /* PIXEL_BITS >= 8 */
+
+/* approximately determine the size of integers using an ANSI-C header */
+#if SW_FT_UINT_MAX == 0xFFFFU
+typedef long TArea;
+#else
+typedef int TArea;
+#endif
+
+#endif /* PIXEL_BITS >= 8 */
+
+/* maximum number of gray spans in a call to the span callback */
+#define SW_FT_MAX_GRAY_SPANS 256
+
+typedef struct TCell_* PCell;
+
+/* A cell records the coverage accumulated for one pixel of one */
+/* scanline. Cells of a scanline form a singly linked list, kept */
+/* sorted by `x' (see gray_find_cell()). */
+typedef struct TCell_ {
+ TPos x; /* same with gray_TWorker.ex */
+ TCoord cover; /* same with gray_TWorker.cover */
+ TArea area;
+ PCell next;
+
+} TCell;
+
+#if defined(_MSC_VER) /* Visual C++ (and Intel C++) */
+/* We disable the warning `structure was padded due to */
+/* __declspec(align())' in order to compile cleanly with */
+/* the maximum level of warnings. */
+#pragma warning(push)
+#pragma warning(disable : 4324)
+#endif /* _MSC_VER */
+
+/* All of the rasterizer's working state. When SW_FT_STATIC_RASTER is */
+/* not defined, a pointer to this struct is threaded through every */
+/* helper via the RAS_ARG/RAS_VAR macros. */
+typedef struct gray_TWorker_ {
+ TCoord ex, ey; /* current cell position (biased by min_ex/min_ey) */
+ TPos min_ex, max_ex; /* outline bounding box, in integer pixels */
+ TPos min_ey, max_ey; /* (computed by gray_compute_cbox) */
+ TPos count_ex, count_ey;
+
+ TArea area; /* coverage accumulators for the current cell */
+ TCoord cover;
+ int invalid; /* current cell lies outside the clip region */
+
+ PCell cells; /* cell pool carved from `buffer' */
+ SW_FT_PtrDist max_cells;
+ SW_FT_PtrDist num_cells;
+
+ TPos x, y; /* current point, in subpixel units */
+
+ SW_FT_Vector bez_stack[32 * 3 + 1]; /* Bezier subdivision stack */
+ int lev_stack[32]; /* remaining split depth per arc */
+
+ SW_FT_Outline outline;
+ SW_FT_BBox clip_box;
+
+ int bound_left; /* bounding box of the spans actually */
+ int bound_top; /* emitted (updated in gray_hline) */
+ int bound_right;
+ int bound_bottom;
+
+ SW_FT_Span gray_spans[SW_FT_MAX_GRAY_SPANS];
+ int num_gray_spans;
+
+ SW_FT_Raster_Span_Func render_span; /* user span callback + its data */
+ void* render_span_data;
+
+ int band_size;
+ int band_shoot;
+
+ ft_jmp_buf jump_buffer; /* longjmp target on cell-pool overflow */
+
+ void* buffer; /* caller-supplied render pool */
+ long buffer_size;
+
+ PCell* ycells; /* per-scanline sorted cell lists */
+ TPos ycount;
+
+} gray_TWorker, *gray_PWorker;
+
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
+
+/* In the re-entrant build, `ras' aliases the worker passed via RAS_ARG; */
+/* in the static build it is a single global worker instance. */
+#ifndef SW_FT_STATIC_RASTER
+#define ras (*worker)
+#else
+static gray_TWorker ras;
+#endif
+
+typedef struct gray_TRaster_ {
+ void* memory;
+
+} gray_TRaster, *gray_PRaster;
+
+/*************************************************************************/
+/* */
+/* Initialize the cells table. */
+/* */
+/* Attach the caller-supplied render pool `buffer' to the worker and */
+/* reset all per-render accumulators. The span bounding box starts at */
+/* inverted extremes so the first emitted span initializes it. */
+static void gray_init_cells(RAS_ARG_ void* buffer, long byte_size)
+{
+ ras.buffer = buffer;
+ ras.buffer_size = byte_size;
+
+ /* the scanline table lives at the start of the pool */
+ ras.ycells = (PCell*)buffer;
+ ras.cells = NULL;
+ ras.max_cells = 0;
+ ras.num_cells = 0;
+ ras.area = 0;
+ ras.cover = 0;
+ ras.invalid = 1; /* no current cell yet */
+
+ ras.bound_left = INT_MAX;
+ ras.bound_top = INT_MAX;
+ ras.bound_right = INT_MIN;
+ ras.bound_bottom = INT_MIN;
+}
+
+/*************************************************************************/
+/* */
+/* Compute the outline bounding box. */
+/* */
+/* Compute the outline's control box (min/max of all points) and store */
+/* it, truncated to integer pixels, in ras.min_ex..ras.max_ey. The */
+/* final shifts assume point coordinates carry 6 fractional bits. */
+static void gray_compute_cbox(RAS_ARG)
+{
+ SW_FT_Outline* outline = &ras.outline;
+ SW_FT_Vector* vec = outline->points;
+ SW_FT_Vector* limit = vec + outline->n_points;
+
+ /* empty outline: degenerate zero box */
+ if (outline->n_points <= 0) {
+ ras.min_ex = ras.max_ex = 0;
+ ras.min_ey = ras.max_ey = 0;
+ return;
+ }
+
+ /* seed the box with the first point, then fold in the rest */
+ ras.min_ex = ras.max_ex = vec->x;
+ ras.min_ey = ras.max_ey = vec->y;
+
+ vec++;
+
+ for (; vec < limit; vec++) {
+ TPos x = vec->x;
+ TPos y = vec->y;
+
+ if (x < ras.min_ex) ras.min_ex = x;
+ if (x > ras.max_ex) ras.max_ex = x;
+ if (y < ras.min_ey) ras.min_ey = y;
+ if (y > ras.max_ey) ras.max_ey = y;
+ }
+
+ /* truncate the bounding box to integer pixels */
+ /* (floor on the minima, ceiling on the maxima) */
+ ras.min_ex = ras.min_ex >> 6;
+ ras.min_ey = ras.min_ey >> 6;
+ ras.max_ex = (ras.max_ex + 63) >> 6;
+ ras.max_ey = (ras.max_ey + 63) >> 6;
+}
+
+/*************************************************************************/
+/* */
+/* Record the current cell in the table. */
+/* */
+/* Find the cell for the current position (ras.ex, ras.ey), creating */
+/* and inserting it if absent. Each scanline's list is kept sorted by */
+/* x, so lookup walks until it passes the target. If the cell pool is */
+/* exhausted, longjmp back to the setjmp established by the caller of */
+/* the decomposition (the render pass then retries/aborts). */
+static PCell gray_find_cell(RAS_ARG)
+{
+ PCell *pcell, cell;
+ TPos x = ras.ex;
+
+ /* clamp x so out-of-range cells share the rightmost slot */
+ if (x > ras.count_ex) x = ras.count_ex;
+
+ pcell = &ras.ycells[ras.ey];
+ for (;;) {
+ cell = *pcell;
+ if (cell == NULL || cell->x > x) break; /* insertion point found */
+
+ if (cell->x == x) goto Exit; /* cell already exists */
+
+ pcell = &cell->next;
+ }
+
+ if (ras.num_cells >= ras.max_cells) ft_longjmp(ras.jump_buffer, 1);
+
+ /* take a fresh cell from the pool and splice it into the list */
+ cell = ras.cells + ras.num_cells++;
+ cell->x = x;
+ cell->area = 0;
+ cell->cover = 0;
+
+ cell->next = *pcell;
+ *pcell = cell;
+
+Exit:
+ return cell;
+}
+
+/* Flush the worker's accumulated area/cover into the cell table. */
+/* No-op when both accumulators are zero (nothing to record). */
+static void gray_record_cell(RAS_ARG)
+{
+ if (ras.area | ras.cover) {
+ PCell cell = gray_find_cell(RAS_VAR);
+
+ cell->area += ras.area;
+ cell->cover += ras.cover;
+ }
+}
+
+/*************************************************************************/
+/* */
+/* Set the current cell to a new position. */
+/* */
+static void gray_set_cell(RAS_ARG_ TCoord ex, TCoord ey)
+{
+ /* Move the cell pointer to a new position. We set the `invalid' */
+ /* flag to indicate that the cell isn't part of those we're interested */
+ /* in during the render phase. This means that: */
+ /* */
+ /* . the new vertical position must be within min_ey..max_ey-1. */
+ /* . the new horizontal position must be strictly less than max_ex */
+ /* */
+ /* Note that if a cell is to the left of the clipping region, it is */
+ /* actually set to the (min_ex-1) horizontal position. */
+
+ /* All cells that are on the left of the clipping region go to the */
+ /* min_ex - 1 horizontal position. */
+ ey -= ras.min_ey;
+
+ if (ex > ras.max_ex) ex = ras.max_ex;
+
+ /* bias coordinates relative to the clip origin */
+ ex -= ras.min_ex;
+ if (ex < 0) ex = -1;
+
+ /* are we moving to a different cell ? */
+ if (ex != ras.ex || ey != ras.ey) {
+ /* record the current one if it is valid */
+ if (!ras.invalid) gray_record_cell(RAS_VAR);
+
+ /* start accumulating coverage for the new cell */
+ ras.area = 0;
+ ras.cover = 0;
+ ras.ex = ex;
+ ras.ey = ey;
+ }
+
+ ras.invalid =
+ ((unsigned)ey >= (unsigned)ras.count_ey || ex >= ras.count_ex);
+}
+
+/*************************************************************************/
+/* */
+/* Start a new contour at a given cell. */
+/* */
+static void gray_start_cell(RAS_ARG_ TCoord ex, TCoord ey)
+{
+ /* clamp the horizontal start position into (min_ex-1)..max_ex */
+ if (ex > ras.max_ex) ex = (TCoord)(ras.max_ex);
+
+ if (ex < ras.min_ex) ex = (TCoord)(ras.min_ex - 1);
+
+ /* fresh accumulators for the first cell of the contour */
+ ras.area = 0;
+ ras.cover = 0;
+ ras.ex = ex - ras.min_ex;
+ ras.ey = ey - ras.min_ey;
+ ras.invalid = 0;
+
+ gray_set_cell(RAS_VAR_ ex, ey);
+}
+
+/*************************************************************************/
+/* */
+/* Render a straight line across multiple cells in any direction. */
+/* */
+/* Render the line from the current point (ras.x, ras.y) to */
+/* (to_x, to_y), in subpixel coordinates, accumulating cover/area in */
+/* every cell the segment crosses. On exit the current point is */
+/* advanced to (to_x, to_y) even when the segment was clipped away. */
+static void gray_render_line(RAS_ARG_ TPos to_x, TPos to_y)
+{
+ TPos dx, dy, fx1, fy1, fx2, fy2;
+ TCoord ex1, ex2, ey1, ey2;
+
+ /* integer cell coordinates of both endpoints */
+ ex1 = TRUNC(ras.x);
+ ex2 = TRUNC(to_x);
+ ey1 = TRUNC(ras.y);
+ ey2 = TRUNC(to_y);
+
+ /* perform vertical clipping */
+ if ((ey1 >= ras.max_ey && ey2 >= ras.max_ey) ||
+ (ey1 < ras.min_ey && ey2 < ras.min_ey))
+ goto End;
+
+ dx = to_x - ras.x;
+ dy = to_y - ras.y;
+
+ /* fractional position of the start point inside its cell */
+ fx1 = ras.x - SUBPIXELS(ex1);
+ fy1 = ras.y - SUBPIXELS(ey1);
+
+ if (ex1 == ex2 && ey1 == ey2) /* inside one cell */
+ ;
+ else if (dy == 0) /* ex1 != ex2 */ /* any horizontal line */
+ {
+ ex1 = ex2;
+ gray_set_cell(RAS_VAR_ ex1, ey1);
+ } else if (dx == 0) {
+ if (dy > 0) /* vertical line up */
+ do {
+ fy2 = ONE_PIXEL;
+ ras.cover += (fy2 - fy1);
+ ras.area += (fy2 - fy1) * fx1 * 2;
+ fy1 = 0;
+ ey1++;
+ gray_set_cell(RAS_VAR_ ex1, ey1);
+ } while (ey1 != ey2);
+ else /* vertical line down */
+ do {
+ fy2 = 0;
+ ras.cover += (fy2 - fy1);
+ ras.area += (fy2 - fy1) * fx1 * 2;
+ fy1 = ONE_PIXEL;
+ ey1--;
+ gray_set_cell(RAS_VAR_ ex1, ey1);
+ } while (ey1 != ey2);
+ } else /* any other line */
+ {
+ TArea prod = dx * fy1 - dy * fx1;
+ SW_FT_UDIVPREP(dx); /* precompute reciprocals so the inner */
+ SW_FT_UDIVPREP(dy); /* loop divides via multiply + shift */
+
+ /* The fundamental value `prod' determines which side and the */
+ /* exact coordinate where the line exits current cell. It is */
+ /* also easily updated when moving from one cell to the next. */
+ do {
+ if (prod <= 0 && prod - dx * ONE_PIXEL > 0) /* left */
+ {
+ fx2 = 0;
+ fy2 = (TPos)SW_FT_UDIV(-prod, -dx);
+ prod -= dy * ONE_PIXEL;
+ ras.cover += (fy2 - fy1);
+ ras.area += (fy2 - fy1) * (fx1 + fx2);
+ fx1 = ONE_PIXEL;
+ fy1 = fy2;
+ ex1--;
+ } else if (prod - dx * ONE_PIXEL <= 0 &&
+ prod - dx * ONE_PIXEL + dy * ONE_PIXEL > 0) /* up */
+ {
+ prod -= dx * ONE_PIXEL;
+ fx2 = (TPos)SW_FT_UDIV(-prod, dy);
+ fy2 = ONE_PIXEL;
+ ras.cover += (fy2 - fy1);
+ ras.area += (fy2 - fy1) * (fx1 + fx2);
+ fx1 = fx2;
+ fy1 = 0;
+ ey1++;
+ } else if (prod - dx * ONE_PIXEL + dy * ONE_PIXEL <= 0 &&
+ prod + dy * ONE_PIXEL >= 0) /* right */
+ {
+ prod += dy * ONE_PIXEL;
+ fx2 = ONE_PIXEL;
+ fy2 = (TPos)SW_FT_UDIV(prod, dx);
+ ras.cover += (fy2 - fy1);
+ ras.area += (fy2 - fy1) * (fx1 + fx2);
+ fx1 = 0;
+ fy1 = fy2;
+ ex1++;
+ } else /* ( prod + dy * ONE_PIXEL < 0 &&
+ prod > 0 ) down */
+ {
+ fx2 = (TPos)SW_FT_UDIV(prod, -dy);
+ fy2 = 0;
+ prod += dx * ONE_PIXEL;
+ ras.cover += (fy2 - fy1);
+ ras.area += (fy2 - fy1) * (fx1 + fx2);
+ fx1 = fx2;
+ fy1 = ONE_PIXEL;
+ ey1--;
+ }
+
+ gray_set_cell(RAS_VAR_ ex1, ey1);
+ } while (ex1 != ex2 || ey1 != ey2);
+ }
+
+ /* accumulate the final partial cell up to the end point */
+ fx2 = to_x - SUBPIXELS(ex2);
+ fy2 = to_y - SUBPIXELS(ey2);
+
+ ras.cover += (fy2 - fy1);
+ ras.area += (fy2 - fy1) * (fx1 + fx2);
+
+End:
+ ras.x = to_x;
+ ras.y = to_y;
+}
+
+/* Split one quadratic arc (base[0..2]) in place into two halves */
+/* (base[0..2] and base[2..4]) by midpoint subdivision; only shifts */
+/* and adds, no divisions. */
+static void gray_split_conic(SW_FT_Vector* base)
+{
+ TPos a, b;
+
+ base[4].x = base[2].x;
+ a = base[0].x + base[1].x;
+ b = base[1].x + base[2].x;
+ base[3].x = b >> 1;
+ base[2].x = ( a + b ) >> 2;
+ base[1].x = a >> 1;
+
+ /* same computation for the y components */
+ base[4].y = base[2].y;
+ a = base[0].y + base[1].y;
+ b = base[1].y + base[2].y;
+ base[3].y = b >> 1;
+ base[2].y = ( a + b ) >> 2;
+ base[1].y = a >> 1;
+}
+
+/* Render a quadratic Bezier arc from the current point to `to' with */
+/* control point `control', by recursively splitting it (via an */
+/* explicit stack in ras.bez_stack) until each piece is flat enough */
+/* to be drawn as a line segment. */
+static void gray_render_conic(RAS_ARG_ const SW_FT_Vector* control,
+ const SW_FT_Vector* to)
+{
+ TPos dx, dy;
+ TPos min, max, y;
+ int top, level;
+ int* levels;
+ SW_FT_Vector* arc;
+
+ levels = ras.lev_stack;
+
+ /* push the arc in reverse: arc[0] = end, arc[2] = start */
+ arc = ras.bez_stack;
+ arc[0].x = UPSCALE(to->x);
+ arc[0].y = UPSCALE(to->y);
+ arc[1].x = UPSCALE(control->x);
+ arc[1].y = UPSCALE(control->y);
+ arc[2].x = ras.x;
+ arc[2].y = ras.y;
+ top = 0;
+
+ /* deviation estimate: distance of the control point from the chord */
+ dx = SW_FT_ABS(arc[2].x + arc[0].x - 2 * arc[1].x);
+ dy = SW_FT_ABS(arc[2].y + arc[0].y - 2 * arc[1].y);
+ if (dx < dy) dx = dy;
+
+ /* already flat enough: draw directly */
+ if (dx < ONE_PIXEL / 4) goto Draw;
+
+ /* short-cut the arc that crosses the current band */
+ min = max = arc[0].y;
+
+ y = arc[1].y;
+ if (y < min) min = y;
+ if (y > max) max = y;
+
+ y = arc[2].y;
+ if (y < min) min = y;
+ if (y > max) max = y;
+
+ if (TRUNC(min) >= ras.max_ey || TRUNC(max) < ras.min_ey) goto Draw;
+
+ /* each split divides the deviation by 4, so precompute the depth */
+ level = 0;
+ do {
+ dx >>= 2;
+ level++;
+ } while (dx > ONE_PIXEL / 4);
+
+ levels[0] = level;
+
+ do {
+ level = levels[top];
+ if (level > 0) {
+ /* still too curvy: split and push the first half */
+ gray_split_conic(arc);
+ arc += 2;
+ top++;
+ levels[top] = levels[top - 1] = level - 1;
+ continue;
+ }
+
+ Draw:
+ /* flat enough: emit as a straight segment and pop */
+ gray_render_line(RAS_VAR_ arc[0].x, arc[0].y);
+ top--;
+ arc -= 2;
+
+ } while (top >= 0);
+}
+
+/* Split one cubic arc (base[0..3]) in place into two halves */
+/* (base[0..3] and base[3..6]) by midpoint subdivision; only shifts */
+/* and adds, no divisions. */
+static void gray_split_cubic(SW_FT_Vector* base)
+{
+ TPos a, b, c;
+
+
+ base[6].x = base[3].x;
+ a = base[0].x + base[1].x;
+ b = base[1].x + base[2].x;
+ c = base[2].x + base[3].x;
+ base[5].x = c >> 1;
+ c += b;
+ base[4].x = c >> 2;
+ base[1].x = a >> 1;
+ a += b;
+ base[2].x = a >> 2;
+ base[3].x = ( a + c ) >> 3;
+
+ /* same computation for the y components */
+ base[6].y = base[3].y;
+ a = base[0].y + base[1].y;
+ b = base[1].y + base[2].y;
+ c = base[2].y + base[3].y;
+ base[5].y = c >> 1;
+ c += b;
+ base[4].y = c >> 2;
+ base[1].y = a >> 1;
+ a += b;
+ base[2].y = a >> 2;
+ base[3].y = ( a + c ) >> 3;
+}
+
+
+/* Render a cubic Bezier arc from the current point to `to' with */
+/* control points `control1'/`control2', splitting on the bez_stack */
+/* until each piece passes the flatness test, then drawing lines. */
+static void
+gray_render_cubic(RAS_ARG_ const SW_FT_Vector* control1,
+ const SW_FT_Vector* control2,
+ const SW_FT_Vector* to)
+{
+ SW_FT_Vector* arc = ras.bez_stack;
+
+ /* push the arc in reverse: arc[0] = end, arc[3] = start */
+ arc[0].x = UPSCALE( to->x );
+ arc[0].y = UPSCALE( to->y );
+ arc[1].x = UPSCALE( control2->x );
+ arc[1].y = UPSCALE( control2->y );
+ arc[2].x = UPSCALE( control1->x );
+ arc[2].y = UPSCALE( control1->y );
+ arc[3].x = ras.x;
+ arc[3].y = ras.y;
+
+ /* short-cut the arc that crosses the current band */
+ if ( ( TRUNC( arc[0].y ) >= ras.max_ey &&
+ TRUNC( arc[1].y ) >= ras.max_ey &&
+ TRUNC( arc[2].y ) >= ras.max_ey &&
+ TRUNC( arc[3].y ) >= ras.max_ey ) ||
+ ( TRUNC( arc[0].y ) < ras.min_ey &&
+ TRUNC( arc[1].y ) < ras.min_ey &&
+ TRUNC( arc[2].y ) < ras.min_ey &&
+ TRUNC( arc[3].y ) < ras.min_ey ) )
+ {
+ /* entirely outside the band: just advance the current point */
+ ras.x = arc[0].x;
+ ras.y = arc[0].y;
+ return;
+ }
+
+ for (;;)
+ {
+ /* with each split, control points quickly converge towards */
+ /* chord trisection points and the vanishing distances below */
+ /* indicate when the segment is flat enough to draw */
+ if ( SW_FT_ABS( 2 * arc[0].x - 3 * arc[1].x + arc[3].x ) > ONE_PIXEL / 2 ||
+ SW_FT_ABS( 2 * arc[0].y - 3 * arc[1].y + arc[3].y ) > ONE_PIXEL / 2 ||
+ SW_FT_ABS( arc[0].x - 3 * arc[2].x + 2 * arc[3].x ) > ONE_PIXEL / 2 ||
+ SW_FT_ABS( arc[0].y - 3 * arc[2].y + 2 * arc[3].y ) > ONE_PIXEL / 2 )
+ goto Split;
+
+ /* flat enough: emit as a straight segment and pop the stack */
+ gray_render_line( RAS_VAR_ arc[0].x, arc[0].y );
+
+ if ( arc == ras.bez_stack )
+ return;
+
+ arc -= 3;
+ continue;
+
+ Split:
+ gray_split_cubic( arc );
+ arc += 3;
+ }
+}
+
+/* Outline decomposition callback: begin a new contour at `to'. */
+/* Always returns 0 (success). */
+static int gray_move_to(const SW_FT_Vector* to, gray_PWorker worker)
+{
+ TPos x, y;
+
+ /* record current cell, if any */
+ if (!ras.invalid) gray_record_cell(RAS_VAR);
+
+ /* start to a new position */
+ x = UPSCALE(to->x);
+ y = UPSCALE(to->y);
+
+ gray_start_cell(RAS_VAR_ TRUNC(x), TRUNC(y));
+
+ worker->x = x;
+ worker->y = y;
+ return 0;
+}
+
+/* Outline decomposition callback: line segment to `to'. Returns 0. */
+static int gray_line_to(const SW_FT_Vector* to, gray_PWorker worker)
+{
+ gray_render_line(RAS_VAR_ UPSCALE(to->x), UPSCALE(to->y));
+ return 0;
+}
+
+/* Decomposition callback: second-order (conic) Bezier to `to' with */
+/* control point `control'.  Always returns 0 (success).            */
+static int gray_conic_to(const SW_FT_Vector* control, const SW_FT_Vector* to,
+                         gray_PWorker worker)
+{
+    gray_render_conic(RAS_VAR_ control, to);
+    return 0;
+}
+
+/* Decomposition callback: third-order (cubic) Bezier to `to' with  */
+/* control points `control1' and `control2'.  Always returns 0.     */
+static int gray_cubic_to(const SW_FT_Vector* control1,
+                         const SW_FT_Vector* control2, const SW_FT_Vector* to,
+                         gray_PWorker worker)
+{
+    gray_render_cubic(RAS_VAR_ control1, control2, to);
+    return 0;
+}
+
+/* Emit a horizontal run of `acount' pixels starting at cell (x, y)  */
+/* whose accumulated signed area is `area'.  The area is converted   */
+/* to a 0..255 coverage value according to the outline's fill rule;  */
+/* non-zero runs are merged into the current span list, which is     */
+/* flushed through `render_span' when it fills up.  Also grows the   */
+/* bounding box reported to the bbox callback.                       */
+static void gray_hline(RAS_ARG_ TCoord x, TCoord y, TPos area, TCoord acount)
+{
+    int coverage;
+
+    /* compute the coverage line's coverage, depending on the */
+    /* outline fill rule */
+    /* */
+    /* the coverage percentage is area/(PIXEL_BITS*PIXEL_BITS*2) */
+    /* */
+    coverage = (int)(area >> (PIXEL_BITS * 2 + 1 - 8));
+    /* use range 0..256 */
+    if (coverage < 0) coverage = -coverage;
+
+    if (ras.outline.flags & SW_FT_OUTLINE_EVEN_ODD_FILL) {
+        /* even-odd: coverage wraps every 512 and is mirrored at 256 */
+        coverage &= 511;
+
+        if (coverage > 256)
+            coverage = 512 - coverage;
+        else if (coverage == 256)
+            coverage = 255;
+    } else {
+        /* normal non-zero winding rule */
+        if (coverage >= 256) coverage = 255;
+    }
+
+    /* translate band-relative cell coordinates back to absolute pixels */
+    y += (TCoord)ras.min_ey;
+    x += (TCoord)ras.min_ex;
+
+    /* SW_FT_Span.x is a 16-bit short, so limit our coordinates appropriately */
+    if (x >= 32767) x = 32767;
+
+    /* SW_FT_Span.y is an integer, so limit our coordinates appropriately */
+    if (y >= SW_FT_INT_MAX) y = SW_FT_INT_MAX;
+
+    if (coverage) {
+        SW_FT_Span* span;
+        int count;
+
+        // update bounding box.
+        if (x < ras.bound_left) ras.bound_left = x;
+        if (y < ras.bound_top) ras.bound_top = y;
+        if (y > ras.bound_bottom) ras.bound_bottom = y;
+        if (x + acount > ras.bound_right) ras.bound_right = x + acount;
+
+        /* see whether we can add this span to the current list */
+        /* NOTE(review): when count == 0, `span' points one element     */
+        /* before the buffer; only the short-circuit `count > 0' below  */
+        /* keeps it from being read then.                               */
+        count = ras.num_gray_spans;
+        span = ras.gray_spans + count - 1;
+        if (count > 0 && span->y == y && (int)span->x + span->len == (int)x &&
+            span->coverage == coverage) {
+            span->len = (unsigned short)(span->len + acount);
+            return;
+        }
+
+        if (count >= SW_FT_MAX_GRAY_SPANS) {
+            /* span buffer is full: hand the spans to the client */
+            /* callback and restart the list                     */
+            if (ras.render_span && count > 0)
+                ras.render_span(count, ras.gray_spans, ras.render_span_data);
+
+#ifdef DEBUG_GRAYS
+
+            if (1) {
+                int n;
+
+                fprintf(stderr, "count = %3d ", count);
+                span = ras.gray_spans;
+                for (n = 0; n < count; n++, span++)
+                    fprintf(stderr, "[%d , %d..%d] : %d ", span->y, span->x,
+                            span->x + span->len - 1, span->coverage);
+                fprintf(stderr, "\n");
+            }
+
+#endif /* DEBUG_GRAYS */
+
+            ras.num_gray_spans = 0;
+
+            span = ras.gray_spans;
+        } else
+            span++;
+
+        /* add a gray span to the current list */
+        span->x = (short)x;
+        span->y = (short)y;
+        span->len = (unsigned short)acount;
+        span->coverage = (unsigned char)coverage;
+
+        ras.num_gray_spans++;
+    }
+}
+
+/* Sweep the recorded cells of the current band into spans.  For each */
+/* scanline, the sorted cell list is walked left to right while the   */
+/* winding `cover' is integrated; gray_hline() is called for the gap  */
+/* between cells and for each cell's own pixel.  Any spans still      */
+/* buffered at the end are flushed to the client callback.            */
+static void gray_sweep(RAS_ARG)
+{
+    int yindex;
+
+    if (ras.num_cells == 0) return;
+
+    ras.num_gray_spans = 0;
+
+    for (yindex = 0; yindex < ras.ycount; yindex++) {
+        PCell cell = ras.ycells[yindex];
+        TCoord cover = 0;
+        TCoord x = 0;
+
+        for (; cell != NULL; cell = cell->next) {
+            TPos area;
+
+            /* fully covered run between the previous cell and this one */
+            if (cell->x > x && cover != 0)
+                gray_hline(RAS_VAR_ x, yindex, cover * (ONE_PIXEL * 2),
+                           cell->x - x);
+
+            cover += cell->cover;
+            area = cover * (ONE_PIXEL * 2) - cell->area;
+
+            /* the cell's own (partially covered) pixel */
+            if (area != 0 && cell->x >= 0)
+                gray_hline(RAS_VAR_ cell->x, yindex, area, 1);
+
+            x = cell->x + 1;
+        }
+
+        /* close the scanline if the winding is still open */
+        if (cover != 0)
+            gray_hline(RAS_VAR_ x, yindex, cover * (ONE_PIXEL * 2),
+                       ras.count_ex - x);
+    }
+
+    if (ras.render_span && ras.num_gray_spans > 0)
+        ras.render_span(ras.num_gray_spans, ras.gray_spans,
+                        ras.render_span_data);
+}
+
+/*************************************************************************/
+/* */
+/* The following function should only compile in stand-alone mode, */
+/* i.e., when building this component without the rest of FreeType. */
+/* */
+/*************************************************************************/
+
+/*************************************************************************/
+/* */
+/* <Function> */
+/* SW_FT_Outline_Decompose */
+/* */
+/* <Description> */
+/* Walk over an outline's structure to decompose it into individual */
+/* segments and Bézier arcs. This function is also able to emit */
+/* `move to' and `close to' operations to indicate the start and end */
+/* of new contours in the outline. */
+/* */
+/* <Input> */
+/* outline :: A pointer to the source target. */
+/* */
+/* func_interface :: A table of `emitters', i.e., function pointers */
+/* called during decomposition to indicate path */
+/* operations. */
+/* */
+/* <InOut> */
+/* user :: A typeless pointer which is passed to each */
+/* emitter during the decomposition. It can be */
+/* used to store the state during the */
+/* decomposition. */
+/* */
+/* <Return> */
+/* Error code. 0 means success. */
+/* */
+/* See the descriptive comment block above: walks every contour of    */
+/* `outline' and replays it through the `func_interface' emitters,    */
+/* resolving FreeType's implicit on-points between successive conic   */
+/* controls.  Returns 0 on success, or the first emitter/validation   */
+/* error.                                                             */
+static int SW_FT_Outline_Decompose(const SW_FT_Outline* outline,
+                                   const SW_FT_Outline_Funcs* func_interface,
+                                   void* user)
+{
+#undef SCALED
+#define SCALED(x) (((x) << shift) - delta)
+
+    SW_FT_Vector v_last;
+    SW_FT_Vector v_control;
+    SW_FT_Vector v_start;
+
+    SW_FT_Vector* point;
+    SW_FT_Vector* limit;
+    char* tags;
+
+    int error;
+
+    int n; /* index of contour in outline */
+    int first; /* index of first point in contour */
+    char tag; /* current point's state */
+
+    int shift;
+    TPos delta;
+
+    if (!outline || !func_interface) return SW_FT_THROW(Invalid_Argument);
+
+    shift = func_interface->shift;
+    delta = func_interface->delta;
+    first = 0;
+
+    for (n = 0; n < outline->n_contours; n++) {
+        int last; /* index of last point in contour */
+
+        last = outline->contours[n];
+        if (last < 0) goto Invalid_Outline;
+        limit = outline->points + last;
+
+        v_start = outline->points[first];
+        v_start.x = SCALED(v_start.x);
+        v_start.y = SCALED(v_start.y);
+
+        v_last = outline->points[last];
+        v_last.x = SCALED(v_last.x);
+        v_last.y = SCALED(v_last.y);
+
+        v_control = v_start;
+
+        point = outline->points + first;
+        tags = outline->tags + first;
+        tag = SW_FT_CURVE_TAG(tags[0]);
+
+        /* A contour cannot start with a cubic control point! */
+        if (tag == SW_FT_CURVE_TAG_CUBIC) goto Invalid_Outline;
+
+        /* check first point to determine origin */
+        if (tag == SW_FT_CURVE_TAG_CONIC) {
+            /* first point is conic control. Yes, this happens. */
+            if (SW_FT_CURVE_TAG(outline->tags[last]) == SW_FT_CURVE_TAG_ON) {
+                /* start at last point if it is on the curve */
+                v_start = v_last;
+                limit--;
+            } else {
+                /* if both first and last points are conic, */
+                /* start at their middle and record its position */
+                /* for closure */
+                v_start.x = (v_start.x + v_last.x) / 2;
+                v_start.y = (v_start.y + v_last.y) / 2;
+            }
+            /* rewind so the loop re-reads the first point as the */
+            /* conic control it actually is                       */
+            point--;
+            tags--;
+        }
+
+        error = func_interface->move_to(&v_start, user);
+        if (error) goto Exit;
+
+        while (point < limit) {
+            point++;
+            tags++;
+
+            tag = SW_FT_CURVE_TAG(tags[0]);
+            switch (tag) {
+            case SW_FT_CURVE_TAG_ON: /* emit a single line_to */
+            {
+                SW_FT_Vector vec;
+
+                vec.x = SCALED(point->x);
+                vec.y = SCALED(point->y);
+
+                error = func_interface->line_to(&vec, user);
+                if (error) goto Exit;
+                continue;
+            }
+
+            case SW_FT_CURVE_TAG_CONIC: /* consume conic arcs */
+                v_control.x = SCALED(point->x);
+                v_control.y = SCALED(point->y);
+
+            Do_Conic:
+                if (point < limit) {
+                    SW_FT_Vector vec;
+                    SW_FT_Vector v_middle;
+
+                    point++;
+                    tags++;
+                    tag = SW_FT_CURVE_TAG(tags[0]);
+
+                    vec.x = SCALED(point->x);
+                    vec.y = SCALED(point->y);
+
+                    if (tag == SW_FT_CURVE_TAG_ON) {
+                        error =
+                            func_interface->conic_to(&v_control, &vec, user);
+                        if (error) goto Exit;
+                        continue;
+                    }
+
+                    if (tag != SW_FT_CURVE_TAG_CONIC) goto Invalid_Outline;
+
+                    /* two consecutive conic controls: the implicit */
+                    /* on-point is their midpoint                   */
+                    v_middle.x = (v_control.x + vec.x) / 2;
+                    v_middle.y = (v_control.y + vec.y) / 2;
+
+                    error =
+                        func_interface->conic_to(&v_control, &v_middle, user);
+                    if (error) goto Exit;
+
+                    v_control = vec;
+                    goto Do_Conic;
+                }
+
+                /* contour ends on a conic: close back to the start */
+                error = func_interface->conic_to(&v_control, &v_start, user);
+                goto Close;
+
+            default: /* SW_FT_CURVE_TAG_CUBIC */
+            {
+                SW_FT_Vector vec1, vec2;
+
+                /* a cubic needs exactly two consecutive cubic controls */
+                if (point + 1 > limit ||
+                    SW_FT_CURVE_TAG(tags[1]) != SW_FT_CURVE_TAG_CUBIC)
+                    goto Invalid_Outline;
+
+                point += 2;
+                tags += 2;
+
+                vec1.x = SCALED(point[-2].x);
+                vec1.y = SCALED(point[-2].y);
+
+                vec2.x = SCALED(point[-1].x);
+                vec2.y = SCALED(point[-1].y);
+
+                if (point <= limit) {
+                    SW_FT_Vector vec;
+
+                    vec.x = SCALED(point->x);
+                    vec.y = SCALED(point->y);
+
+                    error = func_interface->cubic_to(&vec1, &vec2, &vec, user);
+                    if (error) goto Exit;
+                    continue;
+                }
+
+                /* contour ends on a cubic: close back to the start */
+                error = func_interface->cubic_to(&vec1, &vec2, &v_start, user);
+                goto Close;
+            }
+            }
+        }
+
+        /* close the contour with a line segment */
+        error = func_interface->line_to(&v_start, user);
+
+    Close:
+        if (error) goto Exit;
+
+        first = last + 1;
+    }
+
+    return 0;
+
+Exit:
+    return error;
+
+Invalid_Outline:
+    return SW_FT_THROW(Invalid_Outline);
+}
+
+/* A vertical band [min, max) of scanlines rendered in one pass over */
+/* the cell pool.                                                    */
+typedef struct gray_TBand_ {
+    TPos min, max;
+
+} gray_TBand;
+
+/* Emitter table handed to SW_FT_Outline_Decompose(); the trailing   */
+/* 0, 0 are the `shift' and `delta' coordinate transforms.           */
+SW_FT_DEFINE_OUTLINE_FUNCS(func_interface,
+                           (SW_FT_Outline_MoveTo_Func)gray_move_to,
+                           (SW_FT_Outline_LineTo_Func)gray_line_to,
+                           (SW_FT_Outline_ConicTo_Func)gray_conic_to,
+                           (SW_FT_Outline_CubicTo_Func)gray_cubic_to, 0, 0)
+
+/* Decompose the outline into cells for the current band.  Cell-pool */
+/* exhaustion deep inside the renderer longjmp()s back to the        */
+/* ft_setjmp below and is reported as Memory_Overflow so the caller  */
+/* can retry with a smaller band.  `error' is volatile because it is */
+/* live across the setjmp.                                           */
+static int gray_convert_glyph_inner(RAS_ARG)
+{
+    volatile int error = 0;
+
+    if (ft_setjmp(ras.jump_buffer) == 0) {
+        error = SW_FT_Outline_Decompose(&ras.outline, &func_interface, &ras);
+        if (!ras.invalid) gray_record_cell(RAS_VAR);
+    } else
+        error = SW_FT_THROW(Memory_Overflow);
+
+    return error;
+}
+
+/* Convert the whole outline: clip its control box against the clip  */
+/* box, then render it in vertical bands.  A band whose cells do not */
+/* fit in the render pool is split in half and retried (the `bands'  */
+/* array acts as an explicit stack).  Returns 0 on success, 1 on a   */
+/* non-recoverable error.                                            */
+static int gray_convert_glyph(RAS_ARG)
+{
+    gray_TBand bands[40];
+    gray_TBand* volatile band;
+    int volatile n, num_bands;
+    TPos volatile min, max, max_y;
+    SW_FT_BBox* clip;
+
+    /* Set up state in the raster object */
+    gray_compute_cbox(RAS_VAR);
+
+    /* clip to target bitmap, exit if nothing to do */
+    clip = &ras.clip_box;
+
+    if (ras.max_ex <= clip->xMin || ras.min_ex >= clip->xMax ||
+        ras.max_ey <= clip->yMin || ras.min_ey >= clip->yMax)
+        return 0;
+
+    if (ras.min_ex < clip->xMin) ras.min_ex = clip->xMin;
+    if (ras.min_ey < clip->yMin) ras.min_ey = clip->yMin;
+
+    if (ras.max_ex > clip->xMax) ras.max_ex = clip->xMax;
+    if (ras.max_ey > clip->yMax) ras.max_ey = clip->yMax;
+
+    ras.count_ex = ras.max_ex - ras.min_ex;
+    ras.count_ey = ras.max_ey - ras.min_ey;
+
+    /* set up vertical bands */
+    num_bands = (int)((ras.max_ey - ras.min_ey) / ras.band_size);
+    if (num_bands == 0) num_bands = 1;
+    if (num_bands >= 39) num_bands = 39;
+
+    ras.band_shoot = 0;
+
+    min = ras.min_ey;
+    max_y = ras.max_ey;
+
+    for (n = 0; n < num_bands; n++, min = max) {
+        max = min + ras.band_size;
+        if (n == num_bands - 1 || max > max_y) max = max_y;
+
+        bands[0].min = min;
+        bands[0].max = max;
+        band = bands;
+
+        while (band >= bands) {
+            TPos bottom, top, middle;
+            int error;
+
+            {
+                /* partition the render pool: the front holds the     */
+                /* per-scanline ycells index, the (TCell-aligned)     */
+                /* remainder holds the cell pool itself               */
+                PCell cells_max;
+                int yindex;
+                long cell_start, cell_end, cell_mod;
+
+                ras.ycells = (PCell*)ras.buffer;
+                ras.ycount = band->max - band->min;
+
+                cell_start = sizeof(PCell) * ras.ycount;
+                cell_mod = cell_start % sizeof(TCell);
+                if (cell_mod > 0) cell_start += sizeof(TCell) - cell_mod;
+
+                cell_end = ras.buffer_size;
+                cell_end -= cell_end % sizeof(TCell);
+
+                cells_max = (PCell)((char*)ras.buffer + cell_end);
+                ras.cells = (PCell)((char*)ras.buffer + cell_start);
+                if (ras.cells >= cells_max) goto ReduceBands;
+
+                ras.max_cells = cells_max - ras.cells;
+                if (ras.max_cells < 2) goto ReduceBands;
+
+                for (yindex = 0; yindex < ras.ycount; yindex++)
+                    ras.ycells[yindex] = NULL;
+            }
+
+            ras.num_cells = 0;
+            ras.invalid = 1;
+            ras.min_ey = band->min;
+            ras.max_ey = band->max;
+            ras.count_ey = band->max - band->min;
+
+            error = gray_convert_glyph_inner(RAS_VAR);
+
+            if (!error) {
+                gray_sweep(RAS_VAR);
+                band--;
+                continue;
+            } else if (error != ErrRaster_Memory_Overflow)
+                return 1;
+
+        ReduceBands:
+            /* render pool overflow; we will reduce the render band by half */
+            bottom = band->min;
+            top = band->max;
+            middle = bottom + ((top - bottom) >> 1);
+
+            /* This is too complex for a single scanline; there must */
+            /* be some problems. */
+            if (middle == bottom) {
+                return 1;
+            }
+
+            /* NOTE(review): `bottom - top' is never positive here     */
+            /* (bottom <= top), so band_shoot never increments and the */
+            /* band_size halving below looks unreachable; confirm      */
+            /* against upstream ftgrays.c before changing.             */
+            if (bottom - top >= ras.band_size) ras.band_shoot++;
+
+            band[1].min = bottom;
+            band[1].max = middle;
+            band[0].min = middle;
+            band[0].max = top;
+            band++;
+        }
+    }
+
+    if (ras.band_shoot > 8 && ras.band_size > 16)
+        ras.band_size = ras.band_size / 2;
+
+    return 0;
+}
+
+/* Entry point of the raster: validate `params', set up the worker   */
+/* (the render pool lives on this function's stack), convert the     */
+/* outline, and finally report the accumulated bounding box through  */
+/* params->bbox_cb.  Anti-aliased rendering only.  Returns an error  */
+/* code for invalid input, 1 after rendering.                        */
+static int gray_raster_render(gray_PRaster raster,
+                              const SW_FT_Raster_Params* params)
+{
+    SW_FT_UNUSED(raster);
+    const SW_FT_Outline* outline = (const SW_FT_Outline*)params->source;
+
+    gray_TWorker worker[1];
+
+    TCell buffer[SW_FT_RENDER_POOL_SIZE / sizeof(TCell)];
+    long buffer_size = sizeof(buffer);
+    int band_size = (int)(buffer_size / (long)(sizeof(TCell) * 8));
+
+    if (!outline) return SW_FT_THROW(Invalid_Outline);
+
+    /* return immediately if the outline is empty */
+    if (outline->n_points == 0 || outline->n_contours <= 0) return 0;
+
+    if (!outline->contours || !outline->points)
+        return SW_FT_THROW(Invalid_Outline);
+
+    /* the last contour must end exactly at the last point */
+    if (outline->n_points != outline->contours[outline->n_contours - 1] + 1)
+        return SW_FT_THROW(Invalid_Outline);
+
+    /* this version does not support monochrome rendering */
+    if (!(params->flags & SW_FT_RASTER_FLAG_AA))
+        return SW_FT_THROW(Invalid_Mode);
+
+    if (params->flags & SW_FT_RASTER_FLAG_CLIP)
+        ras.clip_box = params->clip_box;
+    else {
+        /* no clip requested: use the full 16-bit span coordinate range */
+        ras.clip_box.xMin = -32768L;
+        ras.clip_box.yMin = -32768L;
+        ras.clip_box.xMax = 32767L;
+        ras.clip_box.yMax = 32767L;
+    }
+
+    gray_init_cells(RAS_VAR_ buffer, buffer_size);
+
+    ras.outline = *outline;
+    ras.num_cells = 0;
+    ras.invalid = 1;
+    ras.band_size = band_size;
+    ras.num_gray_spans = 0;
+
+    ras.render_span = (SW_FT_Raster_Span_Func)params->gray_spans;
+    ras.render_span_data = params->user;
+
+    /* NOTE(review): bbox_cb is invoked unconditionally, even when */
+    /* gray_convert_glyph() fails or clips everything away.        */
+    gray_convert_glyph(RAS_VAR);
+    params->bbox_cb(ras.bound_left, ras.bound_top,
+                    ras.bound_right - ras.bound_left,
+                    ras.bound_bottom - ras.bound_top + 1, params->user);
+    return 1;
+}
+
+/**** RASTER OBJECT CREATION: In stand-alone mode, we simply use *****/
+/**** a static object. *****/
+
+/* Raster constructor: returns a pointer to a single static raster   */
+/* object, zeroed on each call (stand-alone mode, no allocator).     */
+/* Note: not thread-safe -- all callers share `the_raster'.          */
+static int gray_raster_new(SW_FT_Raster* araster)
+{
+    static gray_TRaster the_raster;
+
+    *araster = (SW_FT_Raster)&the_raster;
+    SW_FT_MEM_ZERO(&the_raster, sizeof(the_raster));
+
+    return 0;
+}
+
+/* Raster destructor: nothing to free for the static raster object. */
+static void gray_raster_done(SW_FT_Raster raster)
+{
+    /* nothing */
+    SW_FT_UNUSED(raster);
+}
+
+/* Render-pool reset hook: a no-op here, since this raster uses a    */
+/* stack-allocated pool in gray_raster_render() instead.             */
+static void gray_raster_reset(SW_FT_Raster raster, char* pool_base,
+                              long pool_size)
+{
+    SW_FT_UNUSED(raster);
+    SW_FT_UNUSED(pool_base);
+    SW_FT_UNUSED(pool_size);
+}
+
+/* Public function table for this anti-aliased raster (declared      */
+/* extern in v_ft_raster.h).                                         */
+SW_FT_DEFINE_RASTER_FUNCS(sw_ft_grays_raster,
+
+                          (SW_FT_Raster_New_Func)gray_raster_new,
+                          (SW_FT_Raster_Reset_Func)gray_raster_reset,
+                          (SW_FT_Raster_Render_Func)gray_raster_render,
+                          (SW_FT_Raster_Done_Func)gray_raster_done)
+
+/* END */
diff --git a/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_raster.h b/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_raster.h
new file mode 100644
index 00000000..da585775
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_raster.h
@@ -0,0 +1,607 @@
+#ifndef V_FT_IMG_H
+#define V_FT_IMG_H
+/***************************************************************************/
+/* */
+/* ftimage.h */
+/* */
+/* FreeType glyph image formats and default raster interface */
+/* (specification). */
+/* */
+/* Copyright 1996-2010, 2013 by */
+/* David Turner, Robert Wilhelm, and Werner Lemberg. */
+/* */
+/* This file is part of the FreeType project, and may only be used, */
+/* modified, and distributed under the terms of the FreeType project */
+/* license, LICENSE.TXT. By continuing to use, modify, or distribute */
+/* this file you indicate that you have read the license and */
+/* understand and accept it fully. */
+/* */
+/***************************************************************************/
+
+ /*************************************************************************/
+ /* */
+ /* Note: A `raster' is simply a scan-line converter, used to render */
+ /* SW_FT_Outlines into SW_FT_Bitmaps. */
+ /* */
+ /*************************************************************************/
+
+#include "vector_freetype_v_ft_types.h"
+
+ /*************************************************************************/
+ /* */
+ /* <Struct> */
+ /* FT_BBox */
+ /* */
+ /* <Description> */
+ /* A structure used to hold an outline's bounding box, i.e., the */
+ /* coordinates of its extrema in the horizontal and vertical */
+ /* directions. */
+ /* */
+ /* <Fields> */
+ /* xMin :: The horizontal minimum (left-most). */
+ /* */
+ /* yMin :: The vertical minimum (bottom-most). */
+ /* */
+ /* xMax :: The horizontal maximum (right-most). */
+ /* */
+ /* yMax :: The vertical maximum (top-most). */
+ /* */
+ /* <Note> */
+ /* The bounding box is specified with the coordinates of the lower */
+ /* left and the upper right corner. In PostScript, those values are */
+ /* often called (llx,lly) and (urx,ury), respectively. */
+ /* */
+ /* If `yMin' is negative, this value gives the glyph's descender. */
+ /* Otherwise, the glyph doesn't descend below the baseline. */
+ /* Similarly, if `ymax' is positive, this value gives the glyph's */
+ /* ascender. */
+ /* */
+ /* `xMin' gives the horizontal distance from the glyph's origin to */
+ /* the left edge of the glyph's bounding box. If `xMin' is negative, */
+ /* the glyph extends to the left of the origin. */
+ /* */
+ typedef struct SW_FT_BBox_
+ {
+ SW_FT_Pos xMin, yMin;
+ SW_FT_Pos xMax, yMax;
+
+ } SW_FT_BBox;
+
+/*************************************************************************/
+/* */
+/* <Struct> */
+/* SW_FT_Outline */
+/* */
+/* <Description> */
+/* This structure is used to describe an outline to the scan-line */
+/* converter. */
+/* */
+/* <Fields> */
+/* n_contours :: The number of contours in the outline. */
+/* */
+/* n_points :: The number of points in the outline. */
+/* */
+/* points :: A pointer to an array of `n_points' @SW_FT_Vector */
+/* elements, giving the outline's point coordinates. */
+/* */
+/* tags :: A pointer to an array of `n_points' chars, giving */
+/* each outline point's type. */
+/* */
+/* If bit~0 is unset, the point is `off' the curve, */
+/* i.e., a Bézier control point, while it is `on' if */
+/* set. */
+/* */
+/* Bit~1 is meaningful for `off' points only. If set, */
+/* it indicates a third-order Bézier arc control point; */
+/* and a second-order control point if unset. */
+/* */
+/* If bit~2 is set, bits 5-7 contain the drop-out mode */
+/* (as defined in the OpenType specification; the value */
+/* is the same as the argument to the SCANMODE */
+/* instruction). */
+/* */
+/* Bits 3 and~4 are reserved for internal purposes. */
+/* */
+/* contours :: An array of `n_contours' shorts, giving the end */
+/* point of each contour within the outline. For */
+/* example, the first contour is defined by the points */
+/* `0' to `contours[0]', the second one is defined by */
+/* the points `contours[0]+1' to `contours[1]', etc. */
+/* */
+/* flags :: A set of bit flags used to characterize the outline */
+/* and give hints to the scan-converter and hinter on */
+/* how to convert/grid-fit it. See @SW_FT_OUTLINE_FLAGS.*/
+/* */
+typedef struct SW_FT_Outline_
+{
+ short n_contours; /* number of contours in glyph */
+ short n_points; /* number of points in the glyph */
+
+ SW_FT_Vector* points; /* the outline's points */
+ char* tags; /* the points flags */
+ short* contours; /* the contour end points */
+ char* contours_flag; /* the contour open flags */
+
+ int flags; /* outline masks */
+
+} SW_FT_Outline;
+
+
+ /*************************************************************************/
+ /* */
+ /* <Enum> */
+ /* SW_FT_OUTLINE_FLAGS */
+ /* */
+ /* <Description> */
+ /* A list of bit-field constants use for the flags in an outline's */
+ /* `flags' field. */
+ /* */
+ /* <Values> */
+ /* SW_FT_OUTLINE_NONE :: */
+ /* Value~0 is reserved. */
+ /* */
+ /* SW_FT_OUTLINE_OWNER :: */
+ /* If set, this flag indicates that the outline's field arrays */
+ /* (i.e., `points', `flags', and `contours') are `owned' by the */
+ /* outline object, and should thus be freed when it is destroyed. */
+ /* */
+ /* SW_FT_OUTLINE_EVEN_ODD_FILL :: */
+ /* By default, outlines are filled using the non-zero winding rule. */
+ /* If set to 1, the outline will be filled using the even-odd fill */
+ /* rule (only works with the smooth rasterizer). */
+ /* */
+ /* SW_FT_OUTLINE_REVERSE_FILL :: */
+ /* By default, outside contours of an outline are oriented in */
+ /* clock-wise direction, as defined in the TrueType specification. */
+ /* This flag is set if the outline uses the opposite direction */
+ /* (typically for Type~1 fonts). This flag is ignored by the scan */
+ /* converter. */
+ /* */
+ /* */
+ /* */
+ /* There exists a second mechanism to pass the drop-out mode to the */
+ /* B/W rasterizer; see the `tags' field in @SW_FT_Outline. */
+ /* */
+ /* Please refer to the description of the `SCANTYPE' instruction in */
+ /* the OpenType specification (in file `ttinst1.doc') how simple */
+ /* drop-outs, smart drop-outs, and stubs are defined. */
+ /* */
+#define SW_FT_OUTLINE_NONE 0x0
+#define SW_FT_OUTLINE_OWNER 0x1
+#define SW_FT_OUTLINE_EVEN_ODD_FILL 0x2
+#define SW_FT_OUTLINE_REVERSE_FILL 0x4
+
+ /* */
+
+#define SW_FT_CURVE_TAG( flag ) ( flag & 3 )
+
+#define SW_FT_CURVE_TAG_ON 1
+#define SW_FT_CURVE_TAG_CONIC 0
+#define SW_FT_CURVE_TAG_CUBIC 2
+
+
+#define SW_FT_Curve_Tag_On SW_FT_CURVE_TAG_ON
+#define SW_FT_Curve_Tag_Conic SW_FT_CURVE_TAG_CONIC
+#define SW_FT_Curve_Tag_Cubic SW_FT_CURVE_TAG_CUBIC
+
+ /*************************************************************************/
+ /* */
+ /* A raster is a scan converter, in charge of rendering an outline into */
+ /* a a bitmap. This section contains the public API for rasters. */
+ /* */
+ /* Note that in FreeType 2, all rasters are now encapsulated within */
+ /* specific modules called `renderers'. See `ftrender.h' for more */
+ /* details on renderers. */
+ /* */
+ /*************************************************************************/
+
+
+ /*************************************************************************/
+ /* */
+ /* <Type> */
+ /* SW_FT_Raster */
+ /* */
+ /* <Description> */
+ /* A handle (pointer) to a raster object. Each object can be used */
+ /* independently to convert an outline into a bitmap or pixmap. */
+ /* */
+ typedef struct SW_FT_RasterRec_* SW_FT_Raster;
+
+
+ /*************************************************************************/
+ /* */
+ /* <Struct> */
+ /* SW_FT_Span */
+ /* */
+ /* <Description> */
+ /* A structure used to model a single span of gray (or black) pixels */
+ /* when rendering a monochrome or anti-aliased bitmap. */
+ /* */
+ /* <Fields> */
+ /* x :: The span's horizontal start position. */
+ /* */
+ /* len :: The span's length in pixels. */
+ /* */
+ /* coverage :: The span color/coverage, ranging from 0 (background) */
+ /* to 255 (foreground). Only used for anti-aliased */
+ /* rendering. */
+ /* */
+ /* <Note> */
+ /* This structure is used by the span drawing callback type named */
+ /* @SW_FT_SpanFunc that takes the y~coordinate of the span as a */
+ /* parameter. */
+ /* */
+ /* The coverage value is always between 0 and 255. If you want less */
+ /* gray values, the callback function has to reduce them. */
+ /* */
+ typedef struct SW_FT_Span_
+ {
+ short x;
+ short y;
+ unsigned short len;
+ unsigned char coverage;
+
+ } SW_FT_Span;
+
+
+ /*************************************************************************/
+ /* */
+ /* <FuncType> */
+ /* SW_FT_SpanFunc */
+ /* */
+ /* <Description> */
+ /* A function used as a call-back by the anti-aliased renderer in */
+ /* order to let client applications draw themselves the gray pixel */
+ /* spans on each scan line. */
+ /* */
+ /* <Input> */
+ /* y :: The scanline's y~coordinate. */
+ /* */
+ /* count :: The number of spans to draw on this scanline. */
+ /* */
+ /* spans :: A table of `count' spans to draw on the scanline. */
+ /* */
+ /* user :: User-supplied data that is passed to the callback. */
+ /* */
+ /* <Note> */
+ /* This callback allows client applications to directly render the */
+ /* gray spans of the anti-aliased bitmap to any kind of surfaces. */
+ /* */
+ /* This can be used to write anti-aliased outlines directly to a */
+ /* given background bitmap, and even perform translucency. */
+ /* */
+ /* Note that the `count' field cannot be greater than a fixed value */
+ /* defined by the `SW_FT_MAX_GRAY_SPANS' configuration macro in */
+ /* `ftoption.h'. By default, this value is set to~32, which means */
+ /* that if there are more than 32~spans on a given scanline, the */
+ /* callback is called several times with the same `y' parameter in */
+ /* order to draw all callbacks. */
+ /* */
+ /* Otherwise, the callback is only called once per scan-line, and */
+ /* only for those scanlines that do have `gray' pixels on them. */
+ /* */
+ typedef void
+ (*SW_FT_SpanFunc)( int count,
+ const SW_FT_Span* spans,
+ void* user );
+
+ typedef void
+ (*SW_FT_BboxFunc)( int x, int y, int w, int h,
+ void* user);
+
+#define SW_FT_Raster_Span_Func SW_FT_SpanFunc
+
+
+
+ /*************************************************************************/
+ /* */
+ /* <Enum> */
+ /* SW_FT_RASTER_FLAG_XXX */
+ /* */
+ /* <Description> */
+ /* A list of bit flag constants as used in the `flags' field of a */
+ /* @SW_FT_Raster_Params structure. */
+ /* */
+ /* <Values> */
+ /* SW_FT_RASTER_FLAG_DEFAULT :: This value is 0. */
+ /* */
+ /* SW_FT_RASTER_FLAG_AA :: This flag is set to indicate that an */
+ /* anti-aliased glyph image should be */
+ /* generated. Otherwise, it will be */
+ /* monochrome (1-bit). */
+ /* */
+ /* SW_FT_RASTER_FLAG_DIRECT :: This flag is set to indicate direct */
+ /* rendering. In this mode, client */
+ /* applications must provide their own span */
+ /* callback. This lets them directly */
+ /* draw or compose over an existing bitmap. */
+ /* If this bit is not set, the target */
+ /* pixmap's buffer _must_ be zeroed before */
+ /* rendering. */
+ /* */
+ /* Note that for now, direct rendering is */
+ /* only possible with anti-aliased glyphs. */
+ /* */
+ /* SW_FT_RASTER_FLAG_CLIP :: This flag is only used in direct */
+ /* rendering mode. If set, the output will */
+ /* be clipped to a box specified in the */
+ /* `clip_box' field of the */
+ /* @SW_FT_Raster_Params structure. */
+ /* */
+ /* Note that by default, the glyph bitmap */
+ /* is clipped to the target pixmap, except */
+ /* in direct rendering mode where all spans */
+ /* are generated if no clipping box is set. */
+ /* */
+#define SW_FT_RASTER_FLAG_DEFAULT 0x0
+#define SW_FT_RASTER_FLAG_AA 0x1
+#define SW_FT_RASTER_FLAG_DIRECT 0x2
+#define SW_FT_RASTER_FLAG_CLIP 0x4
+
+
+ /*************************************************************************/
+ /* */
+ /* <Struct> */
+ /* SW_FT_Raster_Params */
+ /* */
+ /* <Description> */
+ /* A structure to hold the arguments used by a raster's render */
+ /* function. */
+ /* */
+ /* <Fields> */
+ /* target :: The target bitmap. */
+ /* */
+ /* source :: A pointer to the source glyph image (e.g., an */
+ /* @SW_FT_Outline). */
+ /* */
+ /* flags :: The rendering flags. */
+ /* */
+ /* gray_spans :: The gray span drawing callback. */
+ /* */
+ /* black_spans :: The black span drawing callback. UNIMPLEMENTED! */
+ /* */
+ /* bit_test :: The bit test callback. UNIMPLEMENTED! */
+ /* */
+ /* bit_set :: The bit set callback. UNIMPLEMENTED! */
+ /* */
+ /* user :: User-supplied data that is passed to each drawing */
+ /* callback. */
+ /* */
+ /* clip_box :: An optional clipping box. It is only used in */
+ /* direct rendering mode. Note that coordinates here */
+ /* should be expressed in _integer_ pixels (and not in */
+ /* 26.6 fixed-point units). */
+ /* */
+ /* <Note> */
+ /* An anti-aliased glyph bitmap is drawn if the @SW_FT_RASTER_FLAG_AA */
+ /* bit flag is set in the `flags' field, otherwise a monochrome */
+ /* bitmap is generated. */
+ /* */
+ /* If the @SW_FT_RASTER_FLAG_DIRECT bit flag is set in `flags', the */
+ /* raster will call the `gray_spans' callback to draw gray pixel */
+ /* spans, in the case of an aa glyph bitmap, it will call */
+ /* `black_spans', and `bit_test' and `bit_set' in the case of a */
+ /* monochrome bitmap. This allows direct composition over a */
+ /* pre-existing bitmap through user-provided callbacks to perform the */
+ /* span drawing/composition. */
+ /* */
+ /* Note that the `bit_test' and `bit_set' callbacks are required when */
+ /* rendering a monochrome bitmap, as they are crucial to implement */
+ /* correct drop-out control as defined in the TrueType specification. */
+ /* */
+ typedef struct SW_FT_Raster_Params_
+ {
+ const void* source;
+ int flags;
+ SW_FT_SpanFunc gray_spans;
+ SW_FT_BboxFunc bbox_cb;
+ void* user;
+ SW_FT_BBox clip_box;
+
+ } SW_FT_Raster_Params;
+
+
+/*************************************************************************/
+/* */
+/* <Function> */
+/* SW_FT_Outline_Check */
+/* */
+/* <Description> */
+/* Check the contents of an outline descriptor. */
+/* */
+/* <Input> */
+/* outline :: A handle to a source outline. */
+/* */
+/* <Return> */
+/* FreeType error code. 0~means success. */
+/* */
+SW_FT_Error
+SW_FT_Outline_Check( SW_FT_Outline* outline );
+
+
+/*************************************************************************/
+/* */
+/* <Function> */
+/* SW_FT_Outline_Get_CBox */
+/* */
+/* <Description> */
+/* Return an outline's `control box'. The control box encloses all */
+/* the outline's points, including Bézier control points. Though it */
+/* coincides with the exact bounding box for most glyphs, it can be */
+/* slightly larger in some situations (like when rotating an outline */
+/* that contains Bézier outside arcs). */
+/* */
+/* Computing the control box is very fast, while getting the bounding */
+/* box can take much more time as it needs to walk over all segments */
+/* and arcs in the outline. To get the latter, you can use the */
+/* `ftbbox' component, which is dedicated to this single task. */
+/* */
+/* <Input> */
+/* outline :: A pointer to the source outline descriptor. */
+/* */
+/* <Output> */
+/* acbox :: The outline's control box. */
+/* */
+/* <Note> */
+/* See @SW_FT_Glyph_Get_CBox for a discussion of tricky fonts. */
+/* */
+void
+SW_FT_Outline_Get_CBox( const SW_FT_Outline* outline,
+ SW_FT_BBox *acbox );
+
+
+ /*************************************************************************/
+ /* */
+ /* <FuncType> */
+ /* SW_FT_Raster_NewFunc */
+ /* */
+ /* <Description> */
+ /* A function used to create a new raster object. */
+ /* */
+ /* <Input> */
+ /* memory :: A handle to the memory allocator. */
+ /* */
+ /* <Output> */
+ /* raster :: A handle to the new raster object. */
+ /* */
+ /* <Return> */
+ /* Error code. 0~means success. */
+ /* */
+ /* <Note> */
+ /* The `memory' parameter is a typeless pointer in order to avoid */
+ /* un-wanted dependencies on the rest of the FreeType code. In */
+ /* practice, it is an @SW_FT_Memory object, i.e., a handle to the */
+ /* standard FreeType memory allocator. However, this field can be */
+ /* completely ignored by a given raster implementation. */
+ /* */
+ typedef int
+ (*SW_FT_Raster_NewFunc)( SW_FT_Raster* raster );
+
+#define SW_FT_Raster_New_Func SW_FT_Raster_NewFunc
+
+
+ /*************************************************************************/
+ /* */
+ /* <FuncType> */
+ /* SW_FT_Raster_DoneFunc */
+ /* */
+ /* <Description> */
+ /* A function used to destroy a given raster object. */
+ /* */
+ /* <Input> */
+ /* raster :: A handle to the raster object. */
+ /* */
+ typedef void
+ (*SW_FT_Raster_DoneFunc)( SW_FT_Raster raster );
+
+#define SW_FT_Raster_Done_Func SW_FT_Raster_DoneFunc
+
+
+ /*************************************************************************/
+ /* */
+ /* <FuncType> */
+ /* SW_FT_Raster_ResetFunc */
+ /* */
+ /* <Description> */
+ /* FreeType provides an area of memory called the `render pool', */
+ /* available to all registered rasters. This pool can be freely used */
+ /* during a given scan-conversion but is shared by all rasters. Its */
+ /* content is thus transient. */
+ /* */
+ /* This function is called each time the render pool changes, or just */
+ /* after a new raster object is created. */
+ /* */
+ /* <Input> */
+ /* raster :: A handle to the new raster object. */
+ /* */
+ /* pool_base :: The address in memory of the render pool. */
+ /* */
+ /* pool_size :: The size in bytes of the render pool. */
+ /* */
+ /* <Note> */
+ /* Rasters can ignore the render pool and rely on dynamic memory */
+ /* allocation if they want to (a handle to the memory allocator is */
+ /* passed to the raster constructor). However, this is not */
+ /* recommended for efficiency purposes. */
+ /* */
+ typedef void
+ (*SW_FT_Raster_ResetFunc)( SW_FT_Raster raster,
+ unsigned char* pool_base,
+ unsigned long pool_size );
+
+#define SW_FT_Raster_Reset_Func SW_FT_Raster_ResetFunc
+
+
+ /*************************************************************************/
+ /* */
+ /* <FuncType> */
+ /* SW_FT_Raster_RenderFunc */
+ /* */
+ /* <Description> */
+ /* Invoke a given raster to scan-convert a given glyph image into a */
+ /* target bitmap. */
+ /* */
+ /* <Input> */
+ /* raster :: A handle to the raster object. */
+ /* */
+ /* params :: A pointer to an @SW_FT_Raster_Params structure used to */
+ /* store the rendering parameters. */
+ /* */
+ /* <Return> */
+ /* Error code. 0~means success. */
+ /* */
+ /* <Note> */
+ /* The exact format of the source image depends on the raster's glyph */
+ /* format defined in its @SW_FT_Raster_Funcs structure. It can be an */
+ /* @SW_FT_Outline or anything else in order to support a large array of */
+ /* glyph formats. */
+ /* */
+ /* Note also that the render function can fail and return a */
+ /* `SW_FT_Err_Unimplemented_Feature' error code if the raster used does */
+ /* not support direct composition. */
+ /* */
+ /* XXX: For now, the standard raster doesn't support direct */
+ /* composition but this should change for the final release (see */
+ /* the files `demos/src/ftgrays.c' and `demos/src/ftgrays2.c' */
+ /* for examples of distinct implementations that support direct */
+ /* composition). */
+ /* */
+ typedef int
+ (*SW_FT_Raster_RenderFunc)( SW_FT_Raster raster,
+ const SW_FT_Raster_Params* params );
+
+#define SW_FT_Raster_Render_Func SW_FT_Raster_RenderFunc
+
+
+ /*************************************************************************/
+ /* */
+ /* <Struct> */
+ /* SW_FT_Raster_Funcs */
+ /* */
+ /* <Description> */
+ /* A structure used to describe a given raster class to the library. */
+ /* */
+ /* <Fields> */
+ /* glyph_format :: The supported glyph format for this raster. */
+ /* */
+ /* raster_new :: The raster constructor. */
+ /* */
+ /* raster_reset :: Used to reset the render pool within the raster. */
+ /* */
+ /* raster_render :: A function to render a glyph into a given bitmap. */
+ /* */
+ /* raster_done :: The raster destructor. */
+ /* */
+ typedef struct SW_FT_Raster_Funcs_
+ {
+ SW_FT_Raster_NewFunc raster_new;
+ SW_FT_Raster_ResetFunc raster_reset;
+ SW_FT_Raster_RenderFunc raster_render;
+ SW_FT_Raster_DoneFunc raster_done;
+
+ } SW_FT_Raster_Funcs;
+
+
+extern const SW_FT_Raster_Funcs sw_ft_grays_raster;
+
+#endif // V_FT_IMG_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_stroker.cpp b/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_stroker.cpp
new file mode 100644
index 00000000..aed62c92
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_stroker.cpp
@@ -0,0 +1,1936 @@
+
+/***************************************************************************/
+/* */
+/* ftstroke.c */
+/* */
+/* FreeType path stroker (body). */
+/* */
+/* Copyright 2002-2006, 2008-2011, 2013 by */
+/* David Turner, Robert Wilhelm, and Werner Lemberg. */
+/* */
+/* This file is part of the FreeType project, and may only be used, */
+/* modified, and distributed under the terms of the FreeType project */
+/* license, LICENSE.TXT. By continuing to use, modify, or distribute */
+/* this file you indicate that you have read the license and */
+/* understand and accept it fully. */
+/* */
+/***************************************************************************/
+
+#include "vector_freetype_v_ft_stroker.h"
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include "vector_freetype_v_ft_math.h"
+
+/*************************************************************************/
+/*************************************************************************/
+/***** *****/
+/***** BEZIER COMPUTATIONS *****/
+/***** *****/
+/*************************************************************************/
+/*************************************************************************/
+
+#define SW_FT_SMALL_CONIC_THRESHOLD (SW_FT_ANGLE_PI / 6)
+#define SW_FT_SMALL_CUBIC_THRESHOLD (SW_FT_ANGLE_PI / 8)
+
+#define SW_FT_EPSILON 2
+
+#define SW_FT_IS_SMALL(x) ((x) > -SW_FT_EPSILON && (x) < SW_FT_EPSILON)
+
+/* Return the absolute value of a fixed-point position. */
+static SW_FT_Pos ft_pos_abs(SW_FT_Pos x)
+{
+    return x >= 0 ? x : -x;
+}
+
+/* Subdivide the quadratic Bezier stored in base[0..2] at its midpoint; */
+/* on return base[0..4] holds the two half-arcs (shared middle point).  */
+static void ft_conic_split(SW_FT_Vector* base)
+{
+    SW_FT_Pos a, b;
+
+    base[4].x = base[2].x;
+    a = base[0].x + base[1].x;
+    b = base[1].x + base[2].x;
+    base[3].x = b >> 1;
+    base[2].x = ( a + b ) >> 2;
+    base[1].x = a >> 1;
+
+    base[4].y = base[2].y;
+    a = base[0].y + base[1].y;
+    b = base[1].y + base[2].y;
+    base[3].y = b >> 1;
+    base[2].y = ( a + b ) >> 2;
+    base[1].y = a >> 1;
+}
+
+/* Decide whether the conic arc in base[0..2] spans a small enough     */
+/* angle to be stroked directly; report entry/exit tangent directions  */
+/* in *angle_in / *angle_out (left unchanged for a degenerate point).  */
+static SW_FT_Bool ft_conic_is_small_enough(SW_FT_Vector* base,
+                                           SW_FT_Angle* angle_in,
+                                           SW_FT_Angle* angle_out)
+{
+    SW_FT_Vector d1, d2;
+    SW_FT_Angle  theta;
+    SW_FT_Int    close1, close2;
+
+    d1.x = base[1].x - base[2].x;
+    d1.y = base[1].y - base[2].y;
+    d2.x = base[0].x - base[1].x;
+    d2.y = base[0].y - base[1].y;
+
+    close1 = SW_FT_IS_SMALL(d1.x) && SW_FT_IS_SMALL(d1.y);
+    close2 = SW_FT_IS_SMALL(d2.x) && SW_FT_IS_SMALL(d2.y);
+
+    if (close1) {
+        if (close2) {
+            /* basically a point;                      */
+            /* do nothing to retain original direction */
+        } else {
+            *angle_in = *angle_out = SW_FT_Atan2(d2.x, d2.y);
+        }
+    } else /* !close1 */
+    {
+        if (close2) {
+            *angle_in = *angle_out = SW_FT_Atan2(d1.x, d1.y);
+        } else {
+            *angle_in = SW_FT_Atan2(d1.x, d1.y);
+            *angle_out = SW_FT_Atan2(d2.x, d2.y);
+        }
+    }
+
+    theta = ft_pos_abs(SW_FT_Angle_Diff(*angle_in, *angle_out));
+
+    return SW_FT_BOOL(theta < SW_FT_SMALL_CONIC_THRESHOLD);
+}
+
+/* Subdivide the cubic Bezier stored in base[0..3] at its midpoint;   */
+/* on return base[0..6] holds the two half-arcs (shared middle point). */
+static void ft_cubic_split(SW_FT_Vector* base)
+{
+    SW_FT_Pos a, b, c;
+
+    base[6].x = base[3].x;
+    a = base[0].x + base[1].x;
+    b = base[1].x + base[2].x;
+    c = base[2].x + base[3].x;
+    base[5].x = c >> 1;
+    c += b;
+    base[4].x = c >> 2;
+    base[1].x = a >> 1;
+    a += b;
+    base[2].x = a >> 2;
+    base[3].x = ( a + c ) >> 3;
+
+    base[6].y = base[3].y;
+    a = base[0].y + base[1].y;
+    b = base[1].y + base[2].y;
+    c = base[2].y + base[3].y;
+    base[5].y = c >> 1;
+    c += b;
+    base[4].y = c >> 2;
+    base[1].y = a >> 1;
+    a += b;
+    base[2].y = a >> 2;
+    base[3].y = ( a + c ) >> 3;
+}
+
+/* Return the average of `angle1' and `angle2'.            */
+/* This gives correct result even if `angle1' and `angle2' */
+/* have opposite signs.                                    */
+/* (Averaging via SW_FT_Angle_Diff keeps the result on the */
+/* short way round between the two directions.)            */
+static SW_FT_Angle ft_angle_mean(SW_FT_Angle angle1, SW_FT_Angle angle2)
+{
+    return angle1 + SW_FT_Angle_Diff(angle1, angle2) / 2;
+}
+
+/* Decide whether the cubic arc in base[0..3] spans small enough       */
+/* angles to be stroked directly; report entry/middle/exit tangent     */
+/* directions (left unchanged for a fully degenerate point).           */
+static SW_FT_Bool ft_cubic_is_small_enough(SW_FT_Vector* base,
+                                           SW_FT_Angle* angle_in,
+                                           SW_FT_Angle* angle_mid,
+                                           SW_FT_Angle* angle_out)
+{
+    SW_FT_Vector d1, d2, d3;
+    SW_FT_Angle  theta1, theta2;
+    SW_FT_Int    close1, close2, close3;
+
+    d1.x = base[2].x - base[3].x;
+    d1.y = base[2].y - base[3].y;
+    d2.x = base[1].x - base[2].x;
+    d2.y = base[1].y - base[2].y;
+    d3.x = base[0].x - base[1].x;
+    d3.y = base[0].y - base[1].y;
+
+    close1 = SW_FT_IS_SMALL(d1.x) && SW_FT_IS_SMALL(d1.y);
+    close2 = SW_FT_IS_SMALL(d2.x) && SW_FT_IS_SMALL(d2.y);
+    close3 = SW_FT_IS_SMALL(d3.x) && SW_FT_IS_SMALL(d3.y);
+
+    if (close1) {
+        if (close2) {
+            if (close3) {
+                /* basically a point;                      */
+                /* do nothing to retain original direction */
+            } else /* !close3 */
+            {
+                *angle_in = *angle_mid = *angle_out = SW_FT_Atan2(d3.x, d3.y);
+            }
+        } else /* !close2 */
+        {
+            if (close3) {
+                *angle_in = *angle_mid = *angle_out = SW_FT_Atan2(d2.x, d2.y);
+            } else /* !close3 */
+            {
+                *angle_in = *angle_mid = SW_FT_Atan2(d2.x, d2.y);
+                *angle_out = SW_FT_Atan2(d3.x, d3.y);
+            }
+        }
+    } else /* !close1 */
+    {
+        if (close2) {
+            if (close3) {
+                *angle_in = *angle_mid = *angle_out = SW_FT_Atan2(d1.x, d1.y);
+            } else /* !close3 */
+            {
+                *angle_in = SW_FT_Atan2(d1.x, d1.y);
+                *angle_out = SW_FT_Atan2(d3.x, d3.y);
+                *angle_mid = ft_angle_mean(*angle_in, *angle_out);
+            }
+        } else /* !close2 */
+        {
+            if (close3) {
+                *angle_in = SW_FT_Atan2(d1.x, d1.y);
+                *angle_mid = *angle_out = SW_FT_Atan2(d2.x, d2.y);
+            } else /* !close3 */
+            {
+                *angle_in = SW_FT_Atan2(d1.x, d1.y);
+                *angle_mid = SW_FT_Atan2(d2.x, d2.y);
+                *angle_out = SW_FT_Atan2(d3.x, d3.y);
+            }
+        }
+    }
+
+    theta1 = ft_pos_abs(SW_FT_Angle_Diff(*angle_in, *angle_mid));
+    theta2 = ft_pos_abs(SW_FT_Angle_Diff(*angle_mid, *angle_out));
+
+    return SW_FT_BOOL(theta1 < SW_FT_SMALL_CUBIC_THRESHOLD &&
+                      theta2 < SW_FT_SMALL_CUBIC_THRESHOLD);
+}
+
+/*************************************************************************/
+/*************************************************************************/
+/***** *****/
+/***** STROKE BORDERS *****/
+/***** *****/
+/*************************************************************************/
+/*************************************************************************/
+
+/* Tags attached to each point while a stroke border is being built;  */
+/* BEGIN/END delimit sub-paths and are translated to outline contours */
+/* when the border is exported.                                       */
+typedef enum SW_FT_StrokeTags_ {
+    SW_FT_STROKE_TAG_ON = 1,    /* on-curve point */
+    SW_FT_STROKE_TAG_CUBIC = 2, /* cubic off-point */
+    SW_FT_STROKE_TAG_BEGIN = 4, /* sub-path start */
+    SW_FT_STROKE_TAG_END = 8    /* sub-path end */
+
+} SW_FT_StrokeTags;
+
+#define SW_FT_STROKE_TAG_BEGIN_END \
+    (SW_FT_STROKE_TAG_BEGIN | SW_FT_STROKE_TAG_END)
+
+/* One side of the stroke (a growable array of tagged points). */
+typedef struct SW_FT_StrokeBorderRec_ {
+    SW_FT_UInt num_points;
+    SW_FT_UInt max_points;
+    SW_FT_Vector* points;
+    SW_FT_Byte* tags;
+    SW_FT_Bool movable; /* TRUE for ends of lineto borders */
+    SW_FT_Int start;    /* index of current sub-path start point */
+    SW_FT_Bool valid;
+
+} SW_FT_StrokeBorderRec, *SW_FT_StrokeBorder;
+
+/* Validate an outline: point/contour counts must be consistent and  */
+/* the contour end indices strictly increasing.  Returns 0 on        */
+/* success, -1 for a NULL or malformed outline.                      */
+SW_FT_Error SW_FT_Outline_Check(SW_FT_Outline* outline)
+{
+    if (outline) {
+        SW_FT_Int n_points = outline->n_points;
+        SW_FT_Int n_contours = outline->n_contours;
+        SW_FT_Int end0, end;
+        SW_FT_Int n;
+
+        /* empty glyph? */
+        if (n_points == 0 && n_contours == 0) return 0;
+
+        /* check point and contour counts */
+        if (n_points <= 0 || n_contours <= 0) goto Bad;
+
+        end0 = end = -1;
+        for (n = 0; n < n_contours; n++) {
+            end = outline->contours[n];
+
+            /* note that we don't accept empty contours */
+            if (end <= end0 || end >= n_points) goto Bad;
+
+            end0 = end;
+        }
+
+        /* the last contour must end at the last point */
+        if (end != n_points - 1) goto Bad;
+
+        /* XXX: check the tags array */
+        return 0;
+    }
+
+Bad:
+    return -1; // SW_FT_THROW( Invalid_Argument );
+}
+
+/* Compute the control box (min/max over all points, including      */
+/* off-curve control points) of an outline into *acbox.  An empty   */
+/* outline yields the zero box; NULL arguments are ignored.         */
+void SW_FT_Outline_Get_CBox(const SW_FT_Outline* outline, SW_FT_BBox* acbox)
+{
+    SW_FT_Pos xMin, yMin, xMax, yMax;
+
+    if (outline && acbox) {
+        if (outline->n_points == 0) {
+            xMin = 0;
+            yMin = 0;
+            xMax = 0;
+            yMax = 0;
+        } else {
+            SW_FT_Vector* vec = outline->points;
+            SW_FT_Vector* limit = vec + outline->n_points;
+
+            xMin = xMax = vec->x;
+            yMin = yMax = vec->y;
+            vec++;
+
+            for (; vec < limit; vec++) {
+                SW_FT_Pos x, y;
+
+                x = vec->x;
+                if (x < xMin) xMin = x;
+                if (x > xMax) xMax = x;
+
+                y = vec->y;
+                if (y < yMin) yMin = y;
+                if (y > yMax) yMax = y;
+            }
+        }
+        acbox->xMin = xMin;
+        acbox->xMax = xMax;
+        acbox->yMin = yMin;
+        acbox->yMax = yMax;
+    }
+}
+
+/* Ensure `border' has room for `new_points' additional points,       */
+/* growing the point and tag arrays geometrically when needed.        */
+/* Returns 0 on success, -1 on allocation failure (in which case the  */
+/* previous buffers are kept intact).                                 */
+/*                                                                    */
+/* NOTE(review): the original assigned realloc's result directly to   */
+/* border->points / border->tags and returned `error' still set to 0  */
+/* on failure, so a failed realloc both leaked the old buffer and was */
+/* reported as success to callers.                                    */
+static SW_FT_Error ft_stroke_border_grow(SW_FT_StrokeBorder border,
+                                         SW_FT_UInt new_points)
+{
+    SW_FT_UInt old_max = border->max_points;
+    SW_FT_UInt new_max = border->num_points + new_points;
+    SW_FT_Error error = 0;
+
+    if (new_max > old_max) {
+        SW_FT_UInt    cur_max = old_max;
+        SW_FT_Vector* points;
+        SW_FT_Byte*   tags;
+
+        /* grow by ~1.5x plus a small constant to amortize reallocs */
+        while (cur_max < new_max) cur_max += (cur_max >> 1) + 16;
+
+        /* go through temporaries: realloc does not free its argument */
+        /* on failure, so the old buffers stay valid and owned        */
+        points = (SW_FT_Vector*)realloc(border->points,
+                                        cur_max * sizeof(SW_FT_Vector));
+        if (!points) {
+            error = -1; /* out of memory */
+            goto Exit;
+        }
+        border->points = points;
+
+        tags = (SW_FT_Byte*)realloc(border->tags,
+                                    cur_max * sizeof(SW_FT_Byte));
+        if (!tags) {
+            error = -1; /* out of memory */
+            goto Exit;
+        }
+        border->tags = tags;
+
+        border->max_points = cur_max;
+    }
+
+Exit:
+    return error;
+}
+
+/* Close the currently open sub-path of `border'.  Empty sub-paths    */
+/* are discarded; otherwise the last point replaces the start point   */
+/* (it carries the adjusted coordinates), the points and tags are     */
+/* optionally reversed, and BEGIN/END tags are set.                   */
+static void ft_stroke_border_close(SW_FT_StrokeBorder border,
+                                   SW_FT_Bool reverse)
+{
+    SW_FT_UInt start = border->start;
+    SW_FT_UInt count = border->num_points;
+
+    assert(border->start >= 0);
+
+    /* don't record empty paths! */
+    if (count <= start + 1U)
+        border->num_points = start;
+    else {
+        /* copy the last point to the start of this sub-path, since */
+        /* it contains the `adjusted' starting coordinates          */
+        border->num_points = --count;
+        border->points[start] = border->points[count];
+
+        if (reverse) {
+            /* reverse the points */
+            {
+                SW_FT_Vector* vec1 = border->points + start + 1;
+                SW_FT_Vector* vec2 = border->points + count - 1;
+
+                for (; vec1 < vec2; vec1++, vec2--) {
+                    SW_FT_Vector tmp;
+
+                    tmp = *vec1;
+                    *vec1 = *vec2;
+                    *vec2 = tmp;
+                }
+            }
+
+            /* then the tags */
+            {
+                SW_FT_Byte* tag1 = border->tags + start + 1;
+                SW_FT_Byte* tag2 = border->tags + count - 1;
+
+                for (; tag1 < tag2; tag1++, tag2--) {
+                    SW_FT_Byte tmp;
+
+                    tmp = *tag1;
+                    *tag1 = *tag2;
+                    *tag2 = tmp;
+                }
+            }
+        }
+
+        border->tags[start] |= SW_FT_STROKE_TAG_BEGIN;
+        border->tags[count - 1] |= SW_FT_STROKE_TAG_END;
+    }
+
+    /* no sub-path is open any more */
+    border->start = -1;
+    border->movable = FALSE;
+}
+
+/* Append a straight segment to `border'.  If the previous segment's  */
+/* end was marked movable, its end point is simply replaced by `to';  */
+/* zero-length segments are dropped.  `movable' marks the new end     */
+/* point as adjustable by the next join.                              */
+static SW_FT_Error ft_stroke_border_lineto(SW_FT_StrokeBorder border,
+                                           SW_FT_Vector* to, SW_FT_Bool movable)
+{
+    SW_FT_Error error = 0;
+
+    assert(border->start >= 0);
+
+    if (border->movable) {
+        /* move last point */
+        border->points[border->num_points - 1] = *to;
+    } else {
+        /* don't add zero-length lineto */
+        if (border->num_points > 0 &&
+            SW_FT_IS_SMALL(border->points[border->num_points - 1].x - to->x) &&
+            SW_FT_IS_SMALL(border->points[border->num_points - 1].y - to->y))
+            return error;
+
+        /* add one point */
+        error = ft_stroke_border_grow(border, 1);
+        if (!error) {
+            SW_FT_Vector* vec = border->points + border->num_points;
+            SW_FT_Byte* tag = border->tags + border->num_points;
+
+            vec[0] = *to;
+            tag[0] = SW_FT_STROKE_TAG_ON;
+
+            border->num_points += 1;
+        }
+    }
+    border->movable = movable;
+    return error;
+}
+
+/* Append a quadratic (conic) arc to `border': one off-curve control  */
+/* point followed by an on-curve end point.                           */
+static SW_FT_Error ft_stroke_border_conicto(SW_FT_StrokeBorder border,
+                                            SW_FT_Vector* control,
+                                            SW_FT_Vector* to)
+{
+    SW_FT_Error error;
+
+    assert(border->start >= 0);
+
+    error = ft_stroke_border_grow(border, 2);
+    if (!error) {
+        SW_FT_Vector* vec = border->points + border->num_points;
+        SW_FT_Byte* tag = border->tags + border->num_points;
+
+        vec[0] = *control;
+        vec[1] = *to;
+
+        tag[0] = 0;
+        tag[1] = SW_FT_STROKE_TAG_ON;
+
+        border->num_points += 2;
+    }
+
+    border->movable = FALSE;
+
+    return error;
+}
+
+/* Append a cubic arc to `border': two off-curve control points       */
+/* followed by an on-curve end point.                                 */
+static SW_FT_Error ft_stroke_border_cubicto(SW_FT_StrokeBorder border,
+                                            SW_FT_Vector* control1,
+                                            SW_FT_Vector* control2,
+                                            SW_FT_Vector* to)
+{
+    SW_FT_Error error;
+
+    assert(border->start >= 0);
+
+    error = ft_stroke_border_grow(border, 3);
+    if (!error) {
+        SW_FT_Vector* vec = border->points + border->num_points;
+        SW_FT_Byte* tag = border->tags + border->num_points;
+
+        vec[0] = *control1;
+        vec[1] = *control2;
+        vec[2] = *to;
+
+        tag[0] = SW_FT_STROKE_TAG_CUBIC;
+        tag[1] = SW_FT_STROKE_TAG_CUBIC;
+        tag[2] = SW_FT_STROKE_TAG_ON;
+
+        border->num_points += 3;
+    }
+
+    border->movable = FALSE;
+
+    return error;
+}
+
+/* maximum angle covered by a single cubic approximation of the arc */
+#define SW_FT_ARC_CUBIC_ANGLE (SW_FT_ANGLE_PI / 2)
+
+
+/* Append a circular arc of the given radius around `center',         */
+/* starting at `angle_start' and sweeping `angle_diff', approximated  */
+/* by one or more cubic Bezier segments.                              */
+static SW_FT_Error
+ft_stroke_border_arcto( SW_FT_StrokeBorder border,
+                        SW_FT_Vector*      center,
+                        SW_FT_Fixed        radius,
+                        SW_FT_Angle        angle_start,
+                        SW_FT_Angle        angle_diff )
+{
+    SW_FT_Fixed   coef;
+    SW_FT_Vector  a0, a1, a2, a3;
+    SW_FT_Int     i, arcs = 1;
+    SW_FT_Error   error = 0;
+
+
+    /* number of cubic arcs to draw */
+    while ( angle_diff > SW_FT_ARC_CUBIC_ANGLE * arcs ||
+            -angle_diff > SW_FT_ARC_CUBIC_ANGLE * arcs )
+      arcs++;
+
+    /* control tangents */
+    coef  = SW_FT_Tan( angle_diff / ( 4 * arcs ) );
+    coef += coef / 3;
+
+    /* compute start and first control point */
+    SW_FT_Vector_From_Polar( &a0, radius, angle_start );
+    a1.x = SW_FT_MulFix( -a0.y, coef );
+    a1.y = SW_FT_MulFix( a0.x, coef );
+
+    a0.x += center->x;
+    a0.y += center->y;
+    a1.x += a0.x;
+    a1.y += a0.y;
+
+    for ( i = 1; i <= arcs; i++ )
+    {
+      /* compute end and second control point */
+      SW_FT_Vector_From_Polar( &a3, radius,
+                               angle_start + i * angle_diff / arcs );
+      a2.x = SW_FT_MulFix( a3.y, coef );
+      a2.y = SW_FT_MulFix( -a3.x, coef );
+
+      a3.x += center->x;
+      a3.y += center->y;
+      a2.x += a3.x;
+      a2.y += a3.y;
+
+      /* add cubic arc */
+      error = ft_stroke_border_cubicto( border, &a1, &a2, &a3 );
+      if ( error )
+        break;
+
+      /* a0 = a3; */
+      a1.x = a3.x - a2.x + a3.x;
+      a1.y = a3.y - a2.y + a3.y;
+    }
+
+    return error;
+}
+
+/* Start a new sub-path of `border' at `to', first closing any        */
+/* sub-path still open.                                               */
+static SW_FT_Error ft_stroke_border_moveto(SW_FT_StrokeBorder border,
+                                           SW_FT_Vector* to)
+{
+    /* close current open path if any ? */
+    if (border->start >= 0) ft_stroke_border_close(border, FALSE);
+
+    border->start = border->num_points;
+    border->movable = FALSE;
+
+    return ft_stroke_border_lineto(border, to, FALSE);
+}
+
+/* Initialize a border to the empty state (no storage allocated yet). */
+static void ft_stroke_border_init(SW_FT_StrokeBorder border)
+{
+    border->points = NULL;
+    border->tags = NULL;
+
+    border->num_points = 0;
+    border->max_points = 0;
+    border->start = -1;
+    border->valid = FALSE;
+}
+
+/* Discard all recorded geometry but keep the allocated buffers. */
+static void ft_stroke_border_reset(SW_FT_StrokeBorder border)
+{
+    border->num_points = 0;
+    border->start = -1;
+    border->valid = FALSE;
+}
+
+/* Release the border's buffers and return it to the empty state. */
+static void ft_stroke_border_done(SW_FT_StrokeBorder border)
+{
+    free(border->points);
+    free(border->tags);
+
+    border->num_points = 0;
+    border->max_points = 0;
+    border->start = -1;
+    border->valid = FALSE;
+}
+
+/* Count the points and contours recorded in `border' while checking  */
+/* that BEGIN/END tags are properly paired; on success marks the      */
+/* border valid, on a tag mismatch reports zero counts.               */
+static SW_FT_Error ft_stroke_border_get_counts(SW_FT_StrokeBorder border,
+                                               SW_FT_UInt* anum_points,
+                                               SW_FT_UInt* anum_contours)
+{
+    SW_FT_Error error = 0;
+    SW_FT_UInt num_points = 0;
+    SW_FT_UInt num_contours = 0;
+
+    SW_FT_UInt count = border->num_points;
+    SW_FT_Vector* point = border->points;
+    SW_FT_Byte* tags = border->tags;
+    SW_FT_Int in_contour = 0;
+
+    for (; count > 0; count--, num_points++, point++, tags++) {
+        if (tags[0] & SW_FT_STROKE_TAG_BEGIN) {
+            /* a BEGIN inside an open contour is malformed */
+            if (in_contour != 0) goto Fail;
+
+            in_contour = 1;
+        } else if (in_contour == 0)
+            goto Fail;
+
+        if (tags[0] & SW_FT_STROKE_TAG_END) {
+            in_contour = 0;
+            num_contours++;
+        }
+    }
+
+    /* a contour left open is malformed too */
+    if (in_contour != 0) goto Fail;
+
+    border->valid = TRUE;
+
+Exit:
+    *anum_points = num_points;
+    *anum_contours = num_contours;
+    return error;
+
+Fail:
+    num_points = 0;
+    num_contours = 0;
+    goto Exit;
+}
+
+/* Append the border's points, converted tags, and contour end        */
+/* indices to `outline', which must have been sized beforehand        */
+/* (see ft_stroke_border_get_counts).                                 */
+static void ft_stroke_border_export(SW_FT_StrokeBorder border,
+                                    SW_FT_Outline* outline)
+{
+    /* copy point locations */
+    memcpy(outline->points + outline->n_points, border->points,
+           border->num_points * sizeof(SW_FT_Vector));
+
+    /* copy tags */
+    {
+        SW_FT_UInt count = border->num_points;
+        SW_FT_Byte* read = border->tags;
+        SW_FT_Byte* write = (SW_FT_Byte*)outline->tags + outline->n_points;
+
+        for (; count > 0; count--, read++, write++) {
+            if (*read & SW_FT_STROKE_TAG_ON)
+                *write = SW_FT_CURVE_TAG_ON;
+            else if (*read & SW_FT_STROKE_TAG_CUBIC)
+                *write = SW_FT_CURVE_TAG_CUBIC;
+            else
+                *write = SW_FT_CURVE_TAG_CONIC;
+        }
+    }
+
+    /* copy contours */
+    {
+        SW_FT_UInt count = border->num_points;
+        SW_FT_Byte* tags = border->tags;
+        SW_FT_Short* write = outline->contours + outline->n_contours;
+        SW_FT_Short idx = (SW_FT_Short)outline->n_points;
+
+        for (; count > 0; count--, tags++, idx++) {
+            if (*tags & SW_FT_STROKE_TAG_END) {
+                *write++ = idx;
+                outline->n_contours++;
+            }
+        }
+    }
+
+    outline->n_points = (short)(outline->n_points + border->num_points);
+
+    assert(SW_FT_Outline_Check(outline) == 0);
+}
+
+/*************************************************************************/
+/*************************************************************************/
+/***** *****/
+/***** STROKER *****/
+/***** *****/
+/*************************************************************************/
+/*************************************************************************/
+
+/* perpendicular rotation for border `s' (0 or 1): +90 or -90 degrees */
+#define SW_FT_SIDE_TO_ROTATE(s) (SW_FT_ANGLE_PI2 - (s)*SW_FT_ANGLE_PI)
+
+/* Full stroker state: current pen position/direction, sub-path       */
+/* bookkeeping, stroke parameters, and the two stroke borders.        */
+typedef struct SW_FT_StrokerRec_ {
+    SW_FT_Angle angle_in;            /* direction into curr join */
+    SW_FT_Angle angle_out;           /* direction out of join    */
+    SW_FT_Vector center;             /* current position         */
+    SW_FT_Fixed line_length;         /* length of last lineto    */
+    SW_FT_Bool first_point;          /* is this the start?       */
+    SW_FT_Bool subpath_open;         /* is the subpath open?     */
+    SW_FT_Angle subpath_angle;       /* subpath start direction  */
+    SW_FT_Vector subpath_start;      /* subpath start position   */
+    SW_FT_Fixed subpath_line_length; /* subpath start lineto len */
+    SW_FT_Bool handle_wide_strokes;  /* use wide strokes logic?  */
+
+    SW_FT_Stroker_LineCap line_cap;
+    SW_FT_Stroker_LineJoin line_join;
+    SW_FT_Stroker_LineJoin line_join_saved;
+    SW_FT_Fixed miter_limit;
+    SW_FT_Fixed radius;
+
+    SW_FT_StrokeBorderRec borders[2];
+} SW_FT_StrokerRec;
+
+/* documentation is in ftstroke.h */
+
+/* Allocate and initialize a stroker; `*astroker' receives the new    */
+/* handle (NULL on failure).  Returns 0 on success, -1 when the       */
+/* allocation fails.                                                  */
+/*                                                                    */
+/* NOTE(review): the original returned 0 even when calloc failed,     */
+/* leaving callers with a NULL handle and no way to detect it.        */
+SW_FT_Error SW_FT_Stroker_New(SW_FT_Stroker* astroker)
+{
+    SW_FT_Error error = 0;
+    SW_FT_Stroker stroker = NULL;
+
+    stroker = (SW_FT_StrokerRec*)calloc(1, sizeof(SW_FT_StrokerRec));
+    if (stroker) {
+        ft_stroke_border_init(&stroker->borders[0]);
+        ft_stroke_border_init(&stroker->borders[1]);
+    } else {
+        error = -1; /* out of memory */
+    }
+
+    *astroker = stroker;
+
+    return error;
+}
+
+/* Discard all geometry recorded so far, keeping the stroker's        */
+/* parameters and allocated buffers.                                  */
+void SW_FT_Stroker_Rewind(SW_FT_Stroker stroker)
+{
+    if (stroker) {
+        ft_stroke_border_reset(&stroker->borders[0]);
+        ft_stroke_border_reset(&stroker->borders[1]);
+    }
+}
+
+/* documentation is in ftstroke.h */
+
+/* Configure the stroker: half stroke width (`radius'), cap and join  */
+/* styles, and the miter limit (clamped to at least 1.0 in 16.16);    */
+/* also rewinds any previously recorded geometry.                     */
+void SW_FT_Stroker_Set(SW_FT_Stroker stroker, SW_FT_Fixed radius,
+                       SW_FT_Stroker_LineCap line_cap,
+                       SW_FT_Stroker_LineJoin line_join,
+                       SW_FT_Fixed miter_limit)
+{
+    stroker->radius = radius;
+    stroker->line_cap = line_cap;
+    stroker->line_join = line_join;
+    stroker->miter_limit = miter_limit;
+
+    /* ensure miter limit has sensible value */
+    if (stroker->miter_limit < 0x10000) stroker->miter_limit = 0x10000;
+
+    /* save line join style:                                            */
+    /* line join style can be temporarily changed when stroking curves */
+    stroker->line_join_saved = line_join;
+
+    SW_FT_Stroker_Rewind(stroker);
+}
+
+/* documentation is in ftstroke.h */
+
+/* Destroy a stroker: free both border buffers and the object itself. */
+void SW_FT_Stroker_Done(SW_FT_Stroker stroker)
+{
+    if (stroker) {
+        ft_stroke_border_done(&stroker->borders[0]);
+        ft_stroke_border_done(&stroker->borders[1]);
+
+        free(stroker);
+    }
+}
+
+/* create a circular arc at a corner or cap */
+/* (sweeps the border on `side' from angle_in to angle_out around the */
+/* current center; a 180-degree turn is forced to bend away from the  */
+/* rotation direction)                                                */
+static SW_FT_Error ft_stroker_arcto(SW_FT_Stroker stroker, SW_FT_Int side)
+{
+    SW_FT_Angle total, rotate;
+    SW_FT_Fixed radius = stroker->radius;
+    SW_FT_Error error = 0;
+    SW_FT_StrokeBorder border = stroker->borders + side;
+
+    rotate = SW_FT_SIDE_TO_ROTATE(side);
+
+    total = SW_FT_Angle_Diff(stroker->angle_in, stroker->angle_out);
+    if (total == SW_FT_ANGLE_PI) total = -rotate * 2;
+
+    error = ft_stroke_border_arcto(border, &stroker->center, radius,
+                                   stroker->angle_in + rotate, total);
+    border->movable = FALSE;
+    return error;
+}
+
+/* add a cap at the end of an opened path */
+/* (`angle' is the outgoing direction of the path at the end point;   */
+/* round caps are built as arcs, square/butt caps as two line         */
+/* segments across the stroke width)                                  */
+static SW_FT_Error
+ft_stroker_cap(SW_FT_Stroker stroker,
+               SW_FT_Angle   angle,
+               SW_FT_Int     side)
+{
+    SW_FT_Error error = 0;
+
+    if (stroker->line_cap == SW_FT_STROKER_LINECAP_ROUND)
+    {
+      /* add a round cap */
+      stroker->angle_in = angle;
+      stroker->angle_out = angle + SW_FT_ANGLE_PI;
+
+      error = ft_stroker_arcto(stroker, side);
+    }
+    else
+    {
+      /* add a square or butt cap */
+      SW_FT_Vector        middle, delta;
+      SW_FT_Fixed         radius = stroker->radius;
+      SW_FT_StrokeBorder  border = stroker->borders + side;
+
+      /* compute middle point and first angle point */
+      SW_FT_Vector_From_Polar( &middle, radius, angle );
+      delta.x = side ?  middle.y : -middle.y;
+      delta.y = side ? -middle.x :  middle.x;
+
+      if ( stroker->line_cap == SW_FT_STROKER_LINECAP_SQUARE )
+      {
+        middle.x += stroker->center.x;
+        middle.y += stroker->center.y;
+      }
+      else  /* SW_FT_STROKER_LINECAP_BUTT */
+      {
+        middle.x = stroker->center.x;
+        middle.y = stroker->center.y;
+      }
+
+      delta.x += middle.x;
+      delta.y += middle.y;
+
+      error = ft_stroke_border_lineto( border, &delta, FALSE );
+      if ( error )
+        goto Exit;
+
+      /* compute second angle point */
+      delta.x = middle.x - delta.x + middle.x;
+      delta.y = middle.y - delta.y + middle.y;
+
+      error = ft_stroke_border_lineto( border, &delta, FALSE );
+    }
+
+Exit:
+    return error;
+}
+
+/* process an inside corner, i.e. compute intersection */
+/* (intersecting the two borders is only attempted between two        */
+/* sufficiently long line segments; otherwise the border just gets a  */
+/* point at the outgoing offset position)                             */
+static SW_FT_Error ft_stroker_inside(SW_FT_Stroker stroker, SW_FT_Int side,
+                                     SW_FT_Fixed line_length)
+{
+    SW_FT_StrokeBorder border = stroker->borders + side;
+    SW_FT_Angle        phi, theta, rotate;
+    SW_FT_Fixed        length;
+    SW_FT_Vector       sigma, delta;
+    SW_FT_Error        error = 0;
+    SW_FT_Bool         intersect; /* use intersection of lines? */
+
+    rotate = SW_FT_SIDE_TO_ROTATE(side);
+
+    theta = SW_FT_Angle_Diff(stroker->angle_in, stroker->angle_out) / 2;
+
+    /* Only intersect borders if between two lineto's and both */
+    /* lines are long enough (line_length is zero for curves). */
+    /* NOTE(review): 0x59C000 looks like ~89.75 degrees in the 16.16 */
+    /* angle scale (SW_FT_ANGLE_PI = 180<<16) -- TODO confirm.       */
+    if (!border->movable || line_length == 0  ||
+        theta > 0x59C000 || theta < -0x59C000 )
+        intersect = FALSE;
+    else {
+        /* compute minimum required length of lines */
+        SW_FT_Fixed  min_length;
+
+
+        SW_FT_Vector_Unit( &sigma, theta );
+        min_length =
+          ft_pos_abs( SW_FT_MulDiv( stroker->radius, sigma.y, sigma.x ) );
+
+        intersect = SW_FT_BOOL( min_length                         &&
+                                stroker->line_length >= min_length &&
+                                line_length          >= min_length );
+    }
+
+    if (!intersect) {
+        SW_FT_Vector_From_Polar(&delta, stroker->radius,
+                                stroker->angle_out + rotate);
+        delta.x += stroker->center.x;
+        delta.y += stroker->center.y;
+
+        border->movable = FALSE;
+    } else {
+        /* compute median angle */
+        phi = stroker->angle_in + theta + rotate;
+
+        length = SW_FT_DivFix( stroker->radius, sigma.x );
+
+        SW_FT_Vector_From_Polar( &delta, length, phi );
+        delta.x += stroker->center.x;
+        delta.y += stroker->center.y;
+    }
+
+    error = ft_stroke_border_lineto(border, &delta, FALSE);
+
+    return error;
+}
+
+  /* process an outside corner, i.e. compute bevel/miter/round */
+  /* (round joins are delegated to ft_stroker_arcto; otherwise the    */
+  /* join is a miter, clipped to the miter limit, or a bevel;         */
+  /* line_length == 0 signals that the neighbors are curves, which    */
+  /* need an extra end point because their border ends are not        */
+  /* movable)                                                         */
+static SW_FT_Error
+ft_stroker_outside( SW_FT_Stroker stroker,
+                    SW_FT_Int     side,
+                    SW_FT_Fixed   line_length )
+{
+    SW_FT_StrokeBorder  border = stroker->borders + side;
+    SW_FT_Error         error;
+    SW_FT_Angle         rotate;
+
+
+    if ( stroker->line_join == SW_FT_STROKER_LINEJOIN_ROUND )
+      error = ft_stroker_arcto( stroker, side );
+    else
+    {
+      /* this is a mitered (pointed) or beveled (truncated) corner */
+      SW_FT_Fixed   radius = stroker->radius;
+      SW_FT_Vector  sigma;
+      SW_FT_Angle   theta = 0, phi = 0;
+      SW_FT_Bool    bevel, fixed_bevel;
+
+
+      rotate = SW_FT_SIDE_TO_ROTATE( side );
+
+      bevel =
+        SW_FT_BOOL( stroker->line_join == SW_FT_STROKER_LINEJOIN_BEVEL );
+
+      fixed_bevel =
+        SW_FT_BOOL( stroker->line_join != SW_FT_STROKER_LINEJOIN_MITER_VARIABLE );
+
+      /* check miter limit first */
+      if ( !bevel )
+      {
+        theta = SW_FT_Angle_Diff( stroker->angle_in, stroker->angle_out ) / 2;
+
+        if ( theta == SW_FT_ANGLE_PI2 )
+          theta = -rotate;
+
+        phi = stroker->angle_in + theta + rotate;
+
+        SW_FT_Vector_From_Polar( &sigma, stroker->miter_limit, theta );
+
+        /* is miter limit exceeded? */
+        if ( sigma.x < 0x10000L )
+        {
+          /* don't create variable bevels for very small deviations; */
+          /* FT_Sin(x) = 0 for x <= 57                               */
+          if ( fixed_bevel || ft_pos_abs( theta ) > 57 )
+            bevel = TRUE;
+        }
+      }
+
+      if ( bevel )  /* this is a bevel (broken angle) */
+      {
+        if ( fixed_bevel )
+        {
+          /* the outer corners are simply joined together */
+          SW_FT_Vector  delta;
+
+
+          /* add bevel */
+          SW_FT_Vector_From_Polar( &delta,
+                                   radius,
+                                   stroker->angle_out + rotate );
+          delta.x += stroker->center.x;
+          delta.y += stroker->center.y;
+
+          border->movable = FALSE;
+          error = ft_stroke_border_lineto( border, &delta, FALSE );
+        }
+        else  /* variable bevel or clipped miter */
+        {
+          /* the miter is truncated */
+          SW_FT_Vector  middle, delta;
+          SW_FT_Fixed   coef;
+
+
+          /* compute middle point and first angle point */
+          SW_FT_Vector_From_Polar( &middle,
+                                   SW_FT_MulFix( radius, stroker->miter_limit ),
+                                   phi );
+
+          coef    = SW_FT_DivFix(  0x10000L - sigma.x, sigma.y );
+          delta.x = SW_FT_MulFix(  middle.y, coef );
+          delta.y = SW_FT_MulFix( -middle.x, coef );
+
+          middle.x += stroker->center.x;
+          middle.y += stroker->center.y;
+          delta.x  += middle.x;
+          delta.y  += middle.y;
+
+          error = ft_stroke_border_lineto( border, &delta, FALSE );
+          if ( error )
+            goto Exit;
+
+          /* compute second angle point */
+          delta.x = middle.x - delta.x + middle.x;
+          delta.y = middle.y - delta.y + middle.y;
+
+          error = ft_stroke_border_lineto( border, &delta, FALSE );
+          if ( error )
+            goto Exit;
+
+          /* finally, add an end point; only needed if not lineto */
+          /* (line_length is zero for curves)                     */
+          if ( line_length == 0 )
+          {
+            SW_FT_Vector_From_Polar( &delta,
+                                     radius,
+                                     stroker->angle_out + rotate );
+
+            delta.x += stroker->center.x;
+            delta.y += stroker->center.y;
+
+            error = ft_stroke_border_lineto( border, &delta, FALSE );
+          }
+        }
+      }
+      else  /* this is a miter (intersection) */
+      {
+        SW_FT_Fixed   length;
+        SW_FT_Vector  delta;
+
+
+        length = SW_FT_MulDiv( stroker->radius, stroker->miter_limit, sigma.x );
+
+        SW_FT_Vector_From_Polar( &delta, length, phi );
+        delta.x += stroker->center.x;
+        delta.y += stroker->center.y;
+
+        error = ft_stroke_border_lineto( border, &delta, FALSE );
+        if ( error )
+          goto Exit;
+
+        /* now add an end point; only needed if not lineto */
+        /* (line_length is zero for curves)                */
+        if ( line_length == 0 )
+        {
+          SW_FT_Vector_From_Polar( &delta,
+                                   stroker->radius,
+                                   stroker->angle_out + rotate );
+          delta.x += stroker->center.x;
+          delta.y += stroker->center.y;
+
+          error = ft_stroke_border_lineto( border, &delta, FALSE );
+        }
+      }
+    }
+
+  Exit:
+    return error;
+}
+
+/* Process the join at the current point: decide which border is the  */
+/* inside (by the sign of the turn) and delegate to the inside and    */
+/* outside corner handlers.                                           */
+static SW_FT_Error ft_stroker_process_corner(SW_FT_Stroker stroker,
+                                             SW_FT_Fixed line_length)
+{
+    SW_FT_Error error = 0;
+    SW_FT_Angle turn;
+    SW_FT_Int inside_side;
+
+    turn = SW_FT_Angle_Diff(stroker->angle_in, stroker->angle_out);
+
+    /* no specific corner processing is required if the turn is 0 */
+    if (turn == 0) goto Exit;
+
+    /* when we turn to the right, the inside side is 0 */
+    inside_side = 0;
+
+    /* otherwise, the inside side is 1 */
+    if (turn < 0) inside_side = 1;
+
+    /* process the inside side */
+    error = ft_stroker_inside(stroker, inside_side, line_length);
+    if (error) goto Exit;
+
+    /* process the outside side */
+    error = ft_stroker_outside(stroker, 1 - inside_side, line_length);
+
+Exit:
+    return error;
+}
+
+/* add two points to the left and right borders corresponding to the */
+/* start of the subpath                                              */
+/* (also records the start direction/length so the final join or cap */
+/* can be produced when the subpath ends, and clears first_point)    */
+static SW_FT_Error ft_stroker_subpath_start(SW_FT_Stroker stroker,
+                                            SW_FT_Angle start_angle,
+                                            SW_FT_Fixed line_length)
+{
+    SW_FT_Vector delta;
+    SW_FT_Vector point;
+    SW_FT_Error error;
+    SW_FT_StrokeBorder border;
+
+    SW_FT_Vector_From_Polar(&delta, stroker->radius,
+                            start_angle + SW_FT_ANGLE_PI2);
+
+    point.x = stroker->center.x + delta.x;
+    point.y = stroker->center.y + delta.y;
+
+    border = stroker->borders;
+    error = ft_stroke_border_moveto(border, &point);
+    if (error) goto Exit;
+
+    point.x = stroker->center.x - delta.x;
+    point.y = stroker->center.y - delta.y;
+
+    border++;
+    error = ft_stroke_border_moveto(border, &point);
+
+    /* save angle, position, and line length for last join */
+    /* (line_length is zero for curves)                    */
+    stroker->subpath_angle = start_angle;
+    stroker->first_point = FALSE;
+    stroker->subpath_line_length = line_length;
+
+Exit:
+    return error;
+}
+
+/* documentation is in ftstroke.h */
+
+/* Stroke a straight segment from the current position to `to':       */
+/* start the sub-path or process the corner, then add the offset      */
+/* segment to both borders and update the stroker state.              */
+SW_FT_Error SW_FT_Stroker_LineTo(SW_FT_Stroker stroker, SW_FT_Vector* to)
+{
+    SW_FT_Error error = 0;
+    SW_FT_StrokeBorder border;
+    SW_FT_Vector delta;
+    SW_FT_Angle angle;
+    SW_FT_Int side;
+    SW_FT_Fixed line_length;
+
+    delta.x = to->x - stroker->center.x;
+    delta.y = to->y - stroker->center.y;
+
+    /* a zero-length lineto is a no-op; avoid creating a spurious corner */
+    if (delta.x == 0 && delta.y == 0) goto Exit;
+
+    /* compute length of line */
+    line_length = SW_FT_Vector_Length(&delta);
+
+    angle = SW_FT_Atan2(delta.x, delta.y);
+    SW_FT_Vector_From_Polar(&delta, stroker->radius, angle + SW_FT_ANGLE_PI2);
+
+    /* process corner if necessary */
+    if (stroker->first_point) {
+        /* This is the first segment of a subpath.  We need to     */
+        /* add a point to each border at their respective starting */
+        /* point locations.                                        */
+        error = ft_stroker_subpath_start(stroker, angle, line_length);
+        if (error) goto Exit;
+    } else {
+        /* process the current corner */
+        stroker->angle_out = angle;
+        error = ft_stroker_process_corner(stroker, line_length);
+        if (error) goto Exit;
+    }
+
+    /* now add a line segment to both the `inside' and `outside' paths */
+    for (border = stroker->borders, side = 1; side >= 0; side--, border++) {
+        SW_FT_Vector point;
+
+        point.x = to->x + delta.x;
+        point.y = to->y + delta.y;
+
+        /* the ends of lineto borders are movable */
+        error = ft_stroke_border_lineto(border, &point, TRUE);
+        if (error) goto Exit;
+
+        /* flip the offset for the other border */
+        delta.x = -delta.x;
+        delta.y = -delta.y;
+    }
+
+    stroker->angle_in = angle;
+    stroker->center = *to;
+    stroker->line_length = line_length;
+
+Exit:
+    return error;
+}
+
+/* documentation is in ftstroke.h */
+
+/* Stroke a quadratic (conic) arc from the current position through   */
+/* `control' to `to'.  The arc is adaptively subdivided on a small    */
+/* stack until each piece spans a small angle, then each piece is     */
+/* offset to both borders as a conic.                                 */
+SW_FT_Error SW_FT_Stroker_ConicTo(SW_FT_Stroker stroker, SW_FT_Vector* control,
+                                  SW_FT_Vector* to)
+{
+    SW_FT_Error error = 0;
+    SW_FT_Vector bez_stack[34];
+    SW_FT_Vector* arc;
+    SW_FT_Vector* limit = bez_stack + 30;
+    SW_FT_Bool first_arc = TRUE;
+
+    /* if all control points are coincident, this is a no-op; */
+    /* avoid creating a spurious corner                       */
+    if (SW_FT_IS_SMALL(stroker->center.x - control->x) &&
+        SW_FT_IS_SMALL(stroker->center.y - control->y) &&
+        SW_FT_IS_SMALL(control->x - to->x) &&
+        SW_FT_IS_SMALL(control->y - to->y)) {
+        stroker->center = *to;
+        goto Exit;
+    }
+
+    /* the stack holds the arc points in reversed order (end first) */
+    arc = bez_stack;
+    arc[0] = *to;
+    arc[1] = *control;
+    arc[2] = stroker->center;
+
+    while (arc >= bez_stack) {
+        SW_FT_Angle angle_in, angle_out;
+
+        /* initialize with current direction */
+        angle_in = angle_out = stroker->angle_in;
+
+        /* split while the current piece spans too large an angle */
+        /* (unless the stack is about to overflow)                */
+        if (arc < limit &&
+            !ft_conic_is_small_enough(arc, &angle_in, &angle_out)) {
+            if (stroker->first_point) stroker->angle_in = angle_in;
+
+            ft_conic_split(arc);
+            arc += 2;
+            continue;
+        }
+
+        if (first_arc) {
+            first_arc = FALSE;
+
+            /* process corner if necessary */
+            if (stroker->first_point)
+                error = ft_stroker_subpath_start(stroker, angle_in, 0);
+            else {
+                stroker->angle_out = angle_in;
+                error = ft_stroker_process_corner(stroker, 0);
+            }
+        } else if (ft_pos_abs(SW_FT_Angle_Diff(stroker->angle_in, angle_in)) >
+                   SW_FT_SMALL_CONIC_THRESHOLD / 4) {
+            /* if the deviation from one arc to the next is too great, */
+            /* add a round corner                                      */
+            stroker->center = arc[2];
+            stroker->angle_out = angle_in;
+            stroker->line_join = SW_FT_STROKER_LINEJOIN_ROUND;
+
+            error = ft_stroker_process_corner(stroker, 0);
+
+            /* reinstate line join style */
+            stroker->line_join = stroker->line_join_saved;
+        }
+
+        if (error) goto Exit;
+
+        /* the arc's angle is small enough; we can add it directly to each */
+        /* border                                                          */
+        {
+            SW_FT_Vector ctrl, end;
+            SW_FT_Angle theta, phi, rotate, alpha0 = 0;
+            SW_FT_Fixed length;
+            SW_FT_StrokeBorder border;
+            SW_FT_Int side;
+
+            theta = SW_FT_Angle_Diff(angle_in, angle_out) / 2;
+            phi = angle_in + theta;
+            length = SW_FT_DivFix(stroker->radius, SW_FT_Cos(theta));
+
+            /* compute direction of original arc */
+            if (stroker->handle_wide_strokes)
+                alpha0 = SW_FT_Atan2(arc[0].x - arc[2].x, arc[0].y - arc[2].y);
+
+            for (border = stroker->borders, side = 0; side <= 1;
+                 side++, border++) {
+                rotate = SW_FT_SIDE_TO_ROTATE(side);
+
+                /* compute control point */
+                SW_FT_Vector_From_Polar(&ctrl, length, phi + rotate);
+                ctrl.x += arc[1].x;
+                ctrl.y += arc[1].y;
+
+                /* compute end point */
+                SW_FT_Vector_From_Polar(&end, stroker->radius,
+                                        angle_out + rotate);
+                end.x += arc[0].x;
+                end.y += arc[0].y;
+
+                if (stroker->handle_wide_strokes) {
+                    SW_FT_Vector start;
+                    SW_FT_Angle alpha1;
+
+                    /* determine whether the border radius is greater than the
+                     */
+                    /* radius of curvature of the original arc */
+                    start = border->points[border->num_points - 1];
+
+                    alpha1 = SW_FT_Atan2(end.x - start.x, end.y - start.y);
+
+                    /* is the direction of the border arc opposite to */
+                    /* that of the original arc? */
+                    if (ft_pos_abs(SW_FT_Angle_Diff(alpha0, alpha1)) >
+                        SW_FT_ANGLE_PI / 2) {
+                        SW_FT_Angle beta, gamma;
+                        SW_FT_Vector bvec, delta;
+                        SW_FT_Fixed blen, sinA, sinB, alen;
+
+                        /* use the sine rule to find the intersection point */
+                        beta =
+                            SW_FT_Atan2(arc[2].x - start.x, arc[2].y - start.y);
+                        gamma = SW_FT_Atan2(arc[0].x - end.x, arc[0].y - end.y);
+
+                        bvec.x = end.x - start.x;
+                        bvec.y = end.y - start.y;
+
+                        blen = SW_FT_Vector_Length(&bvec);
+
+                        sinA = ft_pos_abs(SW_FT_Sin(alpha1 - gamma));
+                        sinB = ft_pos_abs(SW_FT_Sin(beta - gamma));
+
+                        alen = SW_FT_MulDiv(blen, sinA, sinB);
+
+                        SW_FT_Vector_From_Polar(&delta, alen, beta);
+                        delta.x += start.x;
+                        delta.y += start.y;
+
+                        /* circumnavigate the negative sector backwards */
+                        border->movable = FALSE;
+                        error = ft_stroke_border_lineto(border, &delta, FALSE);
+                        if (error) goto Exit;
+                        error = ft_stroke_border_lineto(border, &end, FALSE);
+                        if (error) goto Exit;
+                        error = ft_stroke_border_conicto(border, &ctrl, &start);
+                        if (error) goto Exit;
+                        /* and then move to the endpoint */
+                        error = ft_stroke_border_lineto(border, &end, FALSE);
+                        if (error) goto Exit;
+
+                        continue;
+                    }
+
+                    /* else fall through */
+                }
+
+                /* simply add an arc */
+                error = ft_stroke_border_conicto(border, &ctrl, &end);
+                if (error) goto Exit;
+            }
+        }
+
+        /* pop the processed piece off the subdivision stack */
+        arc -= 2;
+
+        stroker->angle_in = angle_out;
+    }
+
+    stroker->center = *to;
+
+Exit:
+    return error;
+}
+
+/* documentation is in ftstroke.h */
+
+/* Stroke a cubic Bezier from stroker->center to `to' with control      */
+/* points `control1'/`control2'.  The curve is iteratively subdivided    */
+/* on a fixed-size stack until each sub-arc spans a small enough angle,  */
+/* then each sub-arc is emitted as a single cubic on both borders.      */
+SW_FT_Error SW_FT_Stroker_CubicTo(SW_FT_Stroker stroker, SW_FT_Vector* control1,
+                                  SW_FT_Vector* control2, SW_FT_Vector* to)
+{
+    SW_FT_Error error = 0;
+    /* subdivision stack; each split pushes 3 more points (see arc += 3) */
+    SW_FT_Vector bez_stack[37];
+    SW_FT_Vector* arc;
+    SW_FT_Vector* limit = bez_stack + 32;
+    SW_FT_Bool first_arc = TRUE;
+
+    /* if all control points are coincident, this is a no-op; */
+    /* avoid creating a spurious corner */
+    if (SW_FT_IS_SMALL(stroker->center.x - control1->x) &&
+        SW_FT_IS_SMALL(stroker->center.y - control1->y) &&
+        SW_FT_IS_SMALL(control1->x - control2->x) &&
+        SW_FT_IS_SMALL(control1->y - control2->y) &&
+        SW_FT_IS_SMALL(control2->x - to->x) &&
+        SW_FT_IS_SMALL(control2->y - to->y)) {
+        stroker->center = *to;
+        goto Exit;
+    }
+
+    /* push the curve in reverse order: arc[0]=end ... arc[3]=start */
+    arc = bez_stack;
+    arc[0] = *to;
+    arc[1] = *control2;
+    arc[2] = *control1;
+    arc[3] = stroker->center;
+
+    while (arc >= bez_stack) {
+        SW_FT_Angle angle_in, angle_mid, angle_out;
+
+        /* initialize with current direction */
+        angle_in = angle_out = angle_mid = stroker->angle_in;
+
+        /* still too curved: split the cubic (while stack space remains) */
+        if (arc < limit &&
+            !ft_cubic_is_small_enough(arc, &angle_in, &angle_mid, &angle_out)) {
+            if (stroker->first_point) stroker->angle_in = angle_in;
+
+            ft_cubic_split(arc);
+            arc += 3;
+            continue;
+        }
+
+        if (first_arc) {
+            first_arc = FALSE;
+
+            /* process corner if necessary */
+            if (stroker->first_point)
+                error = ft_stroker_subpath_start(stroker, angle_in, 0);
+            else {
+                stroker->angle_out = angle_in;
+                error = ft_stroker_process_corner(stroker, 0);
+            }
+        } else if (ft_pos_abs(SW_FT_Angle_Diff(stroker->angle_in, angle_in)) >
+                   SW_FT_SMALL_CUBIC_THRESHOLD / 4) {
+            /* if the deviation from one arc to the next is too great, */
+            /* add a round corner */
+            stroker->center = arc[3];
+            stroker->angle_out = angle_in;
+            stroker->line_join = SW_FT_STROKER_LINEJOIN_ROUND;
+
+            error = ft_stroker_process_corner(stroker, 0);
+
+            /* reinstate line join style */
+            stroker->line_join = stroker->line_join_saved;
+        }
+
+        if (error) goto Exit;
+
+        /* the arc's angle is small enough; we can add it directly to each */
+        /* border */
+        {
+            SW_FT_Vector ctrl1, ctrl2, end;
+            SW_FT_Angle theta1, phi1, theta2, phi2, rotate, alpha0 = 0;
+            SW_FT_Fixed length1, length2;
+            SW_FT_StrokeBorder border;
+            SW_FT_Int side;
+
+            theta1 = SW_FT_Angle_Diff(angle_in, angle_mid) / 2;
+            theta2 = SW_FT_Angle_Diff(angle_mid, angle_out) / 2;
+            phi1 = ft_angle_mean(angle_in, angle_mid);
+            phi2 = ft_angle_mean(angle_mid, angle_out);
+            length1 = SW_FT_DivFix(stroker->radius, SW_FT_Cos(theta1));
+            length2 = SW_FT_DivFix(stroker->radius, SW_FT_Cos(theta2));
+
+            /* compute direction of original arc */
+            if (stroker->handle_wide_strokes)
+                alpha0 = SW_FT_Atan2(arc[0].x - arc[3].x, arc[0].y - arc[3].y);
+
+            for (border = stroker->borders, side = 0; side <= 1;
+                 side++, border++) {
+                rotate = SW_FT_SIDE_TO_ROTATE(side);
+
+                /* compute control points */
+                SW_FT_Vector_From_Polar(&ctrl1, length1, phi1 + rotate);
+                ctrl1.x += arc[2].x;
+                ctrl1.y += arc[2].y;
+
+                SW_FT_Vector_From_Polar(&ctrl2, length2, phi2 + rotate);
+                ctrl2.x += arc[1].x;
+                ctrl2.y += arc[1].y;
+
+                /* compute end point */
+                SW_FT_Vector_From_Polar(&end, stroker->radius,
+                                        angle_out + rotate);
+                end.x += arc[0].x;
+                end.y += arc[0].y;
+
+                if (stroker->handle_wide_strokes) {
+                    SW_FT_Vector start;
+                    SW_FT_Angle alpha1;
+
+                    /* determine whether the border radius is greater than the
+                     */
+                    /* radius of curvature of the original arc */
+                    start = border->points[border->num_points - 1];
+
+                    alpha1 = SW_FT_Atan2(end.x - start.x, end.y - start.y);
+
+                    /* is the direction of the border arc opposite to */
+                    /* that of the original arc? */
+                    if (ft_pos_abs(SW_FT_Angle_Diff(alpha0, alpha1)) >
+                        SW_FT_ANGLE_PI / 2) {
+                        SW_FT_Angle beta, gamma;
+                        SW_FT_Vector bvec, delta;
+                        SW_FT_Fixed blen, sinA, sinB, alen;
+
+                        /* use the sine rule to find the intersection point */
+                        beta =
+                            SW_FT_Atan2(arc[3].x - start.x, arc[3].y - start.y);
+                        gamma = SW_FT_Atan2(arc[0].x - end.x, arc[0].y - end.y);
+
+                        bvec.x = end.x - start.x;
+                        bvec.y = end.y - start.y;
+
+                        blen = SW_FT_Vector_Length(&bvec);
+
+                        sinA = ft_pos_abs(SW_FT_Sin(alpha1 - gamma));
+                        sinB = ft_pos_abs(SW_FT_Sin(beta - gamma));
+
+                        alen = SW_FT_MulDiv(blen, sinA, sinB);
+
+                        SW_FT_Vector_From_Polar(&delta, alen, beta);
+                        delta.x += start.x;
+                        delta.y += start.y;
+
+                        /* circumnavigate the negative sector backwards */
+                        border->movable = FALSE;
+                        error = ft_stroke_border_lineto(border, &delta, FALSE);
+                        if (error) goto Exit;
+                        error = ft_stroke_border_lineto(border, &end, FALSE);
+                        if (error) goto Exit;
+                        error = ft_stroke_border_cubicto(border, &ctrl2, &ctrl1,
+                                                         &start);
+                        if (error) goto Exit;
+                        /* and then move to the endpoint */
+                        error = ft_stroke_border_lineto(border, &end, FALSE);
+                        if (error) goto Exit;
+
+                        continue;
+                    }
+
+                    /* else fall through */
+                }
+
+                /* simply add an arc */
+                error = ft_stroke_border_cubicto(border, &ctrl1, &ctrl2, &end);
+                if (error) goto Exit;
+            }
+        }
+
+        /* pop this sub-arc and continue with the previous one */
+        arc -= 3;
+
+        stroker->angle_in = angle_out;
+    }
+
+    stroker->center = *to;
+
+Exit:
+    return error;
+}
+
+/* documentation is in ftstroke.h */
+
+/* Start a new subpath at `to'.  `open' selects whether the subpath is  */
+/* stroked as an opened path (capped at both ends) or as a closed loop; */
+/* the first point's corner/cap is deferred to SW_FT_Stroker_EndSubPath. */
+SW_FT_Error SW_FT_Stroker_BeginSubPath(SW_FT_Stroker stroker, SW_FT_Vector* to,
+                                       SW_FT_Bool open)
+{
+    /* We cannot process the first point, because there is not enough */
+    /* information regarding its corner/cap. The latter will be processed */
+    /* in the `SW_FT_Stroker_EndSubPath' routine. */
+    /* */
+    stroker->first_point = TRUE;
+    stroker->center = *to;
+    stroker->subpath_open = open;
+
+    /* Determine if we need to check whether the border radius is greater */
+    /* than the radius of curvature of a curve, to handle this case */
+    /* specially. This is only required if bevel joins or butt caps may */
+    /* be created, because round & miter joins and round & square caps */
+    /* cover the negative sector created with wide strokes. */
+    stroker->handle_wide_strokes =
+        SW_FT_BOOL(stroker->line_join != SW_FT_STROKER_LINEJOIN_ROUND ||
+                   (stroker->subpath_open &&
+                    stroker->line_cap == SW_FT_STROKER_LINECAP_BUTT));
+
+    /* record the subpath start point for each border */
+    stroker->subpath_start = *to;
+
+    stroker->angle_in = 0;
+
+    return 0;
+}
+
+/* Append the `left' border's points (from its recorded start index)    */
+/* onto the `right' border in reverse order, producing one continuous   */
+/* outline for an opened subpath.  When `open' is set, begin/end tags   */
+/* are cleared on the copied points; otherwise they are swapped.        */
+/* Both borders are marked non-movable afterwards.                      */
+static SW_FT_Error ft_stroker_add_reverse_left(SW_FT_Stroker stroker,
+                                               SW_FT_Bool open)
+{
+    SW_FT_StrokeBorder right = stroker->borders + 0;
+    SW_FT_StrokeBorder left = stroker->borders + 1;
+    SW_FT_Int new_points;
+    SW_FT_Error error = 0;
+
+    assert(left->start >= 0);
+
+    new_points = left->num_points - left->start;
+    if (new_points > 0) {
+        error = ft_stroke_border_grow(right, (SW_FT_UInt)new_points);
+        if (error) goto Exit;
+
+        {
+            SW_FT_Vector* dst_point = right->points + right->num_points;
+            SW_FT_Byte* dst_tag = right->tags + right->num_points;
+            SW_FT_Vector* src_point = left->points + left->num_points - 1;
+            SW_FT_Byte* src_tag = left->tags + left->num_points - 1;
+
+            /* copy the left border backwards onto the right border */
+            while (src_point >= left->points + left->start) {
+                *dst_point = *src_point;
+                *dst_tag = *src_tag;
+
+                if (open)
+                    dst_tag[0] &= ~SW_FT_STROKE_TAG_BEGIN_END;
+                else {
+                    SW_FT_Byte ttag =
+                        (SW_FT_Byte)(dst_tag[0] & SW_FT_STROKE_TAG_BEGIN_END);
+
+                    /* switch begin/end tags if necessary */
+                    if (ttag == SW_FT_STROKE_TAG_BEGIN ||
+                        ttag == SW_FT_STROKE_TAG_END)
+                        dst_tag[0] ^= SW_FT_STROKE_TAG_BEGIN_END;
+                }
+
+                src_point--;
+                src_tag--;
+                dst_point++;
+                dst_tag++;
+            }
+        }
+
+        /* the copied points now belong to the right border */
+        left->num_points = left->start;
+        right->num_points += new_points;
+
+        right->movable = FALSE;
+        left->movable = FALSE;
+    }
+
+Exit:
+    return error;
+}
+
+/* documentation is in ftstroke.h */
+
+/* there's a lot of magic in this function! */
+/* Finish the current subpath.  An opened subpath gets a cap at each    */
+/* end with the left border reversed onto the right; a closed subpath   */
+/* is closed with an implicit line back to its start if needed, the     */
+/* closing corner is processed, and both borders are closed separately. */
+SW_FT_Error SW_FT_Stroker_EndSubPath(SW_FT_Stroker stroker)
+{
+    SW_FT_Error error = 0;
+
+    if (stroker->subpath_open) {
+        SW_FT_StrokeBorder right = stroker->borders;
+
+        /* All right, this is an opened path, we need to add a cap between */
+        /* right & left, add the reverse of left, then add a final cap */
+        /* between left & right. */
+        error = ft_stroker_cap(stroker, stroker->angle_in, 0);
+        if (error) goto Exit;
+
+        /* add reversed points from `left' to `right' */
+        error = ft_stroker_add_reverse_left(stroker, TRUE);
+        if (error) goto Exit;
+
+        /* now add the final cap */
+        stroker->center = stroker->subpath_start;
+        error =
+            ft_stroker_cap(stroker, stroker->subpath_angle + SW_FT_ANGLE_PI, 0);
+        if (error) goto Exit;
+
+        /* Now end the right subpath accordingly. The left one is */
+        /* rewind and doesn't need further processing. */
+        ft_stroke_border_close(right, FALSE);
+    } else {
+        SW_FT_Angle turn;
+        SW_FT_Int inside_side;
+
+        /* close the path if needed */
+        if (stroker->center.x != stroker->subpath_start.x ||
+            stroker->center.y != stroker->subpath_start.y) {
+            error = SW_FT_Stroker_LineTo(stroker, &stroker->subpath_start);
+            if (error) goto Exit;
+        }
+
+        /* process the corner */
+        stroker->angle_out = stroker->subpath_angle;
+        turn = SW_FT_Angle_Diff(stroker->angle_in, stroker->angle_out);
+
+        /* no specific corner processing is required if the turn is 0 */
+        if (turn != 0) {
+            /* when we turn to the right, the inside side is 0 */
+            inside_side = 0;
+
+            /* otherwise, the inside side is 1 */
+            if (turn < 0) inside_side = 1;
+
+            error = ft_stroker_inside(stroker, inside_side,
+                                      stroker->subpath_line_length);
+            if (error) goto Exit;
+
+            /* process the outside side */
+            error = ft_stroker_outside(stroker, 1 - inside_side,
+                                       stroker->subpath_line_length);
+            if (error) goto Exit;
+        }
+
+        /* then end our two subpaths */
+        ft_stroke_border_close(stroker->borders + 0, FALSE);
+        ft_stroke_border_close(stroker->borders + 1, TRUE);
+    }
+
+Exit:
+    return error;
+}
+
+/* documentation is in ftstroke.h */
+
+/* Report the number of points and contours accumulated on one border   */
+/* (0 or 1).  The output pointers may be NULL; on invalid arguments     */
+/* the reported counts are zero and a negative error is returned.       */
+SW_FT_Error SW_FT_Stroker_GetBorderCounts(SW_FT_Stroker stroker,
+                                          SW_FT_StrokerBorder border,
+                                          SW_FT_UInt* anum_points,
+                                          SW_FT_UInt* anum_contours)
+{
+    SW_FT_UInt num_points = 0, num_contours = 0;
+    SW_FT_Error error;
+
+    if (!stroker || border > 1) {
+        error = -1; // SW_FT_THROW( Invalid_Argument );
+        goto Exit;
+    }
+
+    error = ft_stroke_border_get_counts(stroker->borders + border, &num_points,
+                                        &num_contours);
+Exit:
+    if (anum_points) *anum_points = num_points;
+
+    if (anum_contours) *anum_contours = num_contours;
+
+    return error;
+}
+
+/* documentation is in ftstroke.h */
+
+/* Report the total point/contour counts across both borders.  Unlike   */
+/* SW_FT_Stroker_GetBorderCounts, the output pointers are written       */
+/* unconditionally, so they must not be NULL.                           */
+SW_FT_Error SW_FT_Stroker_GetCounts(SW_FT_Stroker stroker,
+                                    SW_FT_UInt* anum_points,
+                                    SW_FT_UInt* anum_contours)
+{
+    SW_FT_UInt count1, count2, num_points = 0;
+    SW_FT_UInt count3, count4, num_contours = 0;
+    SW_FT_Error error;
+
+    error = ft_stroke_border_get_counts(stroker->borders + 0, &count1, &count2);
+    if (error) goto Exit;
+
+    error = ft_stroke_border_get_counts(stroker->borders + 1, &count3, &count4);
+    if (error) goto Exit;
+
+    num_points = count1 + count3;
+    num_contours = count2 + count4;
+
+Exit:
+    *anum_points = num_points;
+    *anum_contours = num_contours;
+    return error;
+}
+
+/* documentation is in ftstroke.h */
+
+/* Append one border's points/contours to `outline'.  Silently does     */
+/* nothing for an out-of-range border selector or a border that is not  */
+/* marked valid.  The outline's arrays are not resized here.            */
+void SW_FT_Stroker_ExportBorder(SW_FT_Stroker stroker,
+                                SW_FT_StrokerBorder border,
+                                SW_FT_Outline* outline)
+{
+    if (border == SW_FT_STROKER_BORDER_LEFT ||
+        border == SW_FT_STROKER_BORDER_RIGHT) {
+        SW_FT_StrokeBorder sborder = &stroker->borders[border];
+
+        if (sborder->valid) ft_stroke_border_export(sborder, outline);
+    }
+}
+
+/* documentation is in ftstroke.h */
+
+/* Convenience wrapper: export both borders into `outline'. */
+void SW_FT_Stroker_Export(SW_FT_Stroker stroker, SW_FT_Outline* outline)
+{
+    SW_FT_Stroker_ExportBorder(stroker, SW_FT_STROKER_BORDER_LEFT, outline);
+    SW_FT_Stroker_ExportBorder(stroker, SW_FT_STROKER_BORDER_RIGHT, outline);
+}
+
+/* documentation is in ftstroke.h */
+
+/*
+ * The following is very similar to SW_FT_Outline_Decompose, except
+ * that we do support opened paths, and do not scale the outline.
+ */
+/* Walk every contour of `outline' and feed it to the stroker as        */
+/* line/conic/cubic segments.  Unlike stock FreeType there is no        */
+/* `opened' argument: each contour's open/closed state comes from the   */
+/* outline's per-contour `contours_flag' array.  Rewinds the stroker    */
+/* first; returns 0, -1 (invalid argument), or -2 (invalid outline).    */
+SW_FT_Error SW_FT_Stroker_ParseOutline(SW_FT_Stroker stroker,
+                                       const SW_FT_Outline* outline)
+{
+    SW_FT_Vector v_last;
+    SW_FT_Vector v_control;
+    SW_FT_Vector v_start;
+
+    SW_FT_Vector* point;
+    SW_FT_Vector* limit;
+    char* tags;
+
+    SW_FT_Error error;
+
+    SW_FT_Int n; /* index of contour in outline */
+    SW_FT_UInt first; /* index of first point in contour */
+    SW_FT_Int tag; /* current point's state */
+
+    if (!outline || !stroker) return -1; // SW_FT_THROW( Invalid_Argument );
+
+    SW_FT_Stroker_Rewind(stroker);
+
+    first = 0;
+
+    for (n = 0; n < outline->n_contours; n++) {
+        SW_FT_UInt last; /* index of last point in contour */
+
+        last = outline->contours[n];
+        limit = outline->points + last;
+
+        /* skip empty points; we don't stroke these */
+        if (last <= first) {
+            first = last + 1;
+            continue;
+        }
+
+        v_start = outline->points[first];
+        v_last = outline->points[last];
+
+        v_control = v_start;
+
+        point = outline->points + first;
+        tags = outline->tags + first;
+        tag = SW_FT_CURVE_TAG(tags[0]);
+
+        /* A contour cannot start with a cubic control point! */
+        if (tag == SW_FT_CURVE_TAG_CUBIC) goto Invalid_Outline;
+
+        /* check first point to determine origin */
+        if (tag == SW_FT_CURVE_TAG_CONIC) {
+            /* First point is conic control. Yes, this happens. */
+            if (SW_FT_CURVE_TAG(outline->tags[last]) == SW_FT_CURVE_TAG_ON) {
+                /* start at last point if it is on the curve */
+                v_start = v_last;
+                limit--;
+            } else {
+                /* if both first and last points are conic, */
+                /* start at their middle */
+                v_start.x = (v_start.x + v_last.x) / 2;
+                v_start.y = (v_start.y + v_last.y) / 2;
+            }
+            point--;
+            tags--;
+        }
+
+        /* contours_flag[n] selects opened vs. closed stroking here */
+        error = SW_FT_Stroker_BeginSubPath(stroker, &v_start, outline->contours_flag[n]);
+        if (error) goto Exit;
+
+        while (point < limit) {
+            point++;
+            tags++;
+
+            tag = SW_FT_CURVE_TAG(tags[0]);
+            switch (tag) {
+                case SW_FT_CURVE_TAG_ON: /* emit a single line_to */
+                {
+                    SW_FT_Vector vec;
+
+                    vec.x = point->x;
+                    vec.y = point->y;
+
+                    error = SW_FT_Stroker_LineTo(stroker, &vec);
+                    if (error) goto Exit;
+                    continue;
+                }
+
+                case SW_FT_CURVE_TAG_CONIC: /* consume conic arcs */
+                    v_control.x = point->x;
+                    v_control.y = point->y;
+
+                Do_Conic:
+                    if (point < limit) {
+                        SW_FT_Vector vec;
+                        SW_FT_Vector v_middle;
+
+                        point++;
+                        tags++;
+                        tag = SW_FT_CURVE_TAG(tags[0]);
+
+                        vec = point[0];
+
+                        if (tag == SW_FT_CURVE_TAG_ON) {
+                            error =
+                                SW_FT_Stroker_ConicTo(stroker, &v_control, &vec);
+                            if (error) goto Exit;
+                            continue;
+                        }
+
+                        if (tag != SW_FT_CURVE_TAG_CONIC) goto Invalid_Outline;
+
+                        /* two consecutive conic controls: implicit on-point */
+                        /* lies at their midpoint */
+                        v_middle.x = (v_control.x + vec.x) / 2;
+                        v_middle.y = (v_control.y + vec.y) / 2;
+
+                        error =
+                            SW_FT_Stroker_ConicTo(stroker, &v_control, &v_middle);
+                        if (error) goto Exit;
+
+                        v_control = vec;
+                        goto Do_Conic;
+                    }
+
+                    error = SW_FT_Stroker_ConicTo(stroker, &v_control, &v_start);
+                    goto Close;
+
+                default: /* SW_FT_CURVE_TAG_CUBIC */
+                {
+                    SW_FT_Vector vec1, vec2;
+
+                    /* a cubic needs two consecutive control points */
+                    if (point + 1 > limit ||
+                        SW_FT_CURVE_TAG(tags[1]) != SW_FT_CURVE_TAG_CUBIC)
+                        goto Invalid_Outline;
+
+                    point += 2;
+                    tags += 2;
+
+                    vec1 = point[-2];
+                    vec2 = point[-1];
+
+                    if (point <= limit) {
+                        SW_FT_Vector vec;
+
+                        vec = point[0];
+
+                        error = SW_FT_Stroker_CubicTo(stroker, &vec1, &vec2, &vec);
+                        if (error) goto Exit;
+                        continue;
+                    }
+
+                    error = SW_FT_Stroker_CubicTo(stroker, &vec1, &vec2, &v_start);
+                    goto Close;
+                }
+            }
+        }
+
+    Close:
+        if (error) goto Exit;
+
+        /* don't try to end the path if no segments have been generated */
+        if (!stroker->first_point) {
+            error = SW_FT_Stroker_EndSubPath(stroker);
+            if (error) goto Exit;
+        }
+
+        first = last + 1;
+    }
+
+    return 0;
+
+Exit:
+    return error;
+
+Invalid_Outline:
+    return -2; // SW_FT_THROW( Invalid_Outline );
+}
+
+/* END */
diff --git a/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_stroker.h b/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_stroker.h
new file mode 100644
index 00000000..0fdeb276
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_stroker.h
@@ -0,0 +1,319 @@
+#ifndef V_FT_STROKER_H
+#define V_FT_STROKER_H
+/***************************************************************************/
+/* */
+/* ftstroke.h */
+/* */
+/* FreeType path stroker (specification). */
+/* */
+/* Copyright 2002-2006, 2008, 2009, 2011-2012 by */
+/* David Turner, Robert Wilhelm, and Werner Lemberg. */
+/* */
+/* This file is part of the FreeType project, and may only be used, */
+/* modified, and distributed under the terms of the FreeType project */
+/* license, LICENSE.TXT. By continuing to use, modify, or distribute */
+/* this file you indicate that you have read the license and */
+/* understand and accept it fully. */
+/* */
+/***************************************************************************/
+
+#include "vector_freetype_v_ft_raster.h"
+
+ /**************************************************************
+ *
+ * @type:
+ * SW_FT_Stroker
+ *
+ * @description:
+ * Opaque handler to a path stroker object.
+ */
+ typedef struct SW_FT_StrokerRec_* SW_FT_Stroker;
+
+
+ /**************************************************************
+ *
+ * @enum:
+ * SW_FT_Stroker_LineJoin
+ *
+ * @description:
+ * These values determine how two joining lines are rendered
+ * in a stroker.
+ *
+ * @values:
+ * SW_FT_STROKER_LINEJOIN_ROUND ::
+ * Used to render rounded line joins. Circular arcs are used
+ * to join two lines smoothly.
+ *
+ * SW_FT_STROKER_LINEJOIN_BEVEL ::
+ * Used to render beveled line joins. The outer corner of
+ * the joined lines is filled by enclosing the triangular
+ * region of the corner with a straight line between the
+ * outer corners of each stroke.
+ *
+ * SW_FT_STROKER_LINEJOIN_MITER_FIXED ::
+ * Used to render mitered line joins, with fixed bevels if the
+ * miter limit is exceeded. The outer edges of the strokes
+ * for the two segments are extended until they meet at an
+ * angle. If the segments meet at too sharp an angle (such
+ * that the miter would extend from the intersection of the
+ * segments a distance greater than the product of the miter
+ * limit value and the border radius), then a bevel join (see
+ * above) is used instead. This prevents long spikes being
+ * created. SW_FT_STROKER_LINEJOIN_MITER_FIXED generates a miter
+ * line join as used in PostScript and PDF.
+ *
+ * SW_FT_STROKER_LINEJOIN_MITER_VARIABLE ::
+ * SW_FT_STROKER_LINEJOIN_MITER ::
+ * Used to render mitered line joins, with variable bevels if
+ * the miter limit is exceeded. The intersection of the
+ * strokes is clipped at a line perpendicular to the bisector
+ * of the angle between the strokes, at the distance from the
+ * intersection of the segments equal to the product of the
+ * miter limit value and the border radius. This prevents
+ * long spikes being created.
+ * SW_FT_STROKER_LINEJOIN_MITER_VARIABLE generates a mitered line
+ * join as used in XPS. SW_FT_STROKER_LINEJOIN_MITER is an alias
+ * for SW_FT_STROKER_LINEJOIN_MITER_VARIABLE, retained for
+ * backwards compatibility.
+ */
+ typedef enum SW_FT_Stroker_LineJoin_
+ {
+ SW_FT_STROKER_LINEJOIN_ROUND = 0,
+ SW_FT_STROKER_LINEJOIN_BEVEL = 1,
+ SW_FT_STROKER_LINEJOIN_MITER_VARIABLE = 2,
+ SW_FT_STROKER_LINEJOIN_MITER = SW_FT_STROKER_LINEJOIN_MITER_VARIABLE,
+ SW_FT_STROKER_LINEJOIN_MITER_FIXED = 3
+
+ } SW_FT_Stroker_LineJoin;
+
+
+ /**************************************************************
+ *
+ * @enum:
+ * SW_FT_Stroker_LineCap
+ *
+ * @description:
+ * These values determine how the end of opened sub-paths are
+ * rendered in a stroke.
+ *
+ * @values:
+ * SW_FT_STROKER_LINECAP_BUTT ::
+ * The end of lines is rendered as a full stop on the last
+ * point itself.
+ *
+ * SW_FT_STROKER_LINECAP_ROUND ::
+ * The end of lines is rendered as a half-circle around the
+ * last point.
+ *
+ * SW_FT_STROKER_LINECAP_SQUARE ::
+ * The end of lines is rendered as a square around the
+ * last point.
+ */
+ typedef enum SW_FT_Stroker_LineCap_
+ {
+ SW_FT_STROKER_LINECAP_BUTT = 0,
+ SW_FT_STROKER_LINECAP_ROUND,
+ SW_FT_STROKER_LINECAP_SQUARE
+
+ } SW_FT_Stroker_LineCap;
+
+
+ /**************************************************************
+ *
+ * @enum:
+ * SW_FT_StrokerBorder
+ *
+ * @description:
+ * These values are used to select a given stroke border
+ * in @SW_FT_Stroker_GetBorderCounts and @SW_FT_Stroker_ExportBorder.
+ *
+ * @values:
+ * SW_FT_STROKER_BORDER_LEFT ::
+ * Select the left border, relative to the drawing direction.
+ *
+ * SW_FT_STROKER_BORDER_RIGHT ::
+ * Select the right border, relative to the drawing direction.
+ *
+ * @note:
+ * Applications are generally interested in the `inside' and `outside'
+ * borders. However, there is no direct mapping between these and the
+ * `left' and `right' ones, since this really depends on the glyph's
+ * drawing orientation, which varies between font formats.
+ *
+ * You can however use @SW_FT_Outline_GetInsideBorder and
+ * @SW_FT_Outline_GetOutsideBorder to get these.
+ */
+ typedef enum SW_FT_StrokerBorder_
+ {
+ SW_FT_STROKER_BORDER_LEFT = 0,
+ SW_FT_STROKER_BORDER_RIGHT
+
+ } SW_FT_StrokerBorder;
+
+
+ /**************************************************************
+ *
+ * @function:
+ * SW_FT_Stroker_New
+ *
+ * @description:
+ * Create a new stroker object.
+ *
+ * @input:
+ * library ::
+ * FreeType library handle.
+ *
+ * @output:
+ * astroker ::
+ * A new stroker object handle. NULL in case of error.
+ *
+ * @return:
+ * FreeType error code. 0~means success.
+ */
+ SW_FT_Error
+ SW_FT_Stroker_New( SW_FT_Stroker *astroker );
+
+
+ /**************************************************************
+ *
+ * @function:
+ * SW_FT_Stroker_Set
+ *
+ * @description:
+ * Reset a stroker object's attributes.
+ *
+ * @input:
+ * stroker ::
+ * The target stroker handle.
+ *
+ * radius ::
+ * The border radius.
+ *
+ * line_cap ::
+ * The line cap style.
+ *
+ * line_join ::
+ * The line join style.
+ *
+ * miter_limit ::
+ * The miter limit for the SW_FT_STROKER_LINEJOIN_MITER_FIXED and
+ * SW_FT_STROKER_LINEJOIN_MITER_VARIABLE line join styles,
+ * expressed as 16.16 fixed-point value.
+ *
+ * @note:
+ * The radius is expressed in the same units as the outline
+ * coordinates.
+ */
+ void
+ SW_FT_Stroker_Set( SW_FT_Stroker stroker,
+ SW_FT_Fixed radius,
+ SW_FT_Stroker_LineCap line_cap,
+ SW_FT_Stroker_LineJoin line_join,
+ SW_FT_Fixed miter_limit );
+
+ /**************************************************************
+ *
+ * @function:
+ * SW_FT_Stroker_ParseOutline
+ *
+ * @description:
+ * A convenience function used to parse a whole outline with
+ * the stroker. The resulting outline(s) can be retrieved
+ * later by functions like @SW_FT_Stroker_GetCounts and @SW_FT_Stroker_Export.
+ *
+ * @input:
+ * stroker ::
+ * The target stroker handle.
+ *
+ * outline ::
+ * The source outline.
+ *
+ *
+ * @return:
+ * FreeType error code. 0~means success.
+ *
+ * @note:
+   *   Whether each contour is stroked as an opened or a closed path is
+   *   taken from the outline's per-contour open flag.  A closed path
+   *   makes the stroker generate two distinct `border' outlines.
+ *
+ *
+ * This function calls @SW_FT_Stroker_Rewind automatically.
+ */
+ SW_FT_Error
+ SW_FT_Stroker_ParseOutline( SW_FT_Stroker stroker,
+ const SW_FT_Outline* outline);
+
+
+ /**************************************************************
+ *
+ * @function:
+ * SW_FT_Stroker_GetCounts
+ *
+ * @description:
+ * Call this function once you have finished parsing your paths
+ * with the stroker. It returns the number of points and
+ * contours necessary to export all points/borders from the stroked
+ * outline/path.
+ *
+ * @input:
+ * stroker ::
+ * The target stroker handle.
+ *
+ * @output:
+ * anum_points ::
+ * The number of points.
+ *
+ * anum_contours ::
+ * The number of contours.
+ *
+ * @return:
+ * FreeType error code. 0~means success.
+ */
+ SW_FT_Error
+ SW_FT_Stroker_GetCounts( SW_FT_Stroker stroker,
+ SW_FT_UInt *anum_points,
+ SW_FT_UInt *anum_contours );
+
+
+ /**************************************************************
+ *
+ * @function:
+ * SW_FT_Stroker_Export
+ *
+ * @description:
+ * Call this function after @SW_FT_Stroker_GetBorderCounts to
+ * export all borders to your own @SW_FT_Outline structure.
+ *
+ * Note that this function appends the border points and
+ * contours to your outline, but does not try to resize its
+ * arrays.
+ *
+ * @input:
+ * stroker ::
+ * The target stroker handle.
+ *
+ * outline ::
+ * The target outline handle.
+ */
+ void
+ SW_FT_Stroker_Export( SW_FT_Stroker stroker,
+ SW_FT_Outline* outline );
+
+
+ /**************************************************************
+ *
+ * @function:
+ * SW_FT_Stroker_Done
+ *
+ * @description:
+ * Destroy a stroker object.
+ *
+ * @input:
+ * stroker ::
+ * A stroker handle. Can be NULL.
+ */
+ void
+ SW_FT_Stroker_Done( SW_FT_Stroker stroker );
+
+
+#endif // V_FT_STROKER_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_types.h b/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_types.h
new file mode 100644
index 00000000..a01c4f28
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_types.h
@@ -0,0 +1,160 @@
+#ifndef V_FT_TYPES_H
+#define V_FT_TYPES_H
+
+/*************************************************************************/
+/* */
+/* <Type> */
+/* SW_FT_Fixed */
+/* */
+/* <Description> */
+/* This type is used to store 16.16 fixed-point values, like scaling */
+/* values or matrix coefficients. */
+/* */
+typedef signed long SW_FT_Fixed;
+
+
+/*************************************************************************/
+/* */
+/* <Type> */
+/* SW_FT_Int */
+/* */
+/* <Description> */
+/* A typedef for the int type. */
+/* */
+typedef signed int SW_FT_Int;
+
+
+/*************************************************************************/
+/* */
+/* <Type> */
+/* SW_FT_UInt */
+/* */
+/* <Description> */
+/* A typedef for the unsigned int type. */
+/* */
+typedef unsigned int SW_FT_UInt;
+
+
+/*************************************************************************/
+/* */
+/* <Type> */
+/* SW_FT_Long */
+/* */
+/* <Description> */
+/* A typedef for signed long. */
+/* */
+typedef signed long SW_FT_Long;
+
+
+/*************************************************************************/
+/* */
+/* <Type> */
+/* SW_FT_ULong */
+/* */
+/* <Description> */
+/* A typedef for unsigned long. */
+/* */
+typedef unsigned long SW_FT_ULong;
+
+/*************************************************************************/
+/* */
+/* <Type> */
+/* SW_FT_Short */
+/* */
+/* <Description> */
+/* A typedef for signed short. */
+/* */
+typedef signed short SW_FT_Short;
+
+
+/*************************************************************************/
+/* */
+/* <Type> */
+/* SW_FT_Byte */
+/* */
+/* <Description> */
+/* A simple typedef for the _unsigned_ char type. */
+/* */
+typedef unsigned char SW_FT_Byte;
+
+
+/*************************************************************************/
+/* */
+/* <Type> */
+/* SW_FT_Bool */
+/* */
+/* <Description> */
+/* A typedef of unsigned char, used for simple booleans. As usual, */
+/* values 1 and~0 represent true and false, respectively. */
+/* */
+typedef unsigned char SW_FT_Bool;
+
+
+
+/*************************************************************************/
+/* */
+/* <Type> */
+/* SW_FT_Error */
+/* */
+/* <Description> */
+/* The FreeType error code type. A value of~0 is always interpreted */
+/* as a successful operation. */
+/* */
+typedef int SW_FT_Error;
+
+
+/*************************************************************************/
+/* */
+/* <Type> */
+/* SW_FT_Pos */
+/* */
+/* <Description> */
+/* The type SW_FT_Pos is used to store vectorial coordinates. Depending */
+/* on the context, these can represent distances in integer font */
+/* units, or 16.16, or 26.6 fixed-point pixel coordinates. */
+/* */
+typedef signed long SW_FT_Pos;
+
+
+/*************************************************************************/
+/* */
+/* <Struct> */
+/* SW_FT_Vector */
+/* */
+/* <Description> */
+/* A simple structure used to store a 2D vector; coordinates are of */
+/* the SW_FT_Pos type. */
+/* */
+/* <Fields> */
+/* x :: The horizontal coordinate. */
+/* y :: The vertical coordinate. */
+/* */
+typedef struct SW_FT_Vector_
+{
+ SW_FT_Pos x;
+ SW_FT_Pos y;
+
+} SW_FT_Vector;
+
+
+/* Fixed-width integer aliases used by the fixed-point math helpers. */
+typedef long long int SW_FT_Int64;
+typedef unsigned long long int SW_FT_UInt64;
+
+typedef signed int SW_FT_Int32;
+typedef unsigned int SW_FT_UInt32;
+
+
+/* Cast any scalar expression to SW_FT_Bool. */
+#define SW_FT_BOOL( x ) ( (SW_FT_Bool)( x ) )
+
+/* NOTE(review): hard-coded to 4, but sizeof(long) is 8 on LP64 */
+/* platforms (64-bit Linux/macOS).  Presumably intentional for this */
+/* port, but confirm no consumer relies on it matching sizeof(long). */
+#define SW_FT_SIZEOF_LONG 4
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+
+#endif // V_FT_TYPES_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_pixman_pixman-arm-neon-asm.S b/vendor/github.com/Benau/go_rlottie/vector_pixman_pixman-arm-neon-asm.S
new file mode 100644
index 00000000..71554ab5
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_pixman_pixman-arm-neon-asm.S
@@ -0,0 +1,500 @@
+#include "config.h"
+#ifdef USE_ARM_NEON
+/*
+ * Copyright © 2009 Nokia Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Siarhei Siamashka (siarhei.siamashka@nokia.com)
+ */
+
+/*
+ * This file contains implementations of NEON optimized pixel processing
+ * functions. There is no full and detailed tutorial, but some functions
+ * (those which are exposing some new or interesting features) are
+ * extensively commented and can be used as examples.
+ *
+ * You may want to have a look at the comments for following functions:
+ * - pixman_composite_over_8888_0565_asm_neon
+ * - pixman_composite_over_n_8_0565_asm_neon
+ */
+
+/* Prevent the stack from becoming executable for no reason... */
+#if defined(__linux__) && defined(__ELF__)
+.section .note.GNU-stack,"",%progbits
+#endif
+
+ .text
+ .fpu neon
+ .arch armv7a
+ .object_arch armv4
+ .eabi_attribute 10, 0 /* suppress Tag_FP_arch */
+ .eabi_attribute 12, 0 /* suppress Tag_Advanced_SIMD_arch */
+ .arm
+ .altmacro
+ .p2align 2
+
+
+//#include "pixman-arm-asm.h"
+/* Supplementary macro for setting function attributes */
+.macro pixman_asm_function fname
+ .func fname
+ .global fname
+#ifdef __ELF__
+ .hidden fname
+ .type fname, %function
+#endif
+fname:
+.endm
+
+//#include "pixman-private.h"
+/*
+ * The defines which are shared between C and assembly code
+ */
+
+/* bilinear interpolation precision (must be < 8) */
+#define BILINEAR_INTERPOLATION_BITS 7
+#define BILINEAR_INTERPOLATION_RANGE (1 << BILINEAR_INTERPOLATION_BITS)
+
+#include "vector_pixman_pixman-arm-neon-asm.h"
+
+/* Global configuration options and preferences */
+
+/*
+ * The code can optionally make use of unaligned memory accesses to improve
+ * performance of handling leading/trailing pixels for each scanline.
+ * Configuration variable RESPECT_STRICT_ALIGNMENT can be set to 0 for
+ * example in linux if unaligned memory accesses are not configured to
+ * generate.exceptions.
+ */
+.set RESPECT_STRICT_ALIGNMENT, 1
+
+/*
+ * Set default prefetch type. There is a choice between the following options:
+ *
+ * PREFETCH_TYPE_NONE (may be useful for the ARM cores where PLD is set to work
+ * as NOP to workaround some HW bugs or for whatever other reason)
+ *
+ * PREFETCH_TYPE_SIMPLE (may be useful for simple single-issue ARM cores where
+ * advanced prefetch intruduces heavy overhead)
+ *
+ * PREFETCH_TYPE_ADVANCED (useful for superscalar cores such as ARM Cortex-A8
+ * which can run ARM and NEON instructions simultaneously so that extra ARM
+ * instructions do not add (many) extra cycles, but improve prefetch efficiency)
+ *
+ * Note: some types of function can't support advanced prefetch and fallback
+ * to simple one (those which handle 24bpp pixels)
+ */
+.set PREFETCH_TYPE_DEFAULT, PREFETCH_TYPE_ADVANCED
+
+/* Prefetch distance in pixels for simple prefetch */
+.set PREFETCH_DISTANCE_SIMPLE, 64
+
+/*
+ * Implementation of pixman_composite_over_8888_0565_asm_neon
+ *
+ * This function takes a8r8g8b8 source buffer, r5g6b5 destination buffer and
+ * performs OVER compositing operation. Function fast_composite_over_8888_0565
+ * from pixman-fast-path.c does the same in C and can be used as a reference.
+ *
+ * First we need to have some NEON assembly code which can do the actual
+ * operation on the pixels and provide it to the template macro.
+ *
+ * Template macro quite conveniently takes care of emitting all the necessary
+ * code for memory reading and writing (including quite tricky cases of
+ * handling unaligned leading/trailing pixels), so we only need to deal with
+ * the data in NEON registers.
+ *
+ * NEON registers allocation in general is recommented to be the following:
+ * d0, d1, d2, d3 - contain loaded source pixel data
+ * d4, d5, d6, d7 - contain loaded destination pixels (if they are needed)
+ * d24, d25, d26, d27 - contain loading mask pixel data (if mask is used)
+ * d28, d29, d30, d31 - place for storing the result (destination pixels)
+ *
+ * As can be seen above, four 64-bit NEON registers are used for keeping
+ * intermediate pixel data and up to 8 pixels can be processed in one step
+ * for 32bpp formats (16 pixels for 16bpp, 32 pixels for 8bpp).
+ *
+ * This particular function uses the following registers allocation:
+ * d0, d1, d2, d3 - contain loaded source pixel data
+ * d4, d5 - contain loaded destination pixels (they are needed)
+ * d28, d29 - place for storing the result (destination pixels)
+ */
+
+/*
+ * Step one. We need to have some code to do some arithmetics on pixel data.
+ * This is implemented as a pair of macros: '*_head' and '*_tail'. When used
+ * back-to-back, they take pixel data from {d0, d1, d2, d3} and {d4, d5},
+ * perform all the needed calculations and write the result to {d28, d29}.
+ * The rationale for having two macros and not just one will be explained
+ * later. In practice, any single monolitic function which does the work can
+ * be split into two parts in any arbitrary way without affecting correctness.
+ *
+ * There is one special trick here too. Common template macro can optionally
+ * make our life a bit easier by doing R, G, B, A color components
+ * deinterleaving for 32bpp pixel formats (and this feature is used in
+ * 'pixman_composite_over_8888_0565_asm_neon' function). So it means that
+ * instead of having 8 packed pixels in {d0, d1, d2, d3} registers, we
+ * actually use d0 register for blue channel (a vector of eight 8-bit
+ * values), d1 register for green, d2 for red and d3 for alpha. This
+ * simple conversion can be also done with a few NEON instructions:
+ *
+ * Packed to planar conversion:
+ * vuzp.8 d0, d1
+ * vuzp.8 d2, d3
+ * vuzp.8 d1, d3
+ * vuzp.8 d0, d2
+ *
+ * Planar to packed conversion:
+ * vzip.8 d0, d2
+ * vzip.8 d1, d3
+ * vzip.8 d2, d3
+ * vzip.8 d0, d1
+ *
+ * But pixel can be loaded directly in planar format using VLD4.8 NEON
+ * instruction. It is 1 cycle slower than VLD1.32, so this is not always
+ * desirable, that's why deinterleaving is optional.
+ *
+ * But anyway, here is the code:
+ */
+
+/*
+ * OK, now we got almost everything that we need. Using the above two
+ * macros, the work can be done right. But now we want to optimize
+ * it a bit. ARM Cortex-A8 is an in-order core, and benefits really
+ * a lot from good code scheduling and software pipelining.
+ *
+ * Let's construct some code, which will run in the core main loop.
+ * Some pseudo-code of the main loop will look like this:
+ * head
+ * while (...) {
+ * tail
+ * head
+ * }
+ * tail
+ *
+ * It may look a bit weird, but this setup allows to hide instruction
+ * latencies better and also utilize dual-issue capability more
+ * efficiently (make pairs of load-store and ALU instructions).
+ *
+ * So what we need now is a '*_tail_head' macro, which will be used
+ * in the core main loop. A trivial straightforward implementation
+ * of this macro would look like this:
+ *
+ * pixman_composite_over_8888_0565_process_pixblock_tail
+ * vst1.16 {d28, d29}, [DST_W, :128]!
+ * vld1.16 {d4, d5}, [DST_R, :128]!
+ * vld4.32 {d0, d1, d2, d3}, [SRC]!
+ * pixman_composite_over_8888_0565_process_pixblock_head
+ * cache_preload 8, 8
+ *
+ * Now it also got some VLD/VST instructions. We simply can't move from
+ * processing one block of pixels to the other one with just arithmetics.
+ * The previously processed data needs to be written to memory and new
+ * data needs to be fetched. Fortunately, this main loop does not deal
+ * with partial leading/trailing pixels and can load/store a full block
+ * of pixels in a bulk. Additionally, destination buffer is already
+ * 16 bytes aligned here (which is good for performance).
+ *
+ * New things here are DST_R, DST_W, SRC and MASK identifiers. These
+ * are the aliases for ARM registers which are used as pointers for
+ * accessing data. We maintain separate pointers for reading and writing
+ * destination buffer (DST_R and DST_W).
+ *
+ * Another new thing is 'cache_preload' macro. It is used for prefetching
+ * data into CPU L2 cache and improve performance when dealing with large
+ * images which are far larger than cache size. It uses one argument
+ * (actually two, but they need to be the same here) - number of pixels
+ * in a block. Looking into 'pixman-arm-neon-asm.h' can provide some
+ * details about this macro. Moreover, if good performance is needed
+ * the code from this macro needs to be copied into '*_tail_head' macro
+ * and mixed with the rest of code for optimal instructions scheduling.
+ * We are actually doing it below.
+ *
+ * Now after all the explanations, here is the optimized code.
+ * Different instruction streams (originaling from '*_head', '*_tail'
+ * and 'cache_preload' macro) use different indentation levels for
+ * better readability. Actually taking the code from one of these
+ * indentation levels and ignoring a few VLD/VST instructions would
+ * result in exactly the code from '*_head', '*_tail' or 'cache_preload'
+ * macro!
+ */
+
+/*
+ * And now the final part. We are using 'generate_composite_function' macro
+ * to put all the stuff together. We are specifying the name of the function
+ * which we want to get, number of bits per pixel for the source, mask and
+ * destination (0 if unused, like mask in this case). Next come some bit
+ * flags:
+ * FLAG_DST_READWRITE - tells that the destination buffer is both read
+ * and written, for write-only buffer we would use
+ * FLAG_DST_WRITEONLY flag instead
+ * FLAG_DEINTERLEAVE_32BPP - tells that we prefer to work with planar data
+ * and separate color channels for 32bpp format.
+ * The next things are:
+ * - the number of pixels processed per iteration (8 in this case, because
+ * that's the maximum what can fit into four 64-bit NEON registers).
+ * - prefetch distance, measured in pixel blocks. In this case it is 5 times
+ * by 8 pixels. That would be 40 pixels, or up to 160 bytes. Optimal
+ * prefetch distance can be selected by running some benchmarks.
+ *
+ * After that we specify some macros, these are 'default_init',
+ * 'default_cleanup' here which are empty (but it is possible to have custom
+ * init/cleanup macros to be able to save/restore some extra NEON registers
+ * like d8-d15 or do anything else) followed by
+ * 'pixman_composite_over_8888_0565_process_pixblock_head',
+ * 'pixman_composite_over_8888_0565_process_pixblock_tail' and
+ * 'pixman_composite_over_8888_0565_process_pixblock_tail_head'
+ * which we got implemented above.
+ *
+ * The last part is the NEON registers allocation scheme.
+ */
+
+/******************************************************************************/
+
+/******************************************************************************/
+ .macro pixman_composite_out_reverse_8888_8888_process_pixblock_head
+ vmvn.8 d24, d3 /* get inverted alpha */
+ /* do alpha blending */
+ vmull.u8 q8, d24, d4
+ vmull.u8 q9, d24, d5
+ vmull.u8 q10, d24, d6
+ vmull.u8 q11, d24, d7
+ .endm
+
+ .macro pixman_composite_out_reverse_8888_8888_process_pixblock_tail
+ vrshr.u16 q14, q8, #8
+ vrshr.u16 q15, q9, #8
+ vrshr.u16 q12, q10, #8
+ vrshr.u16 q13, q11, #8
+ vraddhn.u16 d28, q14, q8
+ vraddhn.u16 d29, q15, q9
+ vraddhn.u16 d30, q12, q10
+ vraddhn.u16 d31, q13, q11
+ .endm
+
+/******************************************************************************/
+
+.macro pixman_composite_over_8888_8888_process_pixblock_head
+ pixman_composite_out_reverse_8888_8888_process_pixblock_head
+.endm
+
+.macro pixman_composite_over_8888_8888_process_pixblock_tail
+ pixman_composite_out_reverse_8888_8888_process_pixblock_tail
+ vqadd.u8 q14, q0, q14
+ vqadd.u8 q15, q1, q15
+.endm
+
+.macro pixman_composite_over_8888_8888_process_pixblock_tail_head
+ vld4.8 {d4, d5, d6, d7}, [DST_R, :128]!
+ vrshr.u16 q14, q8, #8
+ PF add PF_X, PF_X, #8
+ PF tst PF_CTL, #0xF
+ vrshr.u16 q15, q9, #8
+ vrshr.u16 q12, q10, #8
+ vrshr.u16 q13, q11, #8
+ PF addne PF_X, PF_X, #8
+ PF subne PF_CTL, PF_CTL, #1
+ vraddhn.u16 d28, q14, q8
+ vraddhn.u16 d29, q15, q9
+ PF cmp PF_X, ORIG_W
+ vraddhn.u16 d30, q12, q10
+ vraddhn.u16 d31, q13, q11
+ vqadd.u8 q14, q0, q14
+ vqadd.u8 q15, q1, q15
+ fetch_src_pixblock
+ PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
+ vmvn.8 d22, d3
+ PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
+ vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
+ PF subge PF_X, PF_X, ORIG_W
+ vmull.u8 q8, d22, d4
+ PF subges PF_CTL, PF_CTL, #0x10
+ vmull.u8 q9, d22, d5
+ PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
+ vmull.u8 q10, d22, d6
+ PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
+ vmull.u8 q11, d22, d7
+.endm
+
+generate_composite_function \
+ pixman_composite_over_8888_8888_asm_neon, 32, 0, 32, \
+ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+ 8, /* number of pixels, processed in a single block */ \
+ 5, /* prefetch distance */ \
+ default_init, \
+ default_cleanup, \
+ pixman_composite_over_8888_8888_process_pixblock_head, \
+ pixman_composite_over_8888_8888_process_pixblock_tail, \
+ pixman_composite_over_8888_8888_process_pixblock_tail_head
+
+generate_composite_function_single_scanline \
+ pixman_composite_scanline_over_asm_neon, 32, 0, 32, \
+ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+ 8, /* number of pixels, processed in a single block */ \
+ default_init, \
+ default_cleanup, \
+ pixman_composite_over_8888_8888_process_pixblock_head, \
+ pixman_composite_over_8888_8888_process_pixblock_tail, \
+ pixman_composite_over_8888_8888_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_over_n_8888_process_pixblock_head
+ /* deinterleaved source pixels in {d0, d1, d2, d3} */
+ /* inverted alpha in {d24} */
+ /* destination pixels in {d4, d5, d6, d7} */
+ vmull.u8 q8, d24, d4
+ vmull.u8 q9, d24, d5
+ vmull.u8 q10, d24, d6
+ vmull.u8 q11, d24, d7
+.endm
+
+.macro pixman_composite_over_n_8888_process_pixblock_tail
+ vrshr.u16 q14, q8, #8
+ vrshr.u16 q15, q9, #8
+ vrshr.u16 q2, q10, #8
+ vrshr.u16 q3, q11, #8
+ vraddhn.u16 d28, q14, q8
+ vraddhn.u16 d29, q15, q9
+ vraddhn.u16 d30, q2, q10
+ vraddhn.u16 d31, q3, q11
+ vqadd.u8 q14, q0, q14
+ vqadd.u8 q15, q1, q15
+.endm
+
+.macro pixman_composite_over_n_8888_process_pixblock_tail_head
+ vrshr.u16 q14, q8, #8
+ vrshr.u16 q15, q9, #8
+ vrshr.u16 q2, q10, #8
+ vrshr.u16 q3, q11, #8
+ vraddhn.u16 d28, q14, q8
+ vraddhn.u16 d29, q15, q9
+ vraddhn.u16 d30, q2, q10
+ vraddhn.u16 d31, q3, q11
+ vld4.8 {d4, d5, d6, d7}, [DST_R, :128]!
+ vqadd.u8 q14, q0, q14
+ PF add PF_X, PF_X, #8
+ PF tst PF_CTL, #0x0F
+ PF addne PF_X, PF_X, #8
+ PF subne PF_CTL, PF_CTL, #1
+ vqadd.u8 q15, q1, q15
+ PF cmp PF_X, ORIG_W
+ vmull.u8 q8, d24, d4
+ PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
+ vmull.u8 q9, d24, d5
+ PF subge PF_X, PF_X, ORIG_W
+ vmull.u8 q10, d24, d6
+ PF subges PF_CTL, PF_CTL, #0x10
+ vmull.u8 q11, d24, d7
+ PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
+ vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
+.endm
+
+.macro pixman_composite_over_n_8888_init
+ add DUMMY, sp, #ARGS_STACK_OFFSET
+ vld1.32 {d3[0]}, [DUMMY]
+ vdup.8 d0, d3[0]
+ vdup.8 d1, d3[1]
+ vdup.8 d2, d3[2]
+ vdup.8 d3, d3[3]
+ vmvn.8 d24, d3 /* get inverted alpha */
+.endm
+
+generate_composite_function \
+ pixman_composite_over_n_8888_asm_neon, 0, 0, 32, \
+ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+ 8, /* number of pixels, processed in a single block */ \
+ 5, /* prefetch distance */ \
+ pixman_composite_over_n_8888_init, \
+ default_cleanup, \
+ pixman_composite_over_8888_8888_process_pixblock_head, \
+ pixman_composite_over_8888_8888_process_pixblock_tail, \
+ pixman_composite_over_n_8888_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_src_n_8888_process_pixblock_head
+.endm
+
+.macro pixman_composite_src_n_8888_process_pixblock_tail
+.endm
+
+.macro pixman_composite_src_n_8888_process_pixblock_tail_head
+ vst1.32 {d0, d1, d2, d3}, [DST_W, :128]!
+.endm
+
+.macro pixman_composite_src_n_8888_init
+ add DUMMY, sp, #ARGS_STACK_OFFSET
+ vld1.32 {d0[0]}, [DUMMY]
+ vsli.u64 d0, d0, #32
+ vorr d1, d0, d0
+ vorr q1, q0, q0
+.endm
+
+.macro pixman_composite_src_n_8888_cleanup
+.endm
+
+generate_composite_function \
+ pixman_composite_src_n_8888_asm_neon, 0, 0, 32, \
+ FLAG_DST_WRITEONLY, \
+ 8, /* number of pixels, processed in a single block */ \
+ 0, /* prefetch distance */ \
+ pixman_composite_src_n_8888_init, \
+ pixman_composite_src_n_8888_cleanup, \
+ pixman_composite_src_n_8888_process_pixblock_head, \
+ pixman_composite_src_n_8888_process_pixblock_tail, \
+ pixman_composite_src_n_8888_process_pixblock_tail_head, \
+ 0, /* dst_w_basereg */ \
+ 0, /* dst_r_basereg */ \
+ 0, /* src_basereg */ \
+ 0 /* mask_basereg */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_8888_8888_process_pixblock_head
+.endm
+
+.macro pixman_composite_src_8888_8888_process_pixblock_tail
+.endm
+
+.macro pixman_composite_src_8888_8888_process_pixblock_tail_head
+ vst1.32 {d0, d1, d2, d3}, [DST_W, :128]!
+ fetch_src_pixblock
+ cache_preload 8, 8
+.endm
+
+generate_composite_function \
+ pixman_composite_src_8888_8888_asm_neon, 32, 0, 32, \
+ FLAG_DST_WRITEONLY, \
+ 8, /* number of pixels, processed in a single block */ \
+ 10, /* prefetch distance */ \
+ default_init, \
+ default_cleanup, \
+ pixman_composite_src_8888_8888_process_pixblock_head, \
+ pixman_composite_src_8888_8888_process_pixblock_tail, \
+ pixman_composite_src_8888_8888_process_pixblock_tail_head, \
+ 0, /* dst_w_basereg */ \
+ 0, /* dst_r_basereg */ \
+ 0, /* src_basereg */ \
+ 0 /* mask_basereg */
+
+/******************************************************************************/
+#endif
diff --git a/vendor/github.com/Benau/go_rlottie/vector_pixman_pixman-arm-neon-asm.h b/vendor/github.com/Benau/go_rlottie/vector_pixman_pixman-arm-neon-asm.h
new file mode 100644
index 00000000..6add220a
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_pixman_pixman-arm-neon-asm.h
@@ -0,0 +1,1126 @@
+/*
+ * Copyright © 2009 Nokia Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Siarhei Siamashka (siarhei.siamashka@nokia.com)
+ */
+
+/*
+ * This file contains a macro ('generate_composite_function') which can
+ * construct 2D image processing functions, based on a common template.
+ * Any combinations of source, destination and mask images with 8bpp,
+ * 16bpp, 24bpp, 32bpp color formats are supported.
+ *
+ * This macro takes care of:
+ * - handling of leading and trailing unaligned pixels
+ * - doing most of the work related to L2 cache preload
+ * - encourages the use of software pipelining for better instructions
+ * scheduling
+ *
+ * The user of this macro has to provide some configuration parameters
+ * (bit depths for the images, prefetch distance, etc.) and a set of
+ * macros, which should implement basic code chunks responsible for
+ * pixels processing. See 'pixman-arm-neon-asm.S' file for the usage
+ * examples.
+ *
+ * TODO:
+ * - try overlapped pixel method (from Ian Rickards) when processing
+ * exactly two blocks of pixels
+ * - maybe add an option to do reverse scanline processing
+ */
+
+/*
+ * Bit flags for 'generate_composite_function' macro which are used
+ * to tune generated functions behavior.
+ */
+.set FLAG_DST_WRITEONLY, 0
+.set FLAG_DST_READWRITE, 1
+.set FLAG_DEINTERLEAVE_32BPP, 2
+
+/*
+ * Offset in stack where mask and source pointer/stride can be accessed
+ * from 'init' macro. This is useful for doing special handling for solid mask.
+ */
+.set ARGS_STACK_OFFSET, 40
+
+/*
+ * Constants for selecting preferable prefetch type.
+ */
+.set PREFETCH_TYPE_NONE, 0 /* No prefetch at all */
+.set PREFETCH_TYPE_SIMPLE, 1 /* A simple, fixed-distance-ahead prefetch */
+.set PREFETCH_TYPE_ADVANCED, 2 /* Advanced fine-grained prefetch */
+
+/*
+ * Definitions of supplementary pixld/pixst macros (for partial load/store of
+ * pixel data).
+ */
+
+.macro pixldst1 op, elem_size, reg1, mem_operand, abits
+.if abits > 0
+ op&.&elem_size {d&reg1}, [&mem_operand&, :&abits&]!
+.else
+ op&.&elem_size {d&reg1}, [&mem_operand&]!
+.endif
+.endm
+
+.macro pixldst2 op, elem_size, reg1, reg2, mem_operand, abits
+.if abits > 0
+ op&.&elem_size {d&reg1, d&reg2}, [&mem_operand&, :&abits&]!
+.else
+ op&.&elem_size {d&reg1, d&reg2}, [&mem_operand&]!
+.endif
+.endm
+
+.macro pixldst4 op, elem_size, reg1, reg2, reg3, reg4, mem_operand, abits
+.if abits > 0
+ op&.&elem_size {d&reg1, d&reg2, d&reg3, d&reg4}, [&mem_operand&, :&abits&]!
+.else
+ op&.&elem_size {d&reg1, d&reg2, d&reg3, d&reg4}, [&mem_operand&]!
+.endif
+.endm
+
+.macro pixldst0 op, elem_size, reg1, idx, mem_operand, abits
+ op&.&elem_size {d&reg1[idx]}, [&mem_operand&]!
+.endm
+
+.macro pixldst3 op, elem_size, reg1, reg2, reg3, mem_operand
+ op&.&elem_size {d&reg1, d&reg2, d&reg3}, [&mem_operand&]!
+.endm
+
+.macro pixldst30 op, elem_size, reg1, reg2, reg3, idx, mem_operand
+ op&.&elem_size {d&reg1[idx], d&reg2[idx], d&reg3[idx]}, [&mem_operand&]!
+.endm
+
+.macro pixldst numbytes, op, elem_size, basereg, mem_operand, abits
+.if numbytes == 32
+ pixldst4 op, elem_size, %(basereg+4), %(basereg+5), \
+ %(basereg+6), %(basereg+7), mem_operand, abits
+.elseif numbytes == 16
+ pixldst2 op, elem_size, %(basereg+2), %(basereg+3), mem_operand, abits
+.elseif numbytes == 8
+ pixldst1 op, elem_size, %(basereg+1), mem_operand, abits
+.elseif numbytes == 4
+ .if !RESPECT_STRICT_ALIGNMENT || (elem_size == 32)
+ pixldst0 op, 32, %(basereg+0), 1, mem_operand, abits
+ .elseif elem_size == 16
+ pixldst0 op, 16, %(basereg+0), 2, mem_operand, abits
+ pixldst0 op, 16, %(basereg+0), 3, mem_operand, abits
+ .else
+ pixldst0 op, 8, %(basereg+0), 4, mem_operand, abits
+ pixldst0 op, 8, %(basereg+0), 5, mem_operand, abits
+ pixldst0 op, 8, %(basereg+0), 6, mem_operand, abits
+ pixldst0 op, 8, %(basereg+0), 7, mem_operand, abits
+ .endif
+.elseif numbytes == 2
+ .if !RESPECT_STRICT_ALIGNMENT || (elem_size == 16)
+ pixldst0 op, 16, %(basereg+0), 1, mem_operand, abits
+ .else
+ pixldst0 op, 8, %(basereg+0), 2, mem_operand, abits
+ pixldst0 op, 8, %(basereg+0), 3, mem_operand, abits
+ .endif
+.elseif numbytes == 1
+ pixldst0 op, 8, %(basereg+0), 1, mem_operand, abits
+.else
+ .error "unsupported size: numbytes"
+.endif
+.endm
+
+.macro pixld numpix, bpp, basereg, mem_operand, abits=0
+.if bpp > 0
+.if (bpp == 32) && (numpix == 8) && (DEINTERLEAVE_32BPP_ENABLED != 0)
+ pixldst4 vld4, 8, %(basereg+4), %(basereg+5), \
+ %(basereg+6), %(basereg+7), mem_operand, abits
+.elseif (bpp == 24) && (numpix == 8)
+ pixldst3 vld3, 8, %(basereg+3), %(basereg+4), %(basereg+5), mem_operand
+.elseif (bpp == 24) && (numpix == 4)
+ pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 4, mem_operand
+ pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 5, mem_operand
+ pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 6, mem_operand
+ pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 7, mem_operand
+.elseif (bpp == 24) && (numpix == 2)
+ pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 2, mem_operand
+ pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 3, mem_operand
+.elseif (bpp == 24) && (numpix == 1)
+ pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 1, mem_operand
+.else
+ pixldst %(numpix * bpp / 8), vld1, %(bpp), basereg, mem_operand, abits
+.endif
+.endif
+.endm
+
+.macro pixst numpix, bpp, basereg, mem_operand, abits=0
+.if bpp > 0
+.if (bpp == 32) && (numpix == 8) && (DEINTERLEAVE_32BPP_ENABLED != 0)
+ pixldst4 vst4, 8, %(basereg+4), %(basereg+5), \
+ %(basereg+6), %(basereg+7), mem_operand, abits
+.elseif (bpp == 24) && (numpix == 8)
+ pixldst3 vst3, 8, %(basereg+3), %(basereg+4), %(basereg+5), mem_operand
+.elseif (bpp == 24) && (numpix == 4)
+ pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 4, mem_operand
+ pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 5, mem_operand
+ pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 6, mem_operand
+ pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 7, mem_operand
+.elseif (bpp == 24) && (numpix == 2)
+ pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 2, mem_operand
+ pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 3, mem_operand
+.elseif (bpp == 24) && (numpix == 1)
+ pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 1, mem_operand
+.else
+ pixldst %(numpix * bpp / 8), vst1, %(bpp), basereg, mem_operand, abits
+.endif
+.endif
+.endm
+
+.macro pixld_a numpix, bpp, basereg, mem_operand
+.if (bpp * numpix) <= 128
+ pixld numpix, bpp, basereg, mem_operand, %(bpp * numpix)
+.else
+ pixld numpix, bpp, basereg, mem_operand, 128
+.endif
+.endm
+
+.macro pixst_a numpix, bpp, basereg, mem_operand
+.if (bpp * numpix) <= 128
+ pixst numpix, bpp, basereg, mem_operand, %(bpp * numpix)
+.else
+ pixst numpix, bpp, basereg, mem_operand, 128
+.endif
+.endm
+
+/*
+ * Pixel fetcher for nearest scaling (needs TMP1, TMP2, VX, UNIT_X register
+ * aliases to be defined)
+ */
+.macro pixld1_s elem_size, reg1, mem_operand
+.if elem_size == 16
+ mov TMP1, VX, asr #16
+ adds VX, VX, UNIT_X
+5: subpls VX, VX, SRC_WIDTH_FIXED
+ bpl 5b
+ add TMP1, mem_operand, TMP1, asl #1
+ mov TMP2, VX, asr #16
+ adds VX, VX, UNIT_X
+5: subpls VX, VX, SRC_WIDTH_FIXED
+ bpl 5b
+ add TMP2, mem_operand, TMP2, asl #1
+ vld1.16 {d&reg1&[0]}, [TMP1, :16]
+ mov TMP1, VX, asr #16
+ adds VX, VX, UNIT_X
+5: subpls VX, VX, SRC_WIDTH_FIXED
+ bpl 5b
+ add TMP1, mem_operand, TMP1, asl #1
+ vld1.16 {d&reg1&[1]}, [TMP2, :16]
+ mov TMP2, VX, asr #16
+ adds VX, VX, UNIT_X
+5: subpls VX, VX, SRC_WIDTH_FIXED
+ bpl 5b
+ add TMP2, mem_operand, TMP2, asl #1
+ vld1.16 {d&reg1&[2]}, [TMP1, :16]
+ vld1.16 {d&reg1&[3]}, [TMP2, :16]
+.elseif elem_size == 32
+ mov TMP1, VX, asr #16
+ adds VX, VX, UNIT_X
+5: subpls VX, VX, SRC_WIDTH_FIXED
+ bpl 5b
+ add TMP1, mem_operand, TMP1, asl #2
+ mov TMP2, VX, asr #16
+ adds VX, VX, UNIT_X
+5: subpls VX, VX, SRC_WIDTH_FIXED
+ bpl 5b
+ add TMP2, mem_operand, TMP2, asl #2
+ vld1.32 {d&reg1&[0]}, [TMP1, :32]
+ vld1.32 {d&reg1&[1]}, [TMP2, :32]
+.else
+ .error "unsupported"
+.endif
+.endm
+
+.macro pixld2_s elem_size, reg1, reg2, mem_operand
+.if 0 /* elem_size == 32 */
+ mov TMP1, VX, asr #16
+ add VX, VX, UNIT_X, asl #1
+ add TMP1, mem_operand, TMP1, asl #2
+ mov TMP2, VX, asr #16
+ sub VX, VX, UNIT_X
+ add TMP2, mem_operand, TMP2, asl #2
+ vld1.32 {d&reg1&[0]}, [TMP1, :32]
+ mov TMP1, VX, asr #16
+ add VX, VX, UNIT_X, asl #1
+ add TMP1, mem_operand, TMP1, asl #2
+ vld1.32 {d&reg2&[0]}, [TMP2, :32]
+ mov TMP2, VX, asr #16
+ add VX, VX, UNIT_X
+ add TMP2, mem_operand, TMP2, asl #2
+ vld1.32 {d&reg1&[1]}, [TMP1, :32]
+ vld1.32 {d&reg2&[1]}, [TMP2, :32]
+.else
+ pixld1_s elem_size, reg1, mem_operand
+ pixld1_s elem_size, reg2, mem_operand
+.endif
+.endm
+
+.macro pixld0_s elem_size, reg1, idx, mem_operand
+.if elem_size == 16
+ mov TMP1, VX, asr #16
+ adds VX, VX, UNIT_X
+5: subpls VX, VX, SRC_WIDTH_FIXED
+ bpl 5b
+ add TMP1, mem_operand, TMP1, asl #1
+ vld1.16 {d&reg1&[idx]}, [TMP1, :16]
+.elseif elem_size == 32
+ mov TMP1, VX, asr #16
+ adds VX, VX, UNIT_X
+5: subpls VX, VX, SRC_WIDTH_FIXED
+ bpl 5b
+ add TMP1, mem_operand, TMP1, asl #2
+ vld1.32 {d&reg1&[idx]}, [TMP1, :32]
+.endif
+.endm
+
+.macro pixld_s_internal numbytes, elem_size, basereg, mem_operand
+.if numbytes == 32
+ pixld2_s elem_size, %(basereg+4), %(basereg+5), mem_operand
+ pixld2_s elem_size, %(basereg+6), %(basereg+7), mem_operand
+ pixdeinterleave elem_size, %(basereg+4)
+.elseif numbytes == 16
+ pixld2_s elem_size, %(basereg+2), %(basereg+3), mem_operand
+.elseif numbytes == 8
+ pixld1_s elem_size, %(basereg+1), mem_operand
+.elseif numbytes == 4
+ .if elem_size == 32
+ pixld0_s elem_size, %(basereg+0), 1, mem_operand
+ .elseif elem_size == 16
+ pixld0_s elem_size, %(basereg+0), 2, mem_operand
+ pixld0_s elem_size, %(basereg+0), 3, mem_operand
+ .else
+ pixld0_s elem_size, %(basereg+0), 4, mem_operand
+ pixld0_s elem_size, %(basereg+0), 5, mem_operand
+ pixld0_s elem_size, %(basereg+0), 6, mem_operand
+ pixld0_s elem_size, %(basereg+0), 7, mem_operand
+ .endif
+.elseif numbytes == 2
+ .if elem_size == 16
+ pixld0_s elem_size, %(basereg+0), 1, mem_operand
+ .else
+ pixld0_s elem_size, %(basereg+0), 2, mem_operand
+ pixld0_s elem_size, %(basereg+0), 3, mem_operand
+ .endif
+.elseif numbytes == 1
+ pixld0_s elem_size, %(basereg+0), 1, mem_operand
+.else
+ .error "unsupported size: numbytes"
+.endif
+.endm
+
+.macro pixld_s numpix, bpp, basereg, mem_operand
+.if bpp > 0
+ pixld_s_internal %(numpix * bpp / 8), %(bpp), basereg, mem_operand
+.endif
+.endm
+
+.macro vuzp8 reg1, reg2
+ vuzp.8 d&reg1, d&reg2
+.endm
+
+.macro vzip8 reg1, reg2
+ vzip.8 d&reg1, d&reg2
+.endm
+
+/* deinterleave B, G, R, A channels for eight 32bpp pixels in 4 registers */
+.macro pixdeinterleave bpp, basereg
+.if (bpp == 32) && (DEINTERLEAVE_32BPP_ENABLED != 0)
+ vuzp8 %(basereg+0), %(basereg+1)
+ vuzp8 %(basereg+2), %(basereg+3)
+ vuzp8 %(basereg+1), %(basereg+3)
+ vuzp8 %(basereg+0), %(basereg+2)
+.endif
+.endm
+
+/* interleave B, G, R, A channels for eight 32bpp pixels in 4 registers */
+.macro pixinterleave bpp, basereg
+.if (bpp == 32) && (DEINTERLEAVE_32BPP_ENABLED != 0)
+ vzip8 %(basereg+0), %(basereg+2)
+ vzip8 %(basereg+1), %(basereg+3)
+ vzip8 %(basereg+2), %(basereg+3)
+ vzip8 %(basereg+0), %(basereg+1)
+.endif
+.endm
+
+/*
+ * This is a macro for implementing cache preload. The main idea is that
+ * cache preload logic is mostly independent from the rest of pixels
+ * processing code. It starts at the top left pixel and moves forward
+ * across pixels and can jump across scanlines. Prefetch distance is
+ * handled in an 'incremental' way: it starts from 0 and advances to the
+ * optimal distance over time. After reaching optimal prefetch distance,
+ * it is kept constant. There are some checks which prevent prefetching
+ * unneeded pixel lines below the image (but it still can prefetch a bit
+ * more data on the right side of the image - not a big issue and may
+ * be actually helpful when rendering text glyphs). Additional trick is
+ * the use of LDR instruction for prefetch instead of PLD when moving to
+ * the next line, the point is that we have a high chance of getting TLB
+ * miss in this case, and PLD would be useless.
+ *
+ * This sounds like it may introduce a noticeable overhead (when working with
+ * fully cached data). But in reality, due to having a separate pipeline and
+ * instruction queue for NEON unit in ARM Cortex-A8, normal ARM code can
+ * execute simultaneously with NEON and be completely shadowed by it. Thus
+ * we get no performance overhead at all (*). This looks like a very nice
+ * feature of Cortex-A8, if used wisely. We don't have a hardware prefetcher,
+ * but still can implement some rather advanced prefetch logic in software
+ * for almost zero cost!
+ *
+ * (*) The overhead of the prefetcher is visible when running some trivial
+ * pixels processing like simple copy. Anyway, having prefetch is a must
+ * when working with the graphics data.
+ */
+.macro PF a, x:vararg
+.if (PREFETCH_TYPE_CURRENT == PREFETCH_TYPE_ADVANCED)
+ a x
+.endif
+.endm
+
+.macro cache_preload std_increment, boost_increment
+.if (src_bpp_shift >= 0) || (dst_r_bpp != 0) || (mask_bpp_shift >= 0)
+.if regs_shortage
+ PF ldr ORIG_W, [sp] /* If we are short on regs, ORIG_W is kept on stack */
+.endif
+.if std_increment != 0
+ PF add PF_X, PF_X, #std_increment
+.endif
+ PF tst PF_CTL, #0xF
+ PF addne PF_X, PF_X, #boost_increment
+ PF subne PF_CTL, PF_CTL, #1
+ PF cmp PF_X, ORIG_W
+.if src_bpp_shift >= 0
+ PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
+.endif
+.if dst_r_bpp != 0
+ PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
+.endif
+.if mask_bpp_shift >= 0
+ PF pld, [PF_MASK, PF_X, lsl #mask_bpp_shift]
+.endif
+ PF subge PF_X, PF_X, ORIG_W
+ PF subges PF_CTL, PF_CTL, #0x10
+.if src_bpp_shift >= 0
+ PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
+.endif
+.if dst_r_bpp != 0
+ PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
+.endif
+.if mask_bpp_shift >= 0
+ PF ldrgeb DUMMY, [PF_MASK, MASK_STRIDE, lsl #mask_bpp_shift]!
+.endif
+.endif
+.endm
+
+.macro cache_preload_simple
+.if (PREFETCH_TYPE_CURRENT == PREFETCH_TYPE_SIMPLE)
+.if src_bpp > 0
+ pld [SRC, #(PREFETCH_DISTANCE_SIMPLE * src_bpp / 8)]
+.endif
+.if dst_r_bpp > 0
+ pld [DST_R, #(PREFETCH_DISTANCE_SIMPLE * dst_r_bpp / 8)]
+.endif
+.if mask_bpp > 0
+ pld [MASK, #(PREFETCH_DISTANCE_SIMPLE * mask_bpp / 8)]
+.endif
+.endif
+.endm
+
+.macro fetch_mask_pixblock
+ pixld pixblock_size, mask_bpp, \
+ (mask_basereg - pixblock_size * mask_bpp / 64), MASK
+.endm
+
+/*
+ * Macro which is used to process leading pixels until destination
+ * pointer is properly aligned (at 16 bytes boundary). When destination
+ * buffer uses 16bpp format, this is unnecessary, or even pointless.
+ */
+.macro ensure_destination_ptr_alignment process_pixblock_head, \
+ process_pixblock_tail, \
+ process_pixblock_tail_head
+.if dst_w_bpp != 24
+ tst DST_R, #0xF
+ beq 2f
+
+.irp lowbit, 1, 2, 4, 8, 16
+local skip1
+.if (dst_w_bpp <= (lowbit * 8)) && ((lowbit * 8) < (pixblock_size * dst_w_bpp))
+.if lowbit < 16 /* we don't need more than 16-byte alignment */
+ tst DST_R, #lowbit
+ beq 1f
+.endif
+ pixld_src (lowbit * 8 / dst_w_bpp), src_bpp, src_basereg, SRC
+ pixld (lowbit * 8 / dst_w_bpp), mask_bpp, mask_basereg, MASK
+.if dst_r_bpp > 0
+ pixld_a (lowbit * 8 / dst_r_bpp), dst_r_bpp, dst_r_basereg, DST_R
+.else
+ add DST_R, DST_R, #lowbit
+.endif
+ PF add PF_X, PF_X, #(lowbit * 8 / dst_w_bpp)
+ sub W, W, #(lowbit * 8 / dst_w_bpp)
+1:
+.endif
+.endr
+ pixdeinterleave src_bpp, src_basereg
+ pixdeinterleave mask_bpp, mask_basereg
+ pixdeinterleave dst_r_bpp, dst_r_basereg
+
+ process_pixblock_head
+ cache_preload 0, pixblock_size
+ cache_preload_simple
+ process_pixblock_tail
+
+ pixinterleave dst_w_bpp, dst_w_basereg
+.irp lowbit, 1, 2, 4, 8, 16
+.if (dst_w_bpp <= (lowbit * 8)) && ((lowbit * 8) < (pixblock_size * dst_w_bpp))
+.if lowbit < 16 /* we don't need more than 16-byte alignment */
+ tst DST_W, #lowbit
+ beq 1f
+.endif
+ pixst_a (lowbit * 8 / dst_w_bpp), dst_w_bpp, dst_w_basereg, DST_W
+1:
+.endif
+.endr
+.endif
+2:
+.endm
+
+/*
+ * Special code for processing up to (pixblock_size - 1) remaining
+ * trailing pixels. As SIMD processing performs operation on
+ * pixblock_size pixels, anything smaller than this has to be loaded
+ * and stored in a special way. Loading and storing of pixel data is
+ * performed in such a way that we fill some 'slots' in the NEON
+ * registers (some slots naturally are unused), then perform compositing
+ * operation as usual. In the end, the data is taken from these 'slots'
+ * and saved to memory.
+ *
+ * cache_preload_flag - allows to suppress prefetch if
+ * set to 0
+ * dst_aligned_flag - selects whether destination buffer
+ * is aligned
+ */
+.macro process_trailing_pixels cache_preload_flag, \
+ dst_aligned_flag, \
+ process_pixblock_head, \
+ process_pixblock_tail, \
+ process_pixblock_tail_head
+ tst W, #(pixblock_size - 1)
+ beq 2f
+.irp chunk_size, 16, 8, 4, 2, 1
+.if pixblock_size > chunk_size
+ tst W, #chunk_size
+ beq 1f
+ pixld_src chunk_size, src_bpp, src_basereg, SRC
+ pixld chunk_size, mask_bpp, mask_basereg, MASK
+.if dst_aligned_flag != 0
+ pixld_a chunk_size, dst_r_bpp, dst_r_basereg, DST_R
+.else
+ pixld chunk_size, dst_r_bpp, dst_r_basereg, DST_R
+.endif
+.if cache_preload_flag != 0
+ PF add PF_X, PF_X, #chunk_size
+.endif
+1:
+.endif
+.endr
+ pixdeinterleave src_bpp, src_basereg
+ pixdeinterleave mask_bpp, mask_basereg
+ pixdeinterleave dst_r_bpp, dst_r_basereg
+
+ process_pixblock_head
+.if cache_preload_flag != 0
+ cache_preload 0, pixblock_size
+ cache_preload_simple
+.endif
+ process_pixblock_tail
+ pixinterleave dst_w_bpp, dst_w_basereg
+.irp chunk_size, 16, 8, 4, 2, 1
+.if pixblock_size > chunk_size
+ tst W, #chunk_size
+ beq 1f
+.if dst_aligned_flag != 0
+ pixst_a chunk_size, dst_w_bpp, dst_w_basereg, DST_W
+.else
+ pixst chunk_size, dst_w_bpp, dst_w_basereg, DST_W
+.endif
+1:
+.endif
+.endr
+2:
+.endm
+
+/*
+ * Macro, which performs all the needed operations to switch to the next
+ * scanline and start the next loop iteration unless all the scanlines
+ * are already processed.
+ */
+.macro advance_to_next_scanline start_of_loop_label
+.if regs_shortage
+ ldrd W, [sp] /* load W and H (width and height) from stack */
+.else
+ mov W, ORIG_W
+.endif
+ add DST_W, DST_W, DST_STRIDE, lsl #dst_bpp_shift
+.if src_bpp != 0
+ add SRC, SRC, SRC_STRIDE, lsl #src_bpp_shift
+.endif
+.if mask_bpp != 0
+ add MASK, MASK, MASK_STRIDE, lsl #mask_bpp_shift
+.endif
+.if (dst_w_bpp != 24)
+ sub DST_W, DST_W, W, lsl #dst_bpp_shift
+.endif
+.if (src_bpp != 24) && (src_bpp != 0)
+ sub SRC, SRC, W, lsl #src_bpp_shift
+.endif
+.if (mask_bpp != 24) && (mask_bpp != 0)
+ sub MASK, MASK, W, lsl #mask_bpp_shift
+.endif
+ subs H, H, #1
+ mov DST_R, DST_W
+.if regs_shortage
+ str H, [sp, #4] /* save updated height to stack */
+.endif
+ bge start_of_loop_label
+.endm
+
+/*
+ * Registers are allocated in the following way by default:
+ * d0, d1, d2, d3 - reserved for loading source pixel data
+ * d4, d5, d6, d7 - reserved for loading destination pixel data
+ * d24, d25, d26, d27 - reserved for loading mask pixel data
+ * d28, d29, d30, d31 - final destination pixel data for writeback to memory
+ */
+.macro generate_composite_function fname, \
+ src_bpp_, \
+ mask_bpp_, \
+ dst_w_bpp_, \
+ flags, \
+ pixblock_size_, \
+ prefetch_distance, \
+ init, \
+ cleanup, \
+ process_pixblock_head, \
+ process_pixblock_tail, \
+ process_pixblock_tail_head, \
+ dst_w_basereg_ = 28, \
+ dst_r_basereg_ = 4, \
+ src_basereg_ = 0, \
+ mask_basereg_ = 24
+
+ pixman_asm_function fname
+
+ push {r4-r12, lr} /* save all registers */
+
+/*
+ * Select prefetch type for this function. If prefetch distance is
+ * set to 0 or one of the color formats is 24bpp, SIMPLE prefetch
+ * has to be used instead of ADVANCED.
+ */
+ .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_DEFAULT
+.if prefetch_distance == 0
+ .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_NONE
+.elseif (PREFETCH_TYPE_CURRENT > PREFETCH_TYPE_SIMPLE) && \
+ ((src_bpp_ == 24) || (mask_bpp_ == 24) || (dst_w_bpp_ == 24))
+ .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_SIMPLE
+.endif
+
+/*
+ * Make some macro arguments globally visible and accessible
+ * from other macros
+ */
+ .set src_bpp, src_bpp_
+ .set mask_bpp, mask_bpp_
+ .set dst_w_bpp, dst_w_bpp_
+ .set pixblock_size, pixblock_size_
+ .set dst_w_basereg, dst_w_basereg_
+ .set dst_r_basereg, dst_r_basereg_
+ .set src_basereg, src_basereg_
+ .set mask_basereg, mask_basereg_
+
+ .macro pixld_src x:vararg
+ pixld x
+ .endm
+ .macro fetch_src_pixblock
+ pixld_src pixblock_size, src_bpp, \
+ (src_basereg - pixblock_size * src_bpp / 64), SRC
+ .endm
+/*
+ * Assign symbolic names to registers
+ */
+ W .req r0 /* width (is updated during processing) */
+ H .req r1 /* height (is updated during processing) */
+ DST_W .req r2 /* destination buffer pointer for writes */
+ DST_STRIDE .req r3 /* destination image stride */
+ SRC .req r4 /* source buffer pointer */
+ SRC_STRIDE .req r5 /* source image stride */
+ DST_R .req r6 /* destination buffer pointer for reads */
+
+ MASK .req r7 /* mask pointer */
+ MASK_STRIDE .req r8 /* mask stride */
+
+ PF_CTL .req r9 /* combined lines counter and prefetch */
+ /* distance increment counter */
+ PF_X .req r10 /* pixel index in a scanline for current */
+ /* pretetch position */
+ PF_SRC .req r11 /* pointer to source scanline start */
+ /* for prefetch purposes */
+ PF_DST .req r12 /* pointer to destination scanline start */
+ /* for prefetch purposes */
+ PF_MASK .req r14 /* pointer to mask scanline start */
+ /* for prefetch purposes */
+/*
+ * Check whether we have enough registers for all the local variables.
+ * If we don't have enough registers, original width and height are
+ * kept on top of stack (and 'regs_shortage' variable is set to indicate
+ * this for the rest of code). Even if there are enough registers, the
+ * allocation scheme may be a bit different depending on whether source
+ * or mask is not used.
+ */
+.if (PREFETCH_TYPE_CURRENT < PREFETCH_TYPE_ADVANCED)
+ ORIG_W .req r10 /* saved original width */
+ DUMMY .req r12 /* temporary register */
+ .set regs_shortage, 0
+.elseif mask_bpp == 0
+ ORIG_W .req r7 /* saved original width */
+ DUMMY .req r8 /* temporary register */
+ .set regs_shortage, 0
+.elseif src_bpp == 0
+ ORIG_W .req r4 /* saved original width */
+ DUMMY .req r5 /* temporary register */
+ .set regs_shortage, 0
+.else
+ ORIG_W .req r1 /* saved original width */
+ DUMMY .req r1 /* temporary register */
+ .set regs_shortage, 1
+.endif
+
+ .set mask_bpp_shift, -1
+.if src_bpp == 32
+ .set src_bpp_shift, 2
+.elseif src_bpp == 24
+ .set src_bpp_shift, 0
+.elseif src_bpp == 16
+ .set src_bpp_shift, 1
+.elseif src_bpp == 8
+ .set src_bpp_shift, 0
+.elseif src_bpp == 0
+ .set src_bpp_shift, -1
+.else
+ .error "requested src bpp (src_bpp) is not supported"
+.endif
+.if mask_bpp == 32
+ .set mask_bpp_shift, 2
+.elseif mask_bpp == 24
+ .set mask_bpp_shift, 0
+.elseif mask_bpp == 8
+ .set mask_bpp_shift, 0
+.elseif mask_bpp == 0
+ .set mask_bpp_shift, -1
+.else
+ .error "requested mask bpp (mask_bpp) is not supported"
+.endif
+.if dst_w_bpp == 32
+ .set dst_bpp_shift, 2
+.elseif dst_w_bpp == 24
+ .set dst_bpp_shift, 0
+.elseif dst_w_bpp == 16
+ .set dst_bpp_shift, 1
+.elseif dst_w_bpp == 8
+ .set dst_bpp_shift, 0
+.else
+ .error "requested dst bpp (dst_w_bpp) is not supported"
+.endif
+
+.if (((flags) & FLAG_DST_READWRITE) != 0)
+ .set dst_r_bpp, dst_w_bpp
+.else
+ .set dst_r_bpp, 0
+.endif
+.if (((flags) & FLAG_DEINTERLEAVE_32BPP) != 0)
+ .set DEINTERLEAVE_32BPP_ENABLED, 1
+.else
+ .set DEINTERLEAVE_32BPP_ENABLED, 0
+.endif
+
+.if prefetch_distance < 0 || prefetch_distance > 15
+ .error "invalid prefetch distance (prefetch_distance)"
+.endif
+
+.if src_bpp > 0
+ ldr SRC, [sp, #40]
+.endif
+.if mask_bpp > 0
+ ldr MASK, [sp, #48]
+.endif
+ PF mov PF_X, #0
+.if src_bpp > 0
+ ldr SRC_STRIDE, [sp, #44]
+.endif
+.if mask_bpp > 0
+ ldr MASK_STRIDE, [sp, #52]
+.endif
+ mov DST_R, DST_W
+
+.if src_bpp == 24
+ sub SRC_STRIDE, SRC_STRIDE, W
+ sub SRC_STRIDE, SRC_STRIDE, W, lsl #1
+.endif
+.if mask_bpp == 24
+ sub MASK_STRIDE, MASK_STRIDE, W
+ sub MASK_STRIDE, MASK_STRIDE, W, lsl #1
+.endif
+.if dst_w_bpp == 24
+ sub DST_STRIDE, DST_STRIDE, W
+ sub DST_STRIDE, DST_STRIDE, W, lsl #1
+.endif
+
+/*
+ * Setup advanced prefetcher initial state
+ */
+ PF mov PF_SRC, SRC
+ PF mov PF_DST, DST_R
+ PF mov PF_MASK, MASK
+ /* PF_CTL = prefetch_distance | ((h - 1) << 4) */
+ PF mov PF_CTL, H, lsl #4
+ PF add PF_CTL, #(prefetch_distance - 0x10)
+
+ init
+.if regs_shortage
+ push {r0, r1}
+.endif
+ subs H, H, #1
+.if regs_shortage
+ str H, [sp, #4] /* save updated height to stack */
+.else
+ mov ORIG_W, W
+.endif
+ blt 9f
+ cmp W, #(pixblock_size * 2)
+ blt 8f
+/*
+ * This is the start of the pipelined loop, which if optimized for
+ * long scanlines
+ */
+0:
+ ensure_destination_ptr_alignment process_pixblock_head, \
+ process_pixblock_tail, \
+ process_pixblock_tail_head
+
+ /* Implement "head (tail_head) ... (tail_head) tail" loop pattern */
+ pixld_a pixblock_size, dst_r_bpp, \
+ (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R
+ fetch_src_pixblock
+ pixld pixblock_size, mask_bpp, \
+ (mask_basereg - pixblock_size * mask_bpp / 64), MASK
+ PF add PF_X, PF_X, #pixblock_size
+ process_pixblock_head
+ cache_preload 0, pixblock_size
+ cache_preload_simple
+ subs W, W, #(pixblock_size * 2)
+ blt 2f
+1:
+ process_pixblock_tail_head
+ cache_preload_simple
+ subs W, W, #pixblock_size
+ bge 1b
+2:
+ process_pixblock_tail
+ pixst_a pixblock_size, dst_w_bpp, \
+ (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W
+
+ /* Process the remaining trailing pixels in the scanline */
+ process_trailing_pixels 1, 1, \
+ process_pixblock_head, \
+ process_pixblock_tail, \
+ process_pixblock_tail_head
+ advance_to_next_scanline 0b
+
+.if regs_shortage
+ pop {r0, r1}
+.endif
+ cleanup
+ pop {r4-r12, pc} /* exit */
+/*
+ * This is the start of the loop, designed to process images with small width
+ * (less than pixblock_size * 2 pixels). In this case neither pipelining
+ * nor prefetch are used.
+ */
+8:
+ /* Process exactly pixblock_size pixels if needed */
+ tst W, #pixblock_size
+ beq 1f
+ pixld pixblock_size, dst_r_bpp, \
+ (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R
+ fetch_src_pixblock
+ pixld pixblock_size, mask_bpp, \
+ (mask_basereg - pixblock_size * mask_bpp / 64), MASK
+ process_pixblock_head
+ process_pixblock_tail
+ pixst pixblock_size, dst_w_bpp, \
+ (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W
+1:
+ /* Process the remaining trailing pixels in the scanline */
+ process_trailing_pixels 0, 0, \
+ process_pixblock_head, \
+ process_pixblock_tail, \
+ process_pixblock_tail_head
+ advance_to_next_scanline 8b
+9:
+.if regs_shortage
+ pop {r0, r1}
+.endif
+ cleanup
+ pop {r4-r12, pc} /* exit */
+
+ .purgem fetch_src_pixblock
+ .purgem pixld_src
+
+ .unreq SRC
+ .unreq MASK
+ .unreq DST_R
+ .unreq DST_W
+ .unreq ORIG_W
+ .unreq W
+ .unreq H
+ .unreq SRC_STRIDE
+ .unreq DST_STRIDE
+ .unreq MASK_STRIDE
+ .unreq PF_CTL
+ .unreq PF_X
+ .unreq PF_SRC
+ .unreq PF_DST
+ .unreq PF_MASK
+ .unreq DUMMY
+ .endfunc
+.endm
+
+/*
+ * A simplified variant of function generation template for a single
+ * scanline processing (for implementing pixman combine functions)
+ */
+.macro generate_composite_function_scanline use_nearest_scaling, \
+ fname, \
+ src_bpp_, \
+ mask_bpp_, \
+ dst_w_bpp_, \
+ flags, \
+ pixblock_size_, \
+ init, \
+ cleanup, \
+ process_pixblock_head, \
+ process_pixblock_tail, \
+ process_pixblock_tail_head, \
+ dst_w_basereg_ = 28, \
+ dst_r_basereg_ = 4, \
+ src_basereg_ = 0, \
+ mask_basereg_ = 24
+
+ pixman_asm_function fname
+
+ .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_NONE
+/*
+ * Make some macro arguments globally visible and accessible
+ * from other macros
+ */
+ .set src_bpp, src_bpp_
+ .set mask_bpp, mask_bpp_
+ .set dst_w_bpp, dst_w_bpp_
+ .set pixblock_size, pixblock_size_
+ .set dst_w_basereg, dst_w_basereg_
+ .set dst_r_basereg, dst_r_basereg_
+ .set src_basereg, src_basereg_
+ .set mask_basereg, mask_basereg_
+
+.if use_nearest_scaling != 0
+ /*
+ * Assign symbolic names to registers for nearest scaling
+ */
+ W .req r0
+ DST_W .req r1
+ SRC .req r2
+ VX .req r3
+ UNIT_X .req ip
+ MASK .req lr
+ TMP1 .req r4
+ TMP2 .req r5
+ DST_R .req r6
+ SRC_WIDTH_FIXED .req r7
+
+ .macro pixld_src x:vararg
+ pixld_s x
+ .endm
+
+ ldr UNIT_X, [sp]
+ push {r4-r8, lr}
+ ldr SRC_WIDTH_FIXED, [sp, #(24 + 4)]
+ .if mask_bpp != 0
+ ldr MASK, [sp, #(24 + 8)]
+ .endif
+.else
+ /*
+ * Assign symbolic names to registers
+ */
+ W .req r0 /* width (is updated during processing) */
+ DST_W .req r1 /* destination buffer pointer for writes */
+ SRC .req r2 /* source buffer pointer */
+ DST_R .req ip /* destination buffer pointer for reads */
+ MASK .req r3 /* mask pointer */
+
+ .macro pixld_src x:vararg
+ pixld x
+ .endm
+.endif
+
+.if (((flags) & FLAG_DST_READWRITE) != 0)
+ .set dst_r_bpp, dst_w_bpp
+.else
+ .set dst_r_bpp, 0
+.endif
+.if (((flags) & FLAG_DEINTERLEAVE_32BPP) != 0)
+ .set DEINTERLEAVE_32BPP_ENABLED, 1
+.else
+ .set DEINTERLEAVE_32BPP_ENABLED, 0
+.endif
+
+ .macro fetch_src_pixblock
+ pixld_src pixblock_size, src_bpp, \
+ (src_basereg - pixblock_size * src_bpp / 64), SRC
+ .endm
+
+ init
+ mov DST_R, DST_W
+
+ cmp W, #pixblock_size
+ blt 8f
+
+ ensure_destination_ptr_alignment process_pixblock_head, \
+ process_pixblock_tail, \
+ process_pixblock_tail_head
+
+ subs W, W, #pixblock_size
+ blt 7f
+
+ /* Implement "head (tail_head) ... (tail_head) tail" loop pattern */
+ pixld_a pixblock_size, dst_r_bpp, \
+ (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R
+ fetch_src_pixblock
+ pixld pixblock_size, mask_bpp, \
+ (mask_basereg - pixblock_size * mask_bpp / 64), MASK
+ process_pixblock_head
+ subs W, W, #pixblock_size
+ blt 2f
+1:
+ process_pixblock_tail_head
+ subs W, W, #pixblock_size
+ bge 1b
+2:
+ process_pixblock_tail
+ pixst_a pixblock_size, dst_w_bpp, \
+ (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W
+7:
+ /* Process the remaining trailing pixels in the scanline (dst aligned) */
+ process_trailing_pixels 0, 1, \
+ process_pixblock_head, \
+ process_pixblock_tail, \
+ process_pixblock_tail_head
+
+ cleanup
+.if use_nearest_scaling != 0
+ pop {r4-r8, pc} /* exit */
+.else
+ bx lr /* exit */
+.endif
+8:
+ /* Process the remaining trailing pixels in the scanline (dst unaligned) */
+ process_trailing_pixels 0, 0, \
+ process_pixblock_head, \
+ process_pixblock_tail, \
+ process_pixblock_tail_head
+
+ cleanup
+
+.if use_nearest_scaling != 0
+ pop {r4-r8, pc} /* exit */
+
+ .unreq DST_R
+ .unreq SRC
+ .unreq W
+ .unreq VX
+ .unreq UNIT_X
+ .unreq TMP1
+ .unreq TMP2
+ .unreq DST_W
+ .unreq MASK
+ .unreq SRC_WIDTH_FIXED
+
+.else
+ bx lr /* exit */
+
+ .unreq SRC
+ .unreq MASK
+ .unreq DST_R
+ .unreq DST_W
+ .unreq W
+.endif
+
+ .purgem fetch_src_pixblock
+ .purgem pixld_src
+
+ .endfunc
+.endm
+
+.macro generate_composite_function_single_scanline x:vararg
+ generate_composite_function_scanline 0, x
+.endm
+
+.macro generate_composite_function_nearest_scanline x:vararg
+ generate_composite_function_scanline 1, x
+.endm
+
+/* Default prologue/epilogue, nothing special needs to be done */
+
+.macro default_init
+.endm
+
+.macro default_cleanup
+.endm
+
+/*
+ * Prologue/epilogue variant which additionally saves/restores d8-d15
+ * registers (they need to be saved/restored by callee according to ABI).
+ * This is required if the code needs to use all the NEON registers.
+ */
+
+.macro default_init_need_all_regs
+ vpush {d8-d15}
+.endm
+
+.macro default_cleanup_need_all_regs
+ vpop {d8-d15}
+.endm
+
+/******************************************************************************/
diff --git a/vendor/github.com/Benau/go_rlottie/vector_stb_stb_image.cpp b/vendor/github.com/Benau/go_rlottie/vector_stb_stb_image.cpp
new file mode 100644
index 00000000..6fd89c88
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_stb_stb_image.cpp
@@ -0,0 +1,59 @@
+/*
+ * configure stb_image about
+ * the image we will support
+ */
+#define STB_IMAGE_IMPLEMENTATION
+
+#define STBI_ONLY_JPEG
+#define STBI_ONLY_PNG
+#define STBI_NO_HDR
+#define STBI_NO_LINEAR
+#define STBI_NO_GIF
+#define STBI_NO_PIC
+
+#include "vector_stb_stb_image.h"
+
+#if defined _WIN32 || defined __CYGWIN__
+ #ifdef RLOTTIE_BUILD
+ #define RLOTTIE_API __declspec(dllexport)
+ #else
+ #define RLOTTIE_API __declspec(dllimport)
+ #endif
+#else
+ #ifdef RLOTTIE_BUILD
+ #define RLOTTIE_API __attribute__ ((visibility ("default")))
+ #else
+ #define RLOTTIE_API
+ #endif
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * exported function wrapper from the library
+ */
+
+RLOTTIE_API unsigned char *lottie_image_load(char const *filename, int *x,
+ int *y, int *comp, int req_comp)
+{
+ return stbi_load(filename, x, y, comp, req_comp);
+}
+
+RLOTTIE_API unsigned char *lottie_image_load_from_data(const char *imageData,
+ int len, int *x, int *y,
+ int *comp, int req_comp)
+{
+ unsigned char *data = (unsigned char *)imageData;
+ return stbi_load_from_memory(data, len, x, y, comp, req_comp);
+}
+
+RLOTTIE_API void lottie_image_free(unsigned char *data)
+{
+ stbi_image_free(data);
+}
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/vendor/github.com/Benau/go_rlottie/vector_stb_stb_image.h b/vendor/github.com/Benau/go_rlottie/vector_stb_stb_image.h
new file mode 100644
index 00000000..56f81830
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_stb_stb_image.h
@@ -0,0 +1,7509 @@
+/* stb_image - v2.19 - public domain image loader - http://nothings.org/stb
+ no warranty implied; use at your own risk
+
+ Do this:
+ #define STB_IMAGE_IMPLEMENTATION
+ before you include this file in *one* C or C++ file to create the implementation.
+
+ // i.e. it should look like this:
+ #include ...
+ #include ...
+ #include ...
+ #define STB_IMAGE_IMPLEMENTATION
+ #include "stb_image.h"
+
+ You can #define STBI_ASSERT(x) before the #include to avoid using assert.h.
+ And #define STBI_MALLOC, STBI_REALLOC, and STBI_FREE to avoid using malloc,realloc,free
+
+
+ QUICK NOTES:
+ Primarily of interest to game developers and other people who can
+ avoid problematic images and only need the trivial interface
+
+ JPEG baseline & progressive (12 bpc/arithmetic not supported, same as stock IJG lib)
+ PNG 1/2/4/8/16-bit-per-channel
+
+ TGA (not sure what subset, if a subset)
+ BMP non-1bpp, non-RLE
+ PSD (composited view only, no extra channels, 8/16 bit-per-channel)
+
+ GIF (*comp always reports as 4-channel)
+ HDR (radiance rgbE format)
+ PIC (Softimage PIC)
+ PNM (PPM and PGM binary only)
+
+ Animated GIF still needs a proper API, but here's one way to do it:
+ http://gist.github.com/urraka/685d9a6340b26b830d49
+
+ - decode from memory or through FILE (define STBI_NO_STDIO to remove code)
+ - decode from arbitrary I/O callbacks
+ - SIMD acceleration on x86/x64 (SSE2) and ARM (NEON)
+
+ Full documentation under "DOCUMENTATION" below.
+
+
+LICENSE
+
+ See end of file for license information.
+
+RECENT REVISION HISTORY:
+
+ 2.19 (2018-02-11) fix warning
+ 2.18 (2018-01-30) fix warnings
+ 2.17 (2018-01-29) bugfix, 1-bit BMP, 16-bitness query, fix warnings
+ 2.16 (2017-07-23) all functions have 16-bit variants; optimizations; bugfixes
+ 2.15 (2017-03-18) fix png-1,2,4; all Imagenet JPGs; no runtime SSE detection on GCC
+ 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs
+ 2.13 (2016-12-04) experimental 16-bit API, only for PNG so far; fixes
+ 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes
+ 2.11 (2016-04-02) 16-bit PNGS; enable SSE2 in non-gcc x64
+ RGB-format JPEG; remove white matting in PSD;
+ allocate large structures on the stack;
+ correct channel count for PNG & BMP
+ 2.10 (2016-01-22) avoid warning introduced in 2.09
+ 2.09 (2016-01-16) 16-bit TGA; comments in PNM files; STBI_REALLOC_SIZED
+
+ See end of file for full revision history.
+
+
+ ============================ Contributors =========================
+
+ Image formats Extensions, features
+ Sean Barrett (jpeg, png, bmp) Jetro Lauha (stbi_info)
+ Nicolas Schulz (hdr, psd) Martin "SpartanJ" Golini (stbi_info)
+ Jonathan Dummer (tga) James "moose2000" Brown (iPhone PNG)
+ Jean-Marc Lienher (gif) Ben "Disch" Wenger (io callbacks)
+ Tom Seddon (pic) Omar Cornut (1/2/4-bit PNG)
+ Thatcher Ulrich (psd) Nicolas Guillemot (vertical flip)
+ Ken Miller (pgm, ppm) Richard Mitton (16-bit PSD)
+ github:urraka (animated gif) Junggon Kim (PNM comments)
+ Christopher Forseth (animated gif) Daniel Gibson (16-bit TGA)
+ socks-the-fox (16-bit PNG)
+ Jeremy Sawicki (handle all ImageNet JPGs)
+ Optimizations & bugfixes Mikhail Morozov (1-bit BMP)
+ Fabian "ryg" Giesen Anael Seghezzi (is-16-bit query)
+ Arseny Kapoulkine
+ John-Mark Allen
+
+ Bug & warning fixes
+ Marc LeBlanc David Woo Guillaume George Martins Mozeiko
+ Christpher Lloyd Jerry Jansson Joseph Thomson Phil Jordan
+ Dave Moore Roy Eltham Hayaki Saito Nathan Reed
+ Won Chun Luke Graham Johan Duparc Nick Verigakis
+ the Horde3D community Thomas Ruf Ronny Chevalier github:rlyeh
+ Janez Zemva John Bartholomew Michal Cichon github:romigrou
+ Jonathan Blow Ken Hamada Tero Hanninen github:svdijk
+ Laurent Gomila Cort Stratton Sergio Gonzalez github:snagar
+ Aruelien Pocheville Thibault Reuille Cass Everitt github:Zelex
+ Ryamond Barbiero Paul Du Bois Engin Manap github:grim210
+ Aldo Culquicondor Philipp Wiesemann Dale Weiler github:sammyhw
+ Oriol Ferrer Mesia Josh Tobin Matthew Gregan github:phprus
+ Julian Raschke Gregory Mullen Baldur Karlsson github:poppolopoppo
+ Christian Floisand Kevin Schmidt github:darealshinji
+ Blazej Dariusz Roszkowski github:Michaelangel007
+*/
+
+#ifndef STBI_INCLUDE_STB_IMAGE_H
+#define STBI_INCLUDE_STB_IMAGE_H
+
+// DOCUMENTATION
+//
+// Limitations:
+// - no 12-bit-per-channel JPEG
+// - no JPEGs with arithmetic coding
+// - GIF always returns *comp=4
+//
+// Basic usage (see HDR discussion below for HDR usage):
+// int x,y,n;
+// unsigned char *data = stbi_load(filename, &x, &y, &n, 0);
+// // ... process data if not NULL ...
+// // ... x = width, y = height, n = # 8-bit components per pixel ...
+// // ... replace '0' with '1'..'4' to force that many components per pixel
+// // ... but 'n' will always be the number that it would have been if you said 0
+// stbi_image_free(data)
+//
+// Standard parameters:
+// int *x -- outputs image width in pixels
+// int *y -- outputs image height in pixels
+// int *channels_in_file -- outputs # of image components in image file
+// int desired_channels -- if non-zero, # of image components requested in result
+//
+// The return value from an image loader is an 'unsigned char *' which points
+// to the pixel data, or NULL on an allocation failure or if the image is
+// corrupt or invalid. The pixel data consists of *y scanlines of *x pixels,
+// with each pixel consisting of N interleaved 8-bit components; the first
+// pixel pointed to is top-left-most in the image. There is no padding between
+// image scanlines or between pixels, regardless of format. The number of
+// components N is 'desired_channels' if desired_channels is non-zero, or
+// *channels_in_file otherwise. If desired_channels is non-zero,
+// *channels_in_file has the number of components that _would_ have been
+// output otherwise. E.g. if you set desired_channels to 4, you will always
+// get RGBA output, but you can check *channels_in_file to see if it's trivially
+// opaque because e.g. there were only 3 channels in the source image.
+//
+// An output image with N components has the following components interleaved
+// in this order in each pixel:
+//
+// N=#comp components
+// 1 grey
+// 2 grey, alpha
+// 3 red, green, blue
+// 4 red, green, blue, alpha
+//
+// If image loading fails for any reason, the return value will be NULL,
+// and *x, *y, *channels_in_file will be unchanged. The function
+// stbi_failure_reason() can be queried for an extremely brief, end-user
+// unfriendly explanation of why the load failed. Define STBI_NO_FAILURE_STRINGS
+// to avoid compiling these strings at all, and STBI_FAILURE_USERMSG to get slightly
+// more user-friendly ones.
+//
+// Paletted PNG, BMP, GIF, and PIC images are automatically depalettized.
+//
+// ===========================================================================
+//
+// Philosophy
+//
+// stb libraries are designed with the following priorities:
+//
+// 1. easy to use
+// 2. easy to maintain
+// 3. good performance
+//
+// Sometimes I let "good performance" creep up in priority over "easy to maintain",
+// and for best performance I may provide less-easy-to-use APIs that give higher
+// performance, in addition to the easy to use ones. Nevertheless, it's important
+// to keep in mind that from the standpoint of you, a client of this library,
+// all you care about is #1 and #3, and stb libraries DO NOT emphasize #3 above all.
+//
+// Some secondary priorities arise directly from the first two, some of which
+// make more explicit reasons why performance can't be emphasized.
+//
+// - Portable ("ease of use")
+// - Small source code footprint ("easy to maintain")
+// - No dependencies ("ease of use")
+//
+// ===========================================================================
+//
+// I/O callbacks
+//
+// I/O callbacks allow you to read from arbitrary sources, like packaged
+// files or some other source. Data read from callbacks are processed
+// through a small internal buffer (currently 128 bytes) to try to reduce
+// overhead.
+//
+// The three functions you must define are "read" (reads some bytes of data),
+// "skip" (skips some bytes of data), "eof" (reports if the stream is at the end).
+//
+// ===========================================================================
+//
+// SIMD support
+//
+// The JPEG decoder will try to automatically use SIMD kernels on x86 when
+// supported by the compiler. For ARM Neon support, you must explicitly
+// request it.
+//
+// (The old do-it-yourself SIMD API is no longer supported in the current
+// code.)
+//
+// On x86, SSE2 will automatically be used when available based on a run-time
+// test; if not, the generic C versions are used as a fall-back. On ARM targets,
+// the typical path is to have separate builds for NEON and non-NEON devices
+// (at least this is true for iOS and Android). Therefore, the NEON support is
+// toggled by a build flag: define STBI_NEON to get NEON loops.
+//
+// If for some reason you do not want to use any of SIMD code, or if
+// you have issues compiling it, you can disable it entirely by
+// defining STBI_NO_SIMD.
+//
+// ===========================================================================
+//
+// HDR image support (disable by defining STBI_NO_HDR)
+//
+// stb_image now supports loading HDR images in general, and currently
+// the Radiance .HDR file format, although the support is provided
+// generically. You can still load any file through the existing interface;
+// if you attempt to load an HDR file, it will be automatically remapped to
+// LDR, assuming gamma 2.2 and an arbitrary scale factor defaulting to 1;
+// both of these constants can be reconfigured through this interface:
+//
+// stbi_hdr_to_ldr_gamma(2.2f);
+// stbi_hdr_to_ldr_scale(1.0f);
+//
+// (note, do not use _inverse_ constants; stbi_image will invert them
+// appropriately).
+//
+// Additionally, there is a new, parallel interface for loading files as
+// (linear) floats to preserve the full dynamic range:
+//
+// float *data = stbi_loadf(filename, &x, &y, &n, 0);
+//
+// If you load LDR images through this interface, those images will
+// be promoted to floating point values, run through the inverse of
+// constants corresponding to the above:
+//
+// stbi_ldr_to_hdr_scale(1.0f);
+// stbi_ldr_to_hdr_gamma(2.2f);
+//
+// Finally, given a filename (or an open file or memory block--see header
+// file for details) containing image data, you can query for the "most
+// appropriate" interface to use (that is, whether the image is HDR or
+// not), using:
+//
+// stbi_is_hdr(char *filename);
+//
+// ===========================================================================
+//
+// iPhone PNG support:
+//
+// By default we convert iphone-formatted PNGs back to RGB, even though
+// they are internally encoded differently. You can disable this conversion
+// by calling stbi_convert_iphone_png_to_rgb(0), in which case
+// you will always just get the native iphone "format" through (which
+// is BGR stored in RGB).
+//
+// Call stbi_set_unpremultiply_on_load(1) as well to force a divide per
+// pixel to remove any premultiplied alpha *only* if the image file explicitly
+// says there's premultiplied data (currently only happens in iPhone images,
+// and only if iPhone convert-to-rgb processing is on).
+//
+// ===========================================================================
+//
+// ADDITIONAL CONFIGURATION
+//
+// - You can suppress implementation of any of the decoders to reduce
+// your code footprint by #defining one or more of the following
+// symbols before creating the implementation.
+//
+// STBI_NO_JPEG
+// STBI_NO_PNG
+// STBI_NO_BMP
+// STBI_NO_PSD
+// STBI_NO_TGA
+// STBI_NO_GIF
+// STBI_NO_HDR
+// STBI_NO_PIC
+// STBI_NO_PNM (.ppm and .pgm)
+//
+// - You can request *only* certain decoders and suppress all other ones
+// (this will be more forward-compatible, as addition of new decoders
+// doesn't require you to disable them explicitly):
+//
+// STBI_ONLY_JPEG
+// STBI_ONLY_PNG
+// STBI_ONLY_BMP
+// STBI_ONLY_PSD
+// STBI_ONLY_TGA
+// STBI_ONLY_GIF
+// STBI_ONLY_HDR
+// STBI_ONLY_PIC
+// STBI_ONLY_PNM (.ppm and .pgm)
+//
+// - If you use STBI_NO_PNG (or _ONLY_ without PNG), and you still
+// want the zlib decoder to be available, #define STBI_SUPPORT_ZLIB
+//
+
+
+#ifndef STBI_NO_STDIO
+#include <stdio.h>
+#endif // STBI_NO_STDIO
+
+#if defined _WIN32 || defined __CYGWIN__
+#include <windows.h>
+#endif // defined _WIN32 || defined __CYGWIN__
+
+#define STBI_VERSION 1
+
// Desired/reported channel counts accepted by all of the load APIs.
enum
{
   STBI_default = 0, // only used for desired_channels

   STBI_grey       = 1, // 1 channel:  luminance
   STBI_grey_alpha = 2, // 2 channels: luminance + alpha
   STBI_rgb        = 3, // 3 channels: red, green, blue
   STBI_rgb_alpha  = 4  // 4 channels: red, green, blue, alpha
};

typedef unsigned char stbi_uc;  // one 8-bit channel sample
typedef unsigned short stbi_us; // one 16-bit channel sample
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef STB_IMAGE_STATIC
+#define STBIDEF static
+#else
+#define STBIDEF extern
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// PRIMARY API - works on images of any type
+//
+
+//
+// load image by filename, open file, or memory buffer
+//
+
// Callback trio used by the *_from_callbacks loaders to read from an
// arbitrary stream; 'user' is the opaque pointer supplied by the caller.
typedef struct
{
   int      (*read)  (void *user,char *data,int size);   // fill 'data' with 'size' bytes.  return number of bytes actually read
   void     (*skip)  (void *user,int n);                 // skip the next 'n' bytes, or 'unget' the last -n bytes if negative
   int      (*eof)   (void *user);                       // returns nonzero if we are at end of file/data
} stbi_io_callbacks;
+
+////////////////////////////////////
+//
+// 8-bits-per-channel interface
+//
+
+STBIDEF stbi_uc *stbi_load_from_memory (stbi_uc const *buffer, int len , int *x, int *y, int *channels_in_file, int desired_channels);
+STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk , void *user, int *x, int *y, int *channels_in_file, int desired_channels);
+#ifndef STBI_NO_GIF
+STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp);
+#endif
+
+
+#ifndef STBI_NO_STDIO
+STBIDEF stbi_uc *stbi_load (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels);
+STBIDEF stbi_uc *stbi_load_from_file (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels);
+// for stbi_load_from_file, file pointer is left pointing immediately after image
+#endif
+
+////////////////////////////////////
+//
+// 16-bits-per-channel interface
+//
+
+STBIDEF stbi_us *stbi_load_16_from_memory (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels);
+STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels);
+
+#ifndef STBI_NO_STDIO
+STBIDEF stbi_us *stbi_load_16 (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels);
+STBIDEF stbi_us *stbi_load_from_file_16(FILE *f, int *x, int *y, int *channels_in_file, int desired_channels);
+#endif
+
+////////////////////////////////////
+//
+// float-per-channel interface
+//
+#ifndef STBI_NO_LINEAR
+ STBIDEF float *stbi_loadf_from_memory (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels);
+ STBIDEF float *stbi_loadf_from_callbacks (stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels);
+
+ #ifndef STBI_NO_STDIO
+ STBIDEF float *stbi_loadf (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels);
+ STBIDEF float *stbi_loadf_from_file (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels);
+ #endif
+#endif
+
+#ifndef STBI_NO_HDR
+ STBIDEF void stbi_hdr_to_ldr_gamma(float gamma);
+ STBIDEF void stbi_hdr_to_ldr_scale(float scale);
+#endif // STBI_NO_HDR
+
+#ifndef STBI_NO_LINEAR
+ STBIDEF void stbi_ldr_to_hdr_gamma(float gamma);
+ STBIDEF void stbi_ldr_to_hdr_scale(float scale);
+#endif // STBI_NO_LINEAR
+
+// stbi_is_hdr is always defined, but always returns false if STBI_NO_HDR
+STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user);
+STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len);
+#ifndef STBI_NO_STDIO
+STBIDEF int stbi_is_hdr (char const *filename);
+STBIDEF int stbi_is_hdr_from_file(FILE *f);
+#endif // STBI_NO_STDIO
+
+
+// get a VERY brief reason for failure
+// NOT THREADSAFE
+STBIDEF const char *stbi_failure_reason (void);
+
+// free the loaded image -- this is just free()
+STBIDEF void stbi_image_free (void *retval_from_stbi_load);
+
+// get image dimensions & components without fully decoding
+STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp);
+STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp);
+STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len);
+STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *clbk, void *user);
+
+#ifndef STBI_NO_STDIO
+STBIDEF int stbi_info (char const *filename, int *x, int *y, int *comp);
+STBIDEF int stbi_info_from_file (FILE *f, int *x, int *y, int *comp);
+STBIDEF int stbi_is_16_bit (char const *filename);
+STBIDEF int stbi_is_16_bit_from_file(FILE *f);
+#endif
+
+
+
+// for image formats that explicitly notate that they have premultiplied alpha,
+// we just return the colors as stored in the file. set this flag to force
+// unpremultiplication. results are undefined if the unpremultiply overflows.
+STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply);
+
+// indicate whether we should process iphone images back to canonical format,
+// or just pass them through "as-is"
+STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert);
+
+// flip the image vertically, so the first pixel in the output array is the bottom left
+STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip);
+
+// ZLIB client - used by PNG, available for other purposes
+
+STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen);
+STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header);
+STBIDEF char *stbi_zlib_decode_malloc(const char *buffer, int len, int *outlen);
+STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, const char *ibuffer, int ilen);
+
+STBIDEF char *stbi_zlib_decode_noheader_malloc(const char *buffer, int len, int *outlen);
+STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+//
+//
+//// end header file /////////////////////////////////////////////////////
+#endif // STBI_INCLUDE_STB_IMAGE_H
+
+#ifdef STB_IMAGE_IMPLEMENTATION
+
+#if defined(STBI_ONLY_JPEG) || defined(STBI_ONLY_PNG) || defined(STBI_ONLY_BMP) \
+ || defined(STBI_ONLY_TGA) || defined(STBI_ONLY_GIF) || defined(STBI_ONLY_PSD) \
+ || defined(STBI_ONLY_HDR) || defined(STBI_ONLY_PIC) || defined(STBI_ONLY_PNM) \
+ || defined(STBI_ONLY_ZLIB)
+ #ifndef STBI_ONLY_JPEG
+ #define STBI_NO_JPEG
+ #endif
+ #ifndef STBI_ONLY_PNG
+ #define STBI_NO_PNG
+ #endif
+ #ifndef STBI_ONLY_BMP
+ #define STBI_NO_BMP
+ #endif
+ #ifndef STBI_ONLY_PSD
+ #define STBI_NO_PSD
+ #endif
+ #ifndef STBI_ONLY_TGA
+ #define STBI_NO_TGA
+ #endif
+ #ifndef STBI_ONLY_GIF
+ #define STBI_NO_GIF
+ #endif
+ #ifndef STBI_ONLY_HDR
+ #define STBI_NO_HDR
+ #endif
+ #ifndef STBI_ONLY_PIC
+ #define STBI_NO_PIC
+ #endif
+ #ifndef STBI_ONLY_PNM
+ #define STBI_NO_PNM
+ #endif
+#endif
+
+#if defined(STBI_NO_PNG) && !defined(STBI_SUPPORT_ZLIB) && !defined(STBI_NO_ZLIB)
+#define STBI_NO_ZLIB
+#endif
+
+
+#include <stdarg.h>
+#include <stddef.h> // ptrdiff_t on osx
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+
+#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR)
+#include <math.h> // ldexp, pow
+#endif
+
+#ifndef STBI_NO_STDIO
+#include <stdio.h>
+#endif
+
+#ifndef STBI_ASSERT
+#include <assert.h>
+#define STBI_ASSERT(x) assert(x)
+#endif
+
+
+#ifndef _MSC_VER
+ #ifdef __cplusplus
+ #define stbi_inline inline
+ #else
+ #define stbi_inline
+ #endif
+#else
+ #define stbi_inline __forceinline
+#endif
+
+
+#ifdef _MSC_VER
+typedef unsigned short stbi__uint16;
+typedef signed short stbi__int16;
+typedef unsigned int stbi__uint32;
+typedef signed int stbi__int32;
+#else
+#include <stdint.h>
+typedef uint16_t stbi__uint16;
+typedef int16_t stbi__int16;
+typedef uint32_t stbi__uint32;
+typedef int32_t stbi__int32;
+#endif
+
+// should produce compiler error if size is wrong
+typedef unsigned char validate_uint32[sizeof(stbi__uint32)==4 ? 1 : -1];
+
+#ifdef _MSC_VER
+#define STBI_NOTUSED(v) (void)(v)
+#else
+#define STBI_NOTUSED(v) (void)sizeof(v)
+#endif
+
+#ifdef _MSC_VER
+#define STBI_HAS_LROTL
+#endif
+
+#ifdef STBI_HAS_LROTL
+ #define stbi_lrot(x,y) _lrotl(x,y)
+#else
+ #define stbi_lrot(x,y) (((x) << (y)) | ((x) >> (32 - (y))))
+#endif
+
+#if defined(STBI_MALLOC) && defined(STBI_FREE) && (defined(STBI_REALLOC) || defined(STBI_REALLOC_SIZED))
+// ok
+#elif !defined(STBI_MALLOC) && !defined(STBI_FREE) && !defined(STBI_REALLOC) && !defined(STBI_REALLOC_SIZED)
+// ok
+#else
+#error "Must define all or none of STBI_MALLOC, STBI_FREE, and STBI_REALLOC (or STBI_REALLOC_SIZED)."
+#endif
+
+#ifndef STBI_MALLOC
+#define STBI_MALLOC(sz) malloc(sz)
+#define STBI_REALLOC(p,newsz) realloc(p,newsz)
+#define STBI_FREE(p) free(p)
+#endif
+
+#ifndef STBI_REALLOC_SIZED
+#define STBI_REALLOC_SIZED(p,oldsz,newsz) STBI_REALLOC(p,newsz)
+#endif
+
+// x86/x64 detection
+#if defined(__x86_64__) || defined(_M_X64)
+#define STBI__X64_TARGET
+#elif defined(__i386) || defined(_M_IX86)
+#define STBI__X86_TARGET
+#endif
+
+#if defined(__GNUC__) && defined(STBI__X86_TARGET) && !defined(__SSE2__) && !defined(STBI_NO_SIMD)
+// gcc doesn't support sse2 intrinsics unless you compile with -msse2,
+// which in turn means it gets to use SSE2 everywhere. This is unfortunate,
+// but previous attempts to provide the SSE2 functions with runtime
+// detection caused numerous issues. The way architecture extensions are
+// exposed in GCC/Clang is, sadly, not really suited for one-file libs.
+// New behavior: if compiled with -msse2, we use SSE2 without any
+// detection; if not, we don't use it at all.
+#define STBI_NO_SIMD
+#endif
+
+#if defined(__MINGW32__) && defined(STBI__X86_TARGET) && !defined(STBI_MINGW_ENABLE_SSE2) && !defined(STBI_NO_SIMD)
+// Note that __MINGW32__ doesn't actually mean 32-bit, so we have to avoid STBI__X64_TARGET
+//
+// 32-bit MinGW wants ESP to be 16-byte aligned, but this is not in the
+// Windows ABI and VC++ as well as Windows DLLs don't maintain that invariant.
+// As a result, enabling SSE2 on 32-bit MinGW is dangerous when not
+// simultaneously enabling "-mstackrealign".
+//
+// See https://github.com/nothings/stb/issues/81 for more information.
+//
+// So default to no SSE2 on 32-bit MinGW. If you've read this far and added
+// -mstackrealign to your build settings, feel free to #define STBI_MINGW_ENABLE_SSE2.
+#define STBI_NO_SIMD
+#endif
+
+#if !defined(STBI_NO_SIMD) && (defined(STBI__X86_TARGET) || defined(STBI__X64_TARGET))
+#define STBI_SSE2
+#include <emmintrin.h>
+
+#ifdef _MSC_VER
+
+#if _MSC_VER >= 1400 // not VC6
+#include <intrin.h> // __cpuid
+static int stbi__cpuid3(void)
+{
+ int info[4];
+ __cpuid(info,1);
+ return info[3];
+}
+#else
+static int stbi__cpuid3(void)
+{
+ int res;
+ __asm {
+ mov eax,1
+ cpuid
+ mov res,edx
+ }
+ return res;
+}
+#endif
+
+#define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name
+
+static int stbi__sse2_available(void)
+{
+ int info3 = stbi__cpuid3();
+ return ((info3 >> 26) & 1) != 0;
+}
+#else // assume GCC-style if not VC++
+#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16)))
+
+static int stbi__sse2_available(void)
+{
+ // If we're even attempting to compile this on GCC/Clang, that means
+ // -msse2 is on, which means the compiler is allowed to use SSE2
+ // instructions at will, and so are we.
+ return 1;
+}
+#endif
+#endif
+
+// ARM NEON
+#if defined(STBI_NO_SIMD) && defined(STBI_NEON)
+#undef STBI_NEON
+#endif
+
+#ifdef STBI_NEON
+#include <arm_neon.h>
+// assume GCC or Clang on ARM targets
+#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16)))
+#endif
+
+#ifndef STBI_SIMD_ALIGN
+#define STBI_SIMD_ALIGN(type, name) type name
+#endif
+
+///////////////////////////////////////////////
+//
+// stbi__context struct and start_xxx functions
+
// stbi__context structure is our basic context used by all images, so it
// contains all the IO context, plus some basic image information
typedef struct
{
   stbi__uint32 img_x, img_y;  // image dimensions in pixels
   int img_n, img_out_n;       // channels in the file / channels we will output

   stbi_io_callbacks io;       // user IO callbacks (callback mode only)
   void *io_user_data;         // opaque pointer handed back to every callback

   int read_from_callbacks;    // nonzero => data arrives through io.read into buffer_start
   int buflen;                 // size of buffer_start (set in stbi__start_callbacks)
   stbi_uc buffer_start[128];  // scratch buffer for callback-mode reads

   stbi_uc *img_buffer, *img_buffer_end;                    // current read window
   stbi_uc *img_buffer_original, *img_buffer_original_end;  // saved so stbi__rewind can restore the window
} stbi__context;
+
+
+static void stbi__refill_buffer(stbi__context *s);
+
+// initialize a memory-decode context
+static void stbi__start_mem(stbi__context *s, stbi_uc const *buffer, int len)
+{
+ s->io.read = NULL;
+ s->read_from_callbacks = 0;
+ s->img_buffer = s->img_buffer_original = (stbi_uc *) buffer;
+ s->img_buffer_end = s->img_buffer_original_end = (stbi_uc *) buffer+len;
+}
+
// initialize a callback-based context: copy the user's callbacks, then prime
// the internal scratch buffer with the first chunk of data
static void stbi__start_callbacks(stbi__context *s, stbi_io_callbacks *c, void *user)
{
   s->io = *c;
   s->io_user_data = user;
   s->buflen = sizeof(s->buffer_start);  // capacity of the scratch buffer
   s->read_from_callbacks = 1;
   s->img_buffer_original = s->buffer_start;
   stbi__refill_buffer(s);               // performs the first io.read
   s->img_buffer_original_end = s->img_buffer_end;  // must follow the refill: it set img_buffer_end
}
+
// this is not threadsafe
static const char *stbi__g_failure_reason;

// Return the most recent failure message recorded by stbi__err (global state,
// so only meaningful immediately after a failed call).
STBIDEF const char *stbi_failure_reason(void)
{
   return stbi__g_failure_reason;
}

// Record 'str' as the failure reason and return 0, so error sites can write
// 'return stbi__err(...);' in one statement.
static int stbi__err(const char *str)
{
   stbi__g_failure_reason = str;
   return 0;
}

// Central allocation hook; routes through the (user-overridable) STBI_MALLOC.
static void *stbi__malloc(size_t size)
{
   return STBI_MALLOC(size);
}
+
+
+#ifndef STBI_NO_STDIO
+
// stdio adapter for stbi_io_callbacks.read: 'user' is a FILE*.
static int stbi__stdio_read(void *user, char *data, int size)
{
   FILE *f = (FILE *) user;
   return (int) fread(data, 1, size, f);
}
+
// stdio adapter for stbi_io_callbacks.skip: seek forward (or backward for
// negative n, which implements 'unget').
static void stbi__stdio_skip(void *user, int n)
{
   FILE *f = (FILE *) user;
   if (fseek(f, n, SEEK_CUR) == -1)
      stbi__err("fseek() error");   // best effort: record it; callers notice via read/eof
}
+
// stdio adapter for stbi_io_callbacks.eof: 'user' is a FILE*.
// Also report a stream in an error state as "eof": once ferror() is set,
// fread() keeps returning 0, and a caller that only checked feof() could
// loop forever.  (Same fix as upstream stb_image.)
static int stbi__stdio_eof(void *user)
{
   return feof((FILE*) user) || ferror((FILE *) user);
}
+
// Canonical callback set for reading from a FILE*.
static stbi_io_callbacks stbi__stdio_callbacks =
{
   stbi__stdio_read,
   stbi__stdio_skip,
   stbi__stdio_eof,
};

// initialize a context to read from an already-open FILE* via the stdio callbacks
static void stbi__start_file(stbi__context *s, FILE *f)
{
   stbi__start_callbacks(s, &stbi__stdio_callbacks, (void *) f);
}
+
+//static void stop_file(stbi__context *s) { }
+
+#endif // !STBI_NO_STDIO
+
+static void stbi__rewind(stbi__context *s)
+{
+ // conceptually rewind SHOULD rewind to the beginning of the stream,
+ // but we just rewind to the beginning of the initial buffer, because
+ // we only use it after doing 'test', which only ever looks at at most 92 bytes
+ s->img_buffer = s->img_buffer_original;
+ s->img_buffer_end = s->img_buffer_original_end;
+}
+
// Channel orderings a decoder may report in stbi__result_info.
enum
{
   STBI_ORDER_RGB,
   STBI_ORDER_BGR
};

// Per-decode metadata filled in by stbi__load_main and the loaders.
typedef struct
{
   int bits_per_channel; // 8 (default) or 16
   int num_channels;     // 0 unless a loader overrides the default
   int channel_order;    // STBI_ORDER_RGB for all current loaders
} stbi__result_info;
+
+#ifndef STBI_NO_JPEG
+static int stbi__jpeg_test(stbi__context *s);
+static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri);
+static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp);
+#endif
+
+#ifndef STBI_NO_PNG
+static int stbi__png_test(stbi__context *s);
+static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri);
+static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp);
+static int stbi__png_is16(stbi__context *s);
+#endif
+
+#ifndef STBI_NO_BMP
+static int stbi__bmp_test(stbi__context *s);
+static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri);
+static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp);
+#endif
+
+#ifndef STBI_NO_TGA
+static int stbi__tga_test(stbi__context *s);
+static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri);
+static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp);
+#endif
+
+#ifndef STBI_NO_PSD
+static int stbi__psd_test(stbi__context *s);
+static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc);
+static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp);
+static int stbi__psd_is16(stbi__context *s);
+#endif
+
+#ifndef STBI_NO_HDR
+static int stbi__hdr_test(stbi__context *s);
+static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri);
+static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp);
+#endif
+
+#ifndef STBI_NO_PIC
+static int stbi__pic_test(stbi__context *s);
+static void *stbi__pic_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri);
+static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp);
+#endif
+
+#ifndef STBI_NO_GIF
+static int stbi__gif_test(stbi__context *s);
+static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri);
+static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int *z, int *comp, int req_comp);
+static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp);
+#endif
+
+#ifndef STBI_NO_PNM
+static int stbi__pnm_test(stbi__context *s);
+static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri);
+static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp);
+#endif
+
+// stb_image uses ints pervasively, including for offset calculations.
+// therefore the largest decoded image size we can support with the
+// current code, even on 64-bit targets, is INT_MAX. this is not a
+// significant limitation for the intended use case.
+//
+// we do, however, need to make sure our size calculations don't
+// overflow. hence a few helper functions for size calculations that
+// multiply integers together, making sure that they're non-negative
+// and no overflow occurs.
+
// return 1 if the sum is valid, 0 on overflow.
// negative terms are considered invalid.
static int stbi__addsizes_valid(int a, int b)
{
   // Reject BOTH negative terms: the old code only rejected b < 0, so a
   // negative 'a' slipped through even though the contract above forbids it.
   if (a < 0 || b < 0) return 0;
   // now 0 <= b <= INT_MAX, hence also
   // 0 <= INT_MAX - b <= INT_MAX.
   // And "a + b <= INT_MAX" (which might overflow) is the
   // same as a <= INT_MAX - b (no overflow)
   return a <= INT_MAX - b;
}
+
// returns 1 if the product is valid, 0 on overflow.
// negative factors are considered invalid.
static int stbi__mul2sizes_valid(int a, int b)
{
   if (a < 0) return 0;
   if (b < 0) return 0;
   // multiplying by zero can never overflow
   if (b == 0) return 1;
   // a*b <= INT_MAX  <=>  a <= INT_MAX/b  (integer division, b > 0);
   // this checks without performing the possibly-overflowing multiply
   return a <= INT_MAX / b;
}
+
// returns 1 if "a*b + add" has no negative terms/factors and doesn't overflow
static int stbi__mad2sizes_valid(int a, int b, int add)
{
   if (!stbi__mul2sizes_valid(a, b)) return 0;
   return stbi__addsizes_valid(a*b, add);
}
+
// returns 1 if "a*b*c + add" has no negative terms/factors and doesn't overflow
static int stbi__mad3sizes_valid(int a, int b, int c, int add)
{
   // validate each partial product left-to-right, then the final addition
   if (!stbi__mul2sizes_valid(a, b))   return 0;
   if (!stbi__mul2sizes_valid(a*b, c)) return 0;
   return stbi__addsizes_valid(a*b*c, add);
}
+
+// returns 1 if "a*b*c*d + add" has no negative terms/factors and doesn't overflow
+#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR)
static int stbi__mad4sizes_valid(int a, int b, int c, int d, int add)
{
   // validate each partial product left-to-right, then the final addition
   if (!stbi__mul2sizes_valid(a, b))     return 0;
   if (!stbi__mul2sizes_valid(a*b, c))   return 0;
   if (!stbi__mul2sizes_valid(a*b*c, d)) return 0;
   return stbi__addsizes_valid(a*b*c*d, add);
}
+#endif
+
// mallocs with size overflow checking: NULL on any invalid size combination
static void *stbi__malloc_mad2(int a, int b, int add)
{
   if (stbi__mad2sizes_valid(a, b, add))
      return stbi__malloc(a*b + add);
   return NULL;
}
+
// malloc of a*b*c + add bytes with overflow checking; NULL on invalid sizes
static void *stbi__malloc_mad3(int a, int b, int c, int add)
{
   if (stbi__mad3sizes_valid(a, b, c, add))
      return stbi__malloc(a*b*c + add);
   return NULL;
}
+
+#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR)
// malloc of a*b*c*d + add bytes with overflow checking; NULL on invalid sizes
static void *stbi__malloc_mad4(int a, int b, int c, int d, int add)
{
   if (stbi__mad4sizes_valid(a, b, c, d, add))
      return stbi__malloc(a*b*c*d + add);
   return NULL;
}
+#endif
+
+// stbi__err - error
+// stbi__errpf - error returning pointer to float
+// stbi__errpuc - error returning pointer to unsigned char
+
+#ifdef STBI_NO_FAILURE_STRINGS
+ #define stbi__err(x,y) 0
+#elif defined(STBI_FAILURE_USERMSG)
+ #define stbi__err(x,y) stbi__err(y)
+#else
+ #define stbi__err(x,y) stbi__err(x)
+#endif
+
+#define stbi__errpf(x,y) ((float *)(size_t) (stbi__err(x,y)?NULL:NULL))
+#define stbi__errpuc(x,y) ((unsigned char *)(size_t) (stbi__err(x,y)?NULL:NULL))
+
// Free a pixel buffer returned by any stbi_load* function.  Routes through
// STBI_FREE, which is plain free() unless the user overrode the allocator.
STBIDEF void stbi_image_free(void *retval_from_stbi_load)
{
   STBI_FREE(retval_from_stbi_load);
}
+
+#ifndef STBI_NO_LINEAR
+static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp);
+#endif
+
+#ifndef STBI_NO_HDR
+static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp);
+#endif
+
// Global flip-on-load flag (shared by all contexts; not thread-safe).
static int stbi__vertically_flip_on_load = 0;

// When nonzero, decoded images are flipped so the first output row is the
// bottom of the image.
STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip)
{
   stbi__vertically_flip_on_load = flag_true_if_should_flip;
}
+
+static void *stbi__load_main(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int /*bpc*/)
+{
+ memset(ri, 0, sizeof(*ri)); // make sure it's initialized if we add new fields
+ ri->bits_per_channel = 8; // default is 8 so most paths don't have to be changed
+ ri->channel_order = STBI_ORDER_RGB; // all current input & output are this, but this is here so we can add BGR order
+ ri->num_channels = 0;
+
+ #ifndef STBI_NO_JPEG
+ if (stbi__jpeg_test(s)) return stbi__jpeg_load(s,x,y,comp,req_comp, ri);
+ #endif
+ #ifndef STBI_NO_PNG
+ if (stbi__png_test(s)) return stbi__png_load(s,x,y,comp,req_comp, ri);
+ #endif
+ #ifndef STBI_NO_BMP
+ if (stbi__bmp_test(s)) return stbi__bmp_load(s,x,y,comp,req_comp, ri);
+ #endif
+ #ifndef STBI_NO_GIF
+ if (stbi__gif_test(s)) return stbi__gif_load(s,x,y,comp,req_comp, ri);
+ #endif
+ #ifndef STBI_NO_PSD
+ if (stbi__psd_test(s)) return stbi__psd_load(s,x,y,comp,req_comp, ri, bpc);
+ #endif
+ #ifndef STBI_NO_PIC
+ if (stbi__pic_test(s)) return stbi__pic_load(s,x,y,comp,req_comp, ri);
+ #endif
+ #ifndef STBI_NO_PNM
+ if (stbi__pnm_test(s)) return stbi__pnm_load(s,x,y,comp,req_comp, ri);
+ #endif
+
+ #ifndef STBI_NO_HDR
+ if (stbi__hdr_test(s)) {
+ float *hdr = stbi__hdr_load(s, x,y,comp,req_comp, ri);
+ return stbi__hdr_to_ldr(hdr, *x, *y, req_comp ? req_comp : *comp);
+ }
+ #endif
+
+ #ifndef STBI_NO_TGA
+ // test tga last because it's a crappy test!
+ if (stbi__tga_test(s))
+ return stbi__tga_load(s,x,y,comp,req_comp, ri);
+ #endif
+
+ return stbi__errpuc("unknown image type", "Image not of any known type, or corrupt");
+}
+
+static stbi_uc *stbi__convert_16_to_8(stbi__uint16 *orig, int w, int h, int channels)
+{
+ int i;
+ int img_len = w * h * channels;
+ stbi_uc *reduced;
+
+ reduced = (stbi_uc *) stbi__malloc(img_len);
+ if (reduced == NULL) return stbi__errpuc("outofmem", "Out of memory");
+
+ for (i = 0; i < img_len; ++i)
+ reduced[i] = (stbi_uc)((orig[i] >> 8) & 0xFF); // top half of each byte is sufficient approx of 16->8 bit scaling
+
+ STBI_FREE(orig);
+ return reduced;
+}
+
+static stbi__uint16 *stbi__convert_8_to_16(stbi_uc *orig, int w, int h, int channels)
+{
+ int i;
+ int img_len = w * h * channels;
+ stbi__uint16 *enlarged;
+
+ enlarged = (stbi__uint16 *) stbi__malloc(img_len*2);
+ if (enlarged == NULL) return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory");
+
+ for (i = 0; i < img_len; ++i)
+ enlarged[i] = (stbi__uint16)((orig[i] << 8) + orig[i]); // replicate to high and low byte, maps 0->0, 255->0xffff
+
+ STBI_FREE(orig);
+ return enlarged;
+}
+
+static void stbi__vertical_flip(void *image, int w, int h, int bytes_per_pixel)
+{
+ int row;
+ size_t bytes_per_row = (size_t)w * bytes_per_pixel;
+ stbi_uc temp[2048];
+ stbi_uc *bytes = (stbi_uc *)image;
+
+ for (row = 0; row < (h>>1); row++) {
+ stbi_uc *row0 = bytes + row*bytes_per_row;
+ stbi_uc *row1 = bytes + (h - row - 1)*bytes_per_row;
+ // swap row0 with row1
+ size_t bytes_left = bytes_per_row;
+ while (bytes_left) {
+ size_t bytes_copy = (bytes_left < sizeof(temp)) ? bytes_left : sizeof(temp);
+ memcpy(temp, row0, bytes_copy);
+ memcpy(row0, row1, bytes_copy);
+ memcpy(row1, temp, bytes_copy);
+ row0 += bytes_copy;
+ row1 += bytes_copy;
+ bytes_left -= bytes_copy;
+ }
+ }
+}
+
+#ifndef STBI_NO_GIF
+static void stbi__vertical_flip_slices(void *image, int w, int h, int z, int bytes_per_pixel)
+{
+ int slice;
+ int slice_size = w * h * bytes_per_pixel;
+
+ stbi_uc *bytes = (stbi_uc *)image;
+ for (slice = 0; slice < z; ++slice) {
+ stbi__vertical_flip(bytes, w, h, bytes_per_pixel);
+ bytes += slice_size;
+ }
+}
+#endif
+
+static unsigned char *stbi__load_and_postprocess_8bit(stbi__context *s, int *x, int *y, int *comp, int req_comp)
+{
+ stbi__result_info ri;
+ void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 8);
+
+ if (result == NULL)
+ return NULL;
+
+ if (ri.bits_per_channel != 8) {
+ STBI_ASSERT(ri.bits_per_channel == 16);
+ result = stbi__convert_16_to_8((stbi__uint16 *) result, *x, *y, req_comp == 0 ? *comp : req_comp);
+ ri.bits_per_channel = 8;
+ }
+
+ // @TODO: move stbi__convert_format to here
+
+ if (stbi__vertically_flip_on_load) {
+ int channels = req_comp ? req_comp : *comp;
+ stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi_uc));
+ }
+
+ return (unsigned char *) result;
+}
+
+static stbi__uint16 *stbi__load_and_postprocess_16bit(stbi__context *s, int *x, int *y, int *comp, int req_comp)
+{
+ stbi__result_info ri;
+ void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 16);
+
+ if (result == NULL)
+ return NULL;
+
+ if (ri.bits_per_channel != 16) {
+ STBI_ASSERT(ri.bits_per_channel == 8);
+ result = stbi__convert_8_to_16((stbi_uc *) result, *x, *y, req_comp == 0 ? *comp : req_comp);
+ ri.bits_per_channel = 16;
+ }
+
+ // @TODO: move stbi__convert_format16 to here
+ // @TODO: special case RGB-to-Y (and RGBA-to-YA) for 8-bit-to-16-bit case to keep more precision
+
+ if (stbi__vertically_flip_on_load) {
+ int channels = req_comp ? req_comp : *comp;
+ stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi__uint16));
+ }
+
+ return (stbi__uint16 *) result;
+}
+
+#if !defined(STBI_NO_HDR) || !defined(STBI_NO_LINEAR)
+static void stbi__float_postprocess(float *result, int *x, int *y, int *comp, int req_comp)
+{
+ if (stbi__vertically_flip_on_load && result != NULL) {
+ int channels = req_comp ? req_comp : *comp;
+ stbi__vertical_flip(result, *x, *y, channels * sizeof(float));
+ }
+}
+#endif
+
+#ifndef STBI_NO_STDIO
+
// fopen wrapper: on MSVC, treat 'filename'/'mode' as UTF-8 and convert to
// UTF-16 so non-ASCII paths open correctly; elsewhere, plain fopen.
//
// NOTE(review): this branch uses C++ 'new'/'delete'/'nullptr', so the file
// must be compiled as C++ for the _MSC_VER path to build -- confirm against
// the build setup.  Also, plain 'new' throws on failure rather than returning
// NULL, so the '!filenameU'/'!modeU' checks below are dead code (they would
// need 'new (std::nothrow)').  Finally, the guard here is _MSC_VER while
// <windows.h> above is included under _WIN32/__CYGWIN__, so MinGW takes the
// plain fopen path.
static FILE *stbi__fopen(char const *filename, char const *mode)
{
   FILE *f;
#if defined(_MSC_VER)
   // first call: query the required buffer length (in wide chars, incl. NUL)
   DWORD cch =
       MultiByteToWideChar(CP_UTF8, 0, filename, -1, nullptr, 0);
   wchar_t *filenameU = new wchar_t[cch];
   if(!filenameU)
   {
      return 0;
   }
   MultiByteToWideChar(CP_UTF8, 0, filename, -1, filenameU, cch);
   cch = MultiByteToWideChar(CP_UTF8, 0, mode, -1, nullptr, 0);
   wchar_t *modeU = new wchar_t[cch];
   if(!modeU)
   {
      delete[] filenameU;
      return 0;
   }
   MultiByteToWideChar(CP_UTF8, 0, mode, -1, modeU, cch);
#if _MSC_VER >= 1400
   // secure CRT variant: _wfopen_s returns an errno-style code
   if(0 != _wfopen_s(&f, filenameU, modeU))
      f = 0;
   delete[] filenameU;
   delete[] modeU;
#else // _MSC_VER >= 1400
   f = _wfopen(filenameU, modeU);
   delete[] filenameU;
   delete[] modeU;
#endif // _MSC_VER >= 1400
#else // _MSC_VER
   f = fopen(filename, mode);
#endif //_MSC_VER
   return f;
}
+
+
+STBIDEF stbi_uc *stbi_load(char const *filename, int *x, int *y, int *comp, int req_comp)
+{
+ FILE *f = stbi__fopen(filename, "rb");
+ unsigned char *result;
+ if (!f) return stbi__errpuc("can't fopen", "Unable to open file");
+ result = stbi_load_from_file(f,x,y,comp,req_comp);
+ fclose(f);
+ return result;
+}
+
+STBIDEF stbi_uc *stbi_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp)
+{
+ unsigned char *result;
+ stbi__context s;
+ stbi__start_file(&s,f);
+ result = stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp);
+ if (result) {
+ // need to 'unget' all the characters in the IO buffer
+ if (fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR) == -1) {
+ stbi_image_free(result);
+ return stbi__errpuc("fseek() error", "File Seek Fail");
+ }
+ }
+ return result;
+}
+
+STBIDEF stbi__uint16 *stbi_load_from_file_16(FILE *f, int *x, int *y, int *comp, int req_comp)
+{
+ stbi__uint16 *result;
+ stbi__context s;
+ stbi__start_file(&s,f);
+ result = stbi__load_and_postprocess_16bit(&s,x,y,comp,req_comp);
+ if (result) {
+ // need to 'unget' all the characters in the IO buffer
+ if (fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR) == -1) {
+ stbi_image_free(result);
+ return (stbi__uint16 *) stbi__errpuc("fseek() error", "File Seek Fail");
+ }
+ }
+ return result;
+}
+
+STBIDEF stbi_us *stbi_load_16(char const *filename, int *x, int *y, int *comp, int req_comp)
+{
+ FILE *f = stbi__fopen(filename, "rb");
+ stbi__uint16 *result;
+ if (!f) return (stbi_us *) stbi__errpuc("can't fopen", "Unable to open file");
+ result = stbi_load_from_file_16(f,x,y,comp,req_comp);
+ fclose(f);
+ return result;
+}
+
+
+#endif //!STBI_NO_STDIO
+
// Decode a 16-bit-per-channel image from a memory buffer.
STBIDEF stbi_us *stbi_load_16_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels)
{
   stbi__context s;
   stbi__start_mem(&s,buffer,len);
   return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels);
}

// Decode a 16-bit-per-channel image via user-supplied IO callbacks.
STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels)
{
   stbi__context s;
   stbi__start_callbacks(&s, (stbi_io_callbacks *)clbk, user);
   return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels);
}

// Decode an 8-bit-per-channel image from a memory buffer.
STBIDEF stbi_uc *stbi_load_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp)
{
   stbi__context s;
   stbi__start_mem(&s,buffer,len);
   return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp);
}

// Decode an 8-bit-per-channel image via user-supplied IO callbacks.
STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp)
{
   stbi__context s;
   stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user);
   return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp);
}
+
+#ifndef STBI_NO_GIF
+STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp)
+{
+ unsigned char *result;
+ stbi__context s;
+ stbi__start_mem(&s,buffer,len);
+
+ result = (unsigned char*) stbi__load_gif_main(&s, delays, x, y, z, comp, req_comp);
+ if (stbi__vertically_flip_on_load) {
+ stbi__vertical_flip_slices( result, *x, *y, *z, *comp );
+ }
+
+ return result;
+}
+#endif
+
+#ifndef STBI_NO_LINEAR
// Decode an image as linear float data. Real HDR (Radiance) files decode
// natively; anything else is decoded to 8-bit and gamma-expanded to float.
static float *stbi__loadf_main(stbi__context *s, int *x, int *y, int *comp, int req_comp)
{
   unsigned char *data;
   #ifndef STBI_NO_HDR
   if (stbi__hdr_test(s)) {
      stbi__result_info ri;
      float *hdr_data = stbi__hdr_load(s,x,y,comp,req_comp, &ri);
      if (hdr_data)
         stbi__float_postprocess(hdr_data,x,y,comp,req_comp);
      return hdr_data;
   }
   #endif
   // not an HDR file (or HDR disabled): decode 8-bit, then lift to float
   data = stbi__load_and_postprocess_8bit(s, x, y, comp, req_comp);
   if (data)
      return stbi__ldr_to_hdr(data, *x, *y, req_comp ? req_comp : *comp);
   return stbi__errpf("unknown image type", "Image not of any known type, or corrupt");
}
+
+STBIDEF float *stbi_loadf_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp)
+{
+ stbi__context s;
+ stbi__start_mem(&s,buffer,len);
+ return stbi__loadf_main(&s,x,y,comp,req_comp);
+}
+
+STBIDEF float *stbi_loadf_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp)
+{
+ stbi__context s;
+ stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user);
+ return stbi__loadf_main(&s,x,y,comp,req_comp);
+}
+
+#ifndef STBI_NO_STDIO
+STBIDEF float *stbi_loadf(char const *filename, int *x, int *y, int *comp, int req_comp)
+{
+ float *result;
+ FILE *f = stbi__fopen(filename, "rb");
+ if (!f) return stbi__errpf("can't fopen", "Unable to open file");
+ result = stbi_loadf_from_file(f,x,y,comp,req_comp);
+ fclose(f);
+ return result;
+}
+
+STBIDEF float *stbi_loadf_from_file(FILE *f, int *x, int *y, int *comp, int req_comp)
+{
+ stbi__context s;
+ stbi__start_file(&s,f);
+ return stbi__loadf_main(&s,x,y,comp,req_comp);
+}
+#endif // !STBI_NO_STDIO
+
+#endif // !STBI_NO_LINEAR
+
// this is-hdr-or-not query is defined independently of whether STBI_NO_HDR is
// defined, for API simplicity; if STBI_NO_HDR is defined, it always
// reports false!
+
// Returns nonzero if the buffer begins with a Radiance HDR signature.
// With STBI_NO_HDR the function still exists but always reports 0.
STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len)
{
   #ifndef STBI_NO_HDR
   stbi__context s;
   stbi__start_mem(&s,buffer,len);
   return stbi__hdr_test(&s);
   #else
   STBI_NOTUSED(buffer);
   STBI_NOTUSED(len);
   return 0;
   #endif
}
+
+#ifndef STBI_NO_STDIO
+STBIDEF int stbi_is_hdr (char const *filename)
+{
+ FILE *f = stbi__fopen(filename, "rb");
+ int result=0;
+ if (f) {
+ result = stbi_is_hdr_from_file(f);
+ fclose(f);
+ }
+ return result;
+}
+
// HDR signature test on an open FILE*, restoring the original file position
// afterwards so the stream can still be decoded normally.
STBIDEF int stbi_is_hdr_from_file(FILE *f)
{
   #ifndef STBI_NO_HDR
   long pos = ftell(f);
   int res;
   stbi__context s;
   stbi__start_file(&s,f);
   res = stbi__hdr_test(&s);
   // rewind; a failed seek is reported as an error rather than silently ignored
   if (fseek(f, pos, SEEK_SET) == -1) return stbi__err("fseek() error", "File Seek Fail");
   return res;
   #else
   STBI_NOTUSED(f);
   return 0;
   #endif
}
+#endif // !STBI_NO_STDIO
+
// HDR signature test driven by user IO callbacks; constant 0 when HDR
// support is compiled out.
STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user)
{
   #ifndef STBI_NO_HDR
   stbi__context s;
   // const is cast away to match the shared stbi__start_callbacks signature
   stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user);
   return stbi__hdr_test(&s);
   #else
   STBI_NOTUSED(clbk);
   STBI_NOTUSED(user);
   return 0;
   #endif
}
+
#ifndef STBI_NO_LINEAR
// gamma/scale applied when expanding 8-bit LDR data to linear float
// (consumed by stbi__ldr_to_hdr)
static float stbi__l2h_gamma=2.2f, stbi__l2h_scale=1.0f;

STBIDEF void stbi_ldr_to_hdr_gamma(float gamma) { stbi__l2h_gamma = gamma; }
STBIDEF void stbi_ldr_to_hdr_scale(float scale) { stbi__l2h_scale = scale; }
#endif

// inverse gamma/scale applied when tone-mapping float HDR data down to 8-bit
// (consumed by stbi__hdr_to_ldr); stored pre-inverted
static float stbi__h2l_gamma_i=1.0f/2.2f, stbi__h2l_scale_i=1.0f;

STBIDEF void stbi_hdr_to_ldr_gamma(float gamma) { stbi__h2l_gamma_i = 1/gamma; }
STBIDEF void stbi_hdr_to_ldr_scale(float scale) { stbi__h2l_scale_i = 1/scale; }
+
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Common code used by all image loaders
+//
+
// how much of the stream a parse pass needs: full pixel data, just enough
// to identify the file type, or only the header (dimensions/components)
enum
{
   STBI__SCAN_load=0,
   STBI__SCAN_type,
   STBI__SCAN_header
};
+
// Refill the context's scratch buffer from the user IO read callback.
static void stbi__refill_buffer(stbi__context *s)
{
   int n = (s->io.read)(s->io_user_data,(char*)s->buffer_start,s->buflen);
   if (n == 0) {
      // at end of file, treat same as if from memory, but need to handle case
      // where s->img_buffer isn't pointing to safe memory, e.g. 0-byte file
      s->read_from_callbacks = 0;
      s->img_buffer = s->buffer_start;
      s->img_buffer_end = s->buffer_start+1;
      *s->img_buffer = 0;
   } else {
      s->img_buffer = s->buffer_start;
      s->img_buffer_end = s->buffer_start + n;
   }
}
+
// Fetch the next byte, refilling from the IO callbacks when the buffer runs
// dry. Returns 0 at end of stream; callers detect EOF via stbi__at_eof.
stbi_inline static stbi_uc stbi__get8(stbi__context *s)
{
   if (s->img_buffer < s->img_buffer_end)
      return *s->img_buffer++;
   if (s->read_from_callbacks) {
      stbi__refill_buffer(s);
      return *s->img_buffer++;
   }
   return 0;
}
+
// True when no more bytes can be produced. For callback-based streams this
// must consider both the user's eof() and any still-buffered bytes.
stbi_inline static int stbi__at_eof(stbi__context *s)
{
   if (s->io.read) {
      if (!(s->io.eof)(s->io_user_data)) return 0;
      // if feof() is true, check if buffer = end
      // special case: we've only got the special 0 character at the end
      if (s->read_from_callbacks == 0) return 1;
   }

   return s->img_buffer >= s->img_buffer_end;
}
+
// Advance the read position by n bytes. A negative n (possible with corrupt
// header arithmetic) clamps to end-of-buffer rather than seeking backwards.
static void stbi__skip(stbi__context *s, int n)
{
   if (n < 0) {
      s->img_buffer = s->img_buffer_end;
      return;
   }
   if (s->io.read) {
      int blen = (int) (s->img_buffer_end - s->img_buffer);
      if (blen < n) {
         // consume the buffered bytes, delegate the remainder to the callback
         s->img_buffer = s->img_buffer_end;
         (s->io.skip)(s->io_user_data, n - blen);
         return;
      }
   }
   s->img_buffer += n;
}
+
// Read exactly n bytes into 'buffer'. Returns 1 on success, 0 if the stream
// ran out first.
static int stbi__getn(stbi__context *s, stbi_uc *buffer, int n)
{
   if (s->io.read) {
      int blen = (int) (s->img_buffer_end - s->img_buffer);
      if (blen < n) {
         int res, count;

         // take what is buffered, then satisfy the rest straight from the callback
         memcpy(buffer, s->img_buffer, blen);

         count = (s->io.read)(s->io_user_data, (char*) buffer + blen, n - blen);
         res = (count == (n-blen));
         s->img_buffer = s->img_buffer_end;
         return res;
      }
   }

   if (s->img_buffer+n <= s->img_buffer_end) {
      memcpy(buffer, s->img_buffer, n);
      s->img_buffer += n;
      return 1;
   } else
      return 0;
}
+
+static int stbi__get16be(stbi__context *s)
+{
+ int z = stbi__get8(s);
+ return (z << 8) + stbi__get8(s);
+}
+
+static stbi__uint32 stbi__get32be(stbi__context *s)
+{
+ stbi__uint32 z = stbi__get16be(s);
+ return (z << 16) + stbi__get16be(s);
+}
+
+#if defined(STBI_NO_BMP) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF)
+// nothing
+#else
+static int stbi__get16le(stbi__context *s)
+{
+ int z = stbi__get8(s);
+ return z + (stbi__get8(s) << 8);
+}
+#endif
+
+#ifndef STBI_NO_BMP
+static stbi__uint32 stbi__get32le(stbi__context *s)
+{
+ stbi__uint32 z = stbi__get16le(s);
+ return z + (stbi__get16le(s) << 16);
+}
+#endif
+
+#define STBI__BYTECAST(x) ((stbi_uc) ((x) & 255)) // truncate int to byte without warnings
+
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// generic converter from built-in img_n to req_comp
+// individual types do this automatically as much as possible (e.g. jpeg
+// does all cases internally since it needs to colorspace convert anyway,
+// and it never has alpha, so very few cases ). png can automatically
+// interleave an alpha=255 channel, but falls back to this for other cases
+//
+// assume data buffer is malloced, so malloc a new one and free that one
+// only failure mode is malloc failing
+
+static stbi_uc stbi__compute_y(int r, int g, int b)
+{
+ return (stbi_uc) (((r*77) + (g*150) + (29*b)) >> 8);
+}
+
// Convert 8-bit image data from img_n components to req_comp components.
// Frees 'data' and returns a newly allocated buffer (or 'data' itself when
// no conversion is needed). Only failure mode is malloc failing.
static unsigned char *stbi__convert_format(unsigned char *data, int img_n, int req_comp, unsigned int x, unsigned int y)
{
   int i,j;
   unsigned char *good;

   if (req_comp == img_n) return data;
   STBI_ASSERT(req_comp >= 1 && req_comp <= 4);

   // overflow-checked allocation of req_comp*x*y bytes
   good = (unsigned char *) stbi__malloc_mad3(req_comp, x, y, 0);
   if (good == NULL) {
      STBI_FREE(data);
      return stbi__errpuc("outofmem", "Out of memory");
   }

   for (j=0; j < (int) y; ++j) {
      unsigned char *src  = data + j * x * img_n   ;
      unsigned char *dest = good + j * x * req_comp;

      #define STBI__COMBO(a,b)  ((a)*8+(b))
      #define STBI__CASE(a,b)   case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b)
      // convert source image with img_n components to one with req_comp components;
      // avoid switch per pixel, so use switch per scanline and massive macros
      switch (STBI__COMBO(img_n, req_comp)) {
         STBI__CASE(1,2) { dest[0]=src[0]; dest[1]=255;                                     } break;
         STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0];                                  } break;
         STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=255;                     } break;
         STBI__CASE(2,1) { dest[0]=src[0];                                                  } break;
         STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0];                                  } break;
         STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=src[1];                  } break;
         STBI__CASE(3,4) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2];dest[3]=255;        } break;
         STBI__CASE(3,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]);                   } break;
         STBI__CASE(3,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); dest[1] = 255;    } break;
         STBI__CASE(4,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]);                   } break;
         STBI__CASE(4,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); dest[1] = src[3]; } break;
         STBI__CASE(4,3) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2];                    } break;
         default: STBI_ASSERT(0);
      }
      #undef STBI__CASE
   }

   STBI_FREE(data);
   return good;
}
+
+static stbi__uint16 stbi__compute_y_16(int r, int g, int b)
+{
+ return (stbi__uint16) (((r*77) + (g*150) + (29*b)) >> 8);
+}
+
+static stbi__uint16 *stbi__convert_format16(stbi__uint16 *data, int img_n, int req_comp, unsigned int x, unsigned int y)
+{
+ int i,j;
+ stbi__uint16 *good;
+
+ if (req_comp == img_n) return data;
+ STBI_ASSERT(req_comp >= 1 && req_comp <= 4);
+
+ good = (stbi__uint16 *) stbi__malloc(req_comp * x * y * 2);
+ if (good == NULL) {
+ STBI_FREE(data);
+ return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory");
+ }
+
+ for (j=0; j < (int) y; ++j) {
+ stbi__uint16 *src = data + j * x * img_n ;
+ stbi__uint16 *dest = good + j * x * req_comp;
+
+ #define STBI__COMBO(a,b) ((a)*8+(b))
+ #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b)
+ // convert source image with img_n components to one with req_comp components;
+ // avoid switch per pixel, so use switch per scanline and massive macros
+ switch (STBI__COMBO(img_n, req_comp)) {
+ STBI__CASE(1,2) { dest[0]=src[0], dest[1]=0xffff; } break;
+ STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break;
+ STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0], dest[3]=0xffff; } break;
+ STBI__CASE(2,1) { dest[0]=src[0]; } break;
+ STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break;
+ STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0], dest[3]=src[1]; } break;
+ STBI__CASE(3,4) { dest[0]=src[0],dest[1]=src[1],dest[2]=src[2],dest[3]=0xffff; } break;
+ STBI__CASE(3,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break;
+ STBI__CASE(3,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]), dest[1] = 0xffff; } break;
+ STBI__CASE(4,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break;
+ STBI__CASE(4,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]), dest[1] = src[3]; } break;
+ STBI__CASE(4,3) { dest[0]=src[0],dest[1]=src[1],dest[2]=src[2]; } break;
+ default: STBI_ASSERT(0);
+ }
+ #undef STBI__CASE
+ }
+
+ STBI_FREE(data);
+ return good;
+}
+
+#ifndef STBI_NO_LINEAR
// Expand 8-bit LDR data to linear float using the user-configurable
// stbi__l2h_gamma/stbi__l2h_scale. When comp is even, the last channel is
// treated as alpha and scaled linearly without gamma. Frees 'data'.
static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp)
{
   int i,k,n;
   float *output;
   if (!data) return NULL;
   output = (float *) stbi__malloc_mad4(x, y, comp, sizeof(float), 0);
   if (output == NULL) { STBI_FREE(data); return stbi__errpf("outofmem", "Out of memory"); }
   // compute number of non-alpha components
   if (comp & 1) n = comp; else n = comp-1;
   for (i=0; i < x*y; ++i) {
      for (k=0; k < n; ++k) {
         output[i*comp + k] = (float) (pow(data[i*comp+k]/255.0f, stbi__l2h_gamma) * stbi__l2h_scale);
      }
      // alpha (if any): plain linear rescale to 0..1
      if (k < comp) output[i*comp + k] = data[i*comp+k]/255.0f;
   }
   STBI_FREE(data);
   return output;
}
+#endif
+
+#ifndef STBI_NO_HDR
// truncating float->int; values are biased by +0.5 beforehand, so this rounds
#define stbi__float2int(x)   ((int) (x))
// Tone-map linear float data down to 8 bits using the pre-inverted
// stbi__h2l_gamma_i/stbi__h2l_scale_i; an alpha channel (even comp) is
// scaled linearly without gamma. Frees 'data'.
static stbi_uc *stbi__hdr_to_ldr(float   *data, int x, int y, int comp)
{
   int i,k,n;
   stbi_uc *output;
   if (!data) return NULL;
   output = (stbi_uc *) stbi__malloc_mad3(x, y, comp, 0);
   if (output == NULL) { STBI_FREE(data); return stbi__errpuc("outofmem", "Out of memory"); }
   // compute number of non-alpha components
   if (comp & 1) n = comp; else n = comp-1;
   for (i=0; i < x*y; ++i) {
      for (k=0; k < n; ++k) {
         float z = (float) pow(data[i*comp+k]*stbi__h2l_scale_i, stbi__h2l_gamma_i) * 255 + 0.5f;
         if (z < 0) z = 0;
         if (z > 255) z = 255;
         output[i*comp + k] = (stbi_uc) stbi__float2int(z);
      }
      if (k < comp) {
         // alpha: linear rescale, clamped to the byte range
         float z = data[i*comp+k] * 255 + 0.5f;
         if (z < 0) z = 0;
         if (z > 255) z = 255;
         output[i*comp + k] = (stbi_uc) stbi__float2int(z);
      }
   }
   STBI_FREE(data);
   return output;
}
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// "baseline" JPEG/JFIF decoder
+//
+// simple implementation
+// - doesn't support delayed output of y-dimension
+// - simple interface (only one output format: 8-bit interleaved RGB)
+// - doesn't try to recover corrupt jpegs
+// - doesn't allow partial loading, loading multiple at once
+// - still fast on x86 (copying globals into locals doesn't help x86)
+// - allocates lots of intermediate memory (full size of all components)
+// - non-interleaved case requires this anyway
+// - allows good upsampling (see next)
+// high-quality
+// - upsampled channels are bilinearly interpolated, even across blocks
+// - quality integer IDCT derived from IJG's 'slow'
+// performance
+// - fast huffman; reasonable integer IDCT
+// - some SIMD kernels for common paths on targets with SSE2/NEON
+// - uses a lot of intermediate memory, could cache poorly
+
+#ifndef STBI_NO_JPEG
+
// huffman decoding acceleration
#define FAST_BITS   9  // larger handles more cases; smaller stomps less cache

// Derived decode tables for one huffman table slot, built by
// stbi__build_huffman from the DHT marker's 16 code-length counts.
typedef struct
{
   stbi_uc  fast[1 << FAST_BITS]; // symbol index per FAST_BITS-bit prefix; 255 = code longer than FAST_BITS
   // weirdly, repacking this into AoS is a 10% speed loss, instead of a win
   stbi__uint16 code[256];
   stbi_uc  values[256];
   stbi_uc  size[257];
   unsigned int maxcode[18];
   int    delta[17];   // old 'firstsymbol' - old 'firstcode'
} stbi__huffman;
+
// Full decoder state for one JPEG parse: huffman/dequant tables, per-component
// planes, the entropy bit buffer, and progressive-scan bookkeeping.
typedef struct
{
   stbi__context *s;
   stbi__huffman huff_dc[4];
   stbi__huffman huff_ac[4];
   stbi__uint16 dequant[4][64];
   stbi__int16 fast_ac[4][1 << FAST_BITS];

// sizes for components, interleaved MCUs
   int img_h_max, img_v_max;
   int img_mcu_x, img_mcu_y;
   int img_mcu_w, img_mcu_h;

// definition of jpeg image component
   struct
   {
      int id;
      int h,v;       // sampling factors
      int tq;        // quant table index
      int hd,ha;     // DC/AC huffman table indices
      int dc_pred;   // DC predictor (differentially coded)

      int x,y,w2,h2;
      stbi_uc *data;
      void *raw_data, *raw_coeff;
      stbi_uc *linebuf;
      short   *coeff;   // progressive only
      int      coeff_w, coeff_h; // number of 8x8 coefficient blocks
   } img_comp[4];

   stbi__uint32   code_buffer; // jpeg entropy-coded buffer
   int            code_bits;   // number of valid bits
   unsigned char  marker;      // marker seen while filling entropy buffer
   int            nomore;      // flag if we saw a marker so must stop

   // progressive-scan parameters from the SOS marker
   int            progressive;
   int            spec_start;
   int            spec_end;
   int            succ_high;
   int            succ_low;
   int            eob_run;
   int            jfif;
   int            app14_color_transform; // Adobe APP14 tag
   int            rgb;

   int scan_n, order[4];
   int restart_interval, todo;

// kernels (selected at init; SIMD variants may be substituted)
   void (*idct_block_kernel)(stbi_uc *out, int out_stride, short data[64]);
   void (*YCbCr_to_RGB_kernel)(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step);
   stbi_uc *(*resample_row_hv_2_kernel)(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs);
} stbi__jpeg;
+
// Build the decode tables for one huffman table from 'count' (number of
// codes of each length 1..16, straight from the DHT marker). Returns 0 on
// invalid code-length data.
static int stbi__build_huffman(stbi__huffman *h, int *count)
{
   int i,j,k=0;
   unsigned int code;
   // build size list for each symbol (from JPEG spec)
   for (i=0; i < 16; ++i)
      for (j=0; j < count[i]; ++j)
         h->size[k++] = (stbi_uc) (i+1);
   h->size[k] = 0;

   // compute actual symbols (from jpeg spec)
   code = 0;
   k = 0;
   for(j=1; j <= 16; ++j) {
      // compute delta to add to code to compute symbol id
      h->delta[j] = k - code;
      if (h->size[k] == j) {
         while (h->size[k] == j)
            h->code[k++] = (stbi__uint16) (code++);
         // reject over-subscribed code lengths (corrupt DHT)
         if (code-1 >= (1u << j)) return stbi__err("bad code lengths","Corrupt JPEG");
      }
      // compute largest code + 1 for this size, preshifted as needed later
      h->maxcode[j] = code << (16-j);
      code <<= 1;
   }
   h->maxcode[j] = 0xffffffff; // sentinel so the decode loop always terminates

   // build non-spec acceleration table; 255 is flag for not-accelerated
   memset(h->fast, 255, 1 << FAST_BITS);
   for (i=0; i < k; ++i) {
      int s = h->size[i];
      if (s <= FAST_BITS) {
         // every FAST_BITS-bit window starting with this code maps to symbol i
         int c = h->code[i] << (FAST_BITS-s);
         int m = 1 << (FAST_BITS-s);
         for (j=0; j < m; ++j) {
            h->fast[c+j] = (stbi_uc) i;
         }
      }
   }
   return 1;
}
+
// build a table that decodes both magnitude and value of small ACs in
// one go.
// Each fast_ac entry packs: value (high 8 bits, signed), run length
// (next 4 bits), and total bits consumed (low 4 bits); 0 = no fast path.
static void stbi__build_fast_ac(stbi__int16 *fast_ac, stbi__huffman *h)
{
   int i;
   for (i=0; i < (1 << FAST_BITS); ++i) {
      stbi_uc fast = h->fast[i];
      fast_ac[i] = 0;
      if (fast < 255) {
         int rs = h->values[fast];
         int run = (rs >> 4) & 15;
         int magbits = rs & 15;
         int len = h->size[fast];

         if (magbits && len + magbits <= FAST_BITS) {
            // magnitude code followed by receive_extend code
            int k = ((i << len) & ((1 << FAST_BITS) - 1)) >> (FAST_BITS - magbits);
            int m = 1 << (magbits - 1);
            if (k < m) k += (~0U << magbits) + 1; // sign-extend per JPEG 'extend'
            // if the result is small enough, we can fit it in fast_ac table
            if (k >= -128 && k <= 127)
               fast_ac[i] = (stbi__int16) ((k * 256) + (run * 16) + (len + magbits));
         }
      }
   }
}
+
// Top up the entropy bit buffer to at least 25 valid bits. A 0xFF byte
// followed by a non-zero byte is a marker: record it and feed zero bits
// from then on (0xFF 0x00 is a stuffed literal 0xFF).
static void stbi__grow_buffer_unsafe(stbi__jpeg *j)
{
   do {
      unsigned int b = j->nomore ? 0 : stbi__get8(j->s);
      if (b == 0xff) {
         int c = stbi__get8(j->s);
         while (c == 0xff) c = stbi__get8(j->s); // consume fill bytes
         if (c != 0) {
            j->marker = (unsigned char) c;
            j->nomore = 1;
            return;
         }
      }
      j->code_buffer |= b << (24 - j->code_bits);
      j->code_bits += 8;
   } while (j->code_bits <= 24);
}
+
// (1 << n) - 1
static const stbi__uint32 stbi__bmask[17]={0,1,3,7,15,31,63,127,255,511,1023,2047,4095,8191,16383,32767,65535};

// decode a jpeg huffman value from the bitstream
// Returns the decoded symbol, or -1 on a corrupt/underfull bitstream.
stbi_inline static int stbi__jpeg_huff_decode(stbi__jpeg *j, stbi__huffman *h)
{
   unsigned int temp;
   int c,k;

   if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);

   // look at the top FAST_BITS and determine what symbol ID it is,
   // if the code is <= FAST_BITS
   c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1);
   k = h->fast[c];
   if (k < 255) {
      int s = h->size[k];
      if (s > j->code_bits)
         return -1;
      j->code_buffer <<= s;
      j->code_bits -= s;
      return h->values[k];
   }

   // naive test is to shift the code_buffer down so k bits are
   // valid, then test against maxcode. To speed this up, we've
   // preshifted maxcode left so that it has (16-k) 0s at the
   // end; in other words, regardless of the number of bits, it
   // wants to be compared against something shifted to have 16;
   // that way we don't need to shift inside the loop.
   temp = j->code_buffer >> 16;
   for (k=FAST_BITS+1 ; ; ++k)
      if (temp < h->maxcode[k])
         break;
   if (k == 17) {
      // error! code not found
      j->code_bits -= 16;
      return -1;
   }

   if (k > j->code_bits)
      return -1;

   // convert the huffman code to the symbol id
   c = ((j->code_buffer >> (32 - k)) & stbi__bmask[k]) + h->delta[k];
   STBI_ASSERT((((j->code_buffer) >> (32 - h->size[c])) & stbi__bmask[h->size[c]]) == h->code[c]);

   // convert the id to a symbol
   j->code_bits -= k;
   j->code_buffer <<= k;
   return h->values[c];
}
+
// bias[n] = (-1<<n) + 1
static const int stbi__jbias[16] = {0,-1,-3,-7,-15,-31,-63,-127,-255,-511,-1023,-2047,-4095,-8191,-16383,-32767};

// combined JPEG 'receive' and JPEG 'extend', since baseline
// always extends everything it receives.
// Reads n bits and sign-extends them per the JPEG EXTEND procedure
// (top bit 0 means negative: add the bias).
stbi_inline static int stbi__extend_receive(stbi__jpeg *j, int n)
{
   unsigned int k;
   int sgn;
   if (j->code_bits < n) stbi__grow_buffer_unsafe(j);

   sgn = (stbi__int32)j->code_buffer >> 31; // sign bit is always in MSB
   k = stbi_lrot(j->code_buffer, n);
   STBI_ASSERT(n >= 0 && n < (int) (sizeof(stbi__bmask)/sizeof(*stbi__bmask)));
   j->code_buffer = k & ~stbi__bmask[n];
   k &= stbi__bmask[n];
   j->code_bits -= n;
   return k + (stbi__jbias[n] & ~sgn);
}
+
// get some unsigned bits
// Reads n bits from the entropy buffer without sign extension.
stbi_inline static int stbi__jpeg_get_bits(stbi__jpeg *j, int n)
{
   unsigned int k;
   if (j->code_bits < n) stbi__grow_buffer_unsafe(j);
   k = stbi_lrot(j->code_buffer, n);
   j->code_buffer = k & ~stbi__bmask[n];
   k &= stbi__bmask[n];
   j->code_bits -= n;
   return k;
}
+
// Read a single bit; returns nonzero (0x80000000) if set, 0 if clear.
stbi_inline static int stbi__jpeg_get_bit(stbi__jpeg *j)
{
   unsigned int k;
   if (j->code_bits < 1) stbi__grow_buffer_unsafe(j);
   k = j->code_buffer;
   j->code_buffer <<= 1;
   --j->code_bits;
   return k & 0x80000000;
}
+
// given a value that's at position X in the zigzag stream,
// where does it appear in the 8x8 matrix coded as row-major?
// (15 extra trailing entries keep corrupt k>63 indices in bounds)
static const stbi_uc stbi__jpeg_dezigzag[64+15] =
{
    0,  1,  8, 16,  9,  2,  3, 10,
   17, 24, 32, 25, 18, 11,  4,  5,
   12, 19, 26, 33, 40, 48, 41, 34,
   27, 20, 13,  6,  7, 14, 21, 28,
   35, 42, 49, 56, 57, 50, 43, 36,
   29, 22, 15, 23, 30, 37, 44, 51,
   58, 59, 52, 45, 38, 31, 39, 46,
   53, 60, 61, 54, 47, 55, 62, 63,
   // let corrupt input sample past end
   63, 63, 63, 63, 63, 63, 63, 63,
   63, 63, 63, 63, 63, 63, 63
};
+
// decode one 64-entry block--
// Baseline (sequential) decode of one 8x8 coefficient block into 'data',
// dequantized and de-zigzagged. 'b' indexes the component for DC prediction.
static int stbi__jpeg_decode_block(stbi__jpeg *j, short data[64], stbi__huffman *hdc, stbi__huffman *hac, stbi__int16 *fac, int b, stbi__uint16 *dequant)
{
   int diff,dc,k;
   int t;

   if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);
   t = stbi__jpeg_huff_decode(j, hdc);
   if (t < 0) return stbi__err("bad huffman code","Corrupt JPEG");

   // 0 all the ac values now so we can do it 32-bits at a time
   memset(data,0,64*sizeof(data[0]));

   // DC is coded as a difference against the component's previous DC
   diff = t ? stbi__extend_receive(j, t) : 0;
   dc = j->img_comp[b].dc_pred + diff;
   j->img_comp[b].dc_pred = dc;
   data[0] = (short) (dc * dequant[0]);

   // decode AC components, see JPEG spec
   k = 1;
   do {
      unsigned int zig;
      int c,r,s;
      if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);
      c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1);
      r = fac[c];
      if (r) { // fast-AC path
         k += (r >> 4) & 15; // run
         s = r & 15; // combined length
         j->code_buffer <<= s;
         j->code_bits -= s;
         // decode into unzigzag'd location
         zig = stbi__jpeg_dezigzag[k++];
         data[zig] = (short) ((r >> 8) * dequant[zig]);
      } else {
         int rs = stbi__jpeg_huff_decode(j, hac);
         if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG");
         s = rs & 15;
         r = rs >> 4;
         if (s == 0) {
            if (rs != 0xf0) break; // end block
            k += 16; // ZRL: run of 16 zeros
         } else {
            k += r;
            // decode into unzigzag'd location
            zig = stbi__jpeg_dezigzag[k++];
            data[zig] = (short) (stbi__extend_receive(j,s) * dequant[zig]);
         }
      }
   } while (k < 64);
   return 1;
}
+
// Progressive-mode DC pass for one block: first scan decodes the (shifted)
// DC value, refinement scans add one bit of precision.
static int stbi__jpeg_decode_block_prog_dc(stbi__jpeg *j, short data[64], stbi__huffman *hdc, int b)
{
   int diff,dc;
   int t;
   if (j->spec_end != 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG");

   if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);

   if (j->succ_high == 0) {
      // first scan for DC coefficient, must be first
      memset(data,0,64*sizeof(data[0])); // 0 all the ac values now
      t = stbi__jpeg_huff_decode(j, hdc);
      diff = t ? stbi__extend_receive(j, t) : 0;

      dc = j->img_comp[b].dc_pred + diff;
      j->img_comp[b].dc_pred = dc;
      data[0] = (short) (dc << j->succ_low);
   } else {
      // refinement scan for DC coefficient
      if (stbi__jpeg_get_bit(j))
         data[0] += (short) (1 << j->succ_low);
   }
   return 1;
}
+
// @OPTIMIZE: store non-zigzagged during the decode passes,
// and only de-zigzag when dequantizing
// Progressive-mode AC pass for one block: the first scan (succ_high==0)
// decodes coefficients in the spec_start..spec_end band; later scans refine
// already-seen coefficients one bit at a time. eob_run counts whole blocks
// to skip (end-of-band runs) and persists across calls.
static int stbi__jpeg_decode_block_prog_ac(stbi__jpeg *j, short data[64], stbi__huffman *hac, stbi__int16 *fac)
{
   int k;
   if (j->spec_start == 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG");

   if (j->succ_high == 0) {
      int shift = j->succ_low;

      // inside an end-of-band run: this whole block contributes nothing
      if (j->eob_run) {
         --j->eob_run;
         return 1;
      }

      k = j->spec_start;
      do {
         unsigned int zig;
         int c,r,s;
         if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);
         c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1);
         r = fac[c];
         if (r) { // fast-AC path
            k += (r >> 4) & 15; // run
            s = r & 15; // combined length
            j->code_buffer <<= s;
            j->code_bits -= s;
            zig = stbi__jpeg_dezigzag[k++];
            data[zig] = (short) ((r >> 8) << shift);
         } else {
            int rs = stbi__jpeg_huff_decode(j, hac);
            if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG");
            s = rs & 15;
            r = rs >> 4;
            if (s == 0) {
               if (r < 15) {
                  // EOBn: (1<<r) + extra bits blocks are all-zero from here
                  j->eob_run = (1 << r);
                  if (r)
                     j->eob_run += stbi__jpeg_get_bits(j, r);
                  --j->eob_run;
                  break;
               }
               k += 16; // ZRL
            } else {
               k += r;
               zig = stbi__jpeg_dezigzag[k++];
               data[zig] = (short) (stbi__extend_receive(j,s) << shift);
            }
         }
      } while (k <= j->spec_end);
   } else {
      // refinement scan for these AC coefficients

      short bit = (short) (1 << j->succ_low);

      if (j->eob_run) {
         // EOB run: no new coefficients, but existing nonzeros still get
         // a correction bit each
         --j->eob_run;
         for (k = j->spec_start; k <= j->spec_end; ++k) {
            short *p = &data[stbi__jpeg_dezigzag[k]];
            if (*p != 0)
               if (stbi__jpeg_get_bit(j))
                  if ((*p & bit)==0) {
                     // move the coefficient away from zero by one refinement bit
                     if (*p > 0)
                        *p += bit;
                     else
                        *p -= bit;
                  }
         }
      } else {
         k = j->spec_start;
         do {
            int r,s;
            int rs = stbi__jpeg_huff_decode(j, hac); // @OPTIMIZE see if we can use the fast path here, advance-by-r is so slow, eh
            if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG");
            s = rs & 15;
            r = rs >> 4;
            if (s == 0) {
               if (r < 15) {
                  j->eob_run = (1 << r) - 1;
                  if (r)
                     j->eob_run += stbi__jpeg_get_bits(j, r);
                  r = 64; // force end of block
               } else {
                  // r=15 s=0 should write 16 0s, so we just do
                  // a run of 15 0s and then write s (which is 0),
                  // so we don't have to do anything special here
               }
            } else {
               if (s != 1) return stbi__err("bad huffman code", "Corrupt JPEG");
               // sign bit
               if (stbi__jpeg_get_bit(j))
                  s = bit;
               else
                  s = -bit;
            }

            // advance by r
            while (k <= j->spec_end) {
               short *p = &data[stbi__jpeg_dezigzag[k++]];
               if (*p != 0) {
                  // already-nonzero coefficients consume a correction bit
                  if (stbi__jpeg_get_bit(j))
                     if ((*p & bit)==0) {
                        if (*p > 0)
                           *p += bit;
                        else
                           *p -= bit;
                     }
               } else {
                  if (r == 0) {
                     *p = (short) s; // newly-significant coefficient
                     break;
                  }
                  --r;
               }
            }
         } while (k <= j->spec_end);
      }
   }
   return 1;
}
+
+// take a -128..127 value and stbi__clamp it and convert to 0..255
+stbi_inline static stbi_uc stbi__clamp(int x)
+{
+ // trick to use a single test to catch both cases
+ if ((unsigned int) x > 255) {
+ if (x < 0) return 0;
+ if (x > 255) return 255;
+ }
+ return (stbi_uc) x;
+}
+
// convert a float constant to 20.12 fixed point (rounded)
#define stbi__f2f(x)  ((int) (((x) * 4096 + 0.5)))
// shift an integer up into the same 20.12 fixed-point domain
#define stbi__fsh(x)  ((x) * 4096)

// derived from jidctint -- DCT_ISLOW
// One 8-point 1D IDCT stage in fixed point; declares t0..t3, p1..p5, x0..x3
// in the enclosing scope. (No comments inside: the backslash continuations
// must stay untouched.)
#define STBI__IDCT_1D(s0,s1,s2,s3,s4,s5,s6,s7) \
   int t0,t1,t2,t3,p1,p2,p3,p4,p5,x0,x1,x2,x3; \
   p2 = s2;                                    \
   p3 = s6;                                    \
   p1 = (p2+p3) * stbi__f2f(0.5411961f);       \
   t2 = p1 + p3*stbi__f2f(-1.847759065f);      \
   t3 = p1 + p2*stbi__f2f( 0.765366865f);      \
   p2 = s0;                                    \
   p3 = s4;                                    \
   t0 = stbi__fsh(p2+p3);                      \
   t1 = stbi__fsh(p2-p3);                      \
   x0 = t0+t3;                                 \
   x3 = t0-t3;                                 \
   x1 = t1+t2;                                 \
   x2 = t1-t2;                                 \
   t0 = s7;                                    \
   t1 = s5;                                    \
   t2 = s3;                                    \
   t3 = s1;                                    \
   p3 = t0+t2;                                 \
   p4 = t1+t3;                                 \
   p1 = t0+t3;                                 \
   p2 = t1+t2;                                 \
   p5 = (p3+p4)*stbi__f2f( 1.175875602f);      \
   t0 = t0*stbi__f2f( 0.298631336f);           \
   t1 = t1*stbi__f2f( 2.053119869f);           \
   t2 = t2*stbi__f2f( 3.072711026f);           \
   t3 = t3*stbi__f2f( 1.501321110f);           \
   p1 = p5 + p1*stbi__f2f(-0.899976223f);      \
   p2 = p5 + p2*stbi__f2f(-2.562915447f);      \
   p3 = p3*stbi__f2f(-1.961570560f);           \
   p4 = p4*stbi__f2f(-0.390180644f);           \
   t3 += p1+p4;                                \
   t2 += p2+p3;                                \
   t1 += p2+p4;                                \
   t0 += p1+p3;
+
// Generic (scalar) 8x8 inverse DCT: columns first, then rows, writing the
// result as 0..255 bytes into 'out' with the given row stride.
static void stbi__idct_block(stbi_uc *out, int out_stride, short data[64])
{
   int i,val[64],*v=val;
   stbi_uc *o;
   short *d = data;

   // columns
   for (i=0; i < 8; ++i,++d, ++v) {
      // if all zeroes, shortcut -- this avoids dequantizing 0s and IDCTing
      if (d[ 8]==0 && d[16]==0 && d[24]==0 && d[32]==0
           && d[40]==0 && d[48]==0 && d[56]==0) {
         //    no shortcut                 0     seconds
         //    (1|2|3|4|5|6|7)==0          0     seconds
         //    all separate               -0.047 seconds
         //    1 && 2|3 && 4|5 && 6|7:    -0.047 seconds
         int dcterm = d[0]*4;
         v[0] = v[8] = v[16] = v[24] = v[32] = v[40] = v[48] = v[56] = dcterm;
      } else {
         STBI__IDCT_1D(d[ 0],d[ 8],d[16],d[24],d[32],d[40],d[48],d[56])
         // constants scaled things up by 1<<12; let's bring them back
         // down, but keep 2 extra bits of precision
         x0 += 512; x1 += 512; x2 += 512; x3 += 512;
         v[ 0] = (x0+t3) >> 10;
         v[56] = (x0-t3) >> 10;
         v[ 8] = (x1+t2) >> 10;
         v[48] = (x1-t2) >> 10;
         v[16] = (x2+t1) >> 10;
         v[40] = (x2-t1) >> 10;
         v[24] = (x3+t0) >> 10;
         v[32] = (x3-t0) >> 10;
      }
   }

   for (i=0, v=val, o=out; i < 8; ++i,v+=8,o+=out_stride) {
      // no fast case since the first 1D IDCT spread components out
      STBI__IDCT_1D(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7])
      // constants scaled things up by 1<<12, plus we had 1<<2 from first
      // loop, plus horizontal and vertical each scale by sqrt(8) so together
      // we've got an extra 1<<3, so 1<<17 total we need to remove.
      // so we want to round that, which means adding 0.5 * 1<<17,
      // aka 65536. Also, we'll end up with -128 to 127 that we want
      // to encode as 0..255 by adding 128, so we'll add that before the shift
      x0 += 65536 + (128<<17);
      x1 += 65536 + (128<<17);
      x2 += 65536 + (128<<17);
      x3 += 65536 + (128<<17);
      // tried computing the shifts into temps, or'ing the temps to see
      // if any were out of range, but that was slower
      o[0] = stbi__clamp((x0+t3) >> 17);
      o[7] = stbi__clamp((x0-t3) >> 17);
      o[1] = stbi__clamp((x1+t2) >> 17);
      o[6] = stbi__clamp((x1-t2) >> 17);
      o[2] = stbi__clamp((x2+t1) >> 17);
      o[5] = stbi__clamp((x2-t1) >> 17);
      o[3] = stbi__clamp((x3+t0) >> 17);
      o[4] = stbi__clamp((x3-t0) >> 17);
   }
}
+
+#ifdef STBI_SSE2
+// sse2 integer IDCT. not the fastest possible implementation but it
+// produces bit-identical results to the generic C version so it's
+// fully "transparent".
+static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64])
+{
+   // This is constructed to match our regular (generic) integer IDCT exactly.
+   __m128i row0, row1, row2, row3, row4, row5, row6, row7;
+   // tmp is scratch storage used by the dct_interleave8/16 transpose macros
+   __m128i tmp;
+
+   // dot product constant: even elems=x, odd elems=y
+   #define dct_const(x,y) _mm_setr_epi16((x),(y),(x),(y),(x),(y),(x),(y))
+
+   // out(0) = c0[even]*x + c0[odd]*y (c0, x, y 16-bit, out 32-bit)
+   // out(1) = c1[even]*x + c1[odd]*y
+   #define dct_rot(out0,out1, x,y,c0,c1) \
+      __m128i c0##lo = _mm_unpacklo_epi16((x),(y)); \
+      __m128i c0##hi = _mm_unpackhi_epi16((x),(y)); \
+      __m128i out0##_l = _mm_madd_epi16(c0##lo, c0); \
+      __m128i out0##_h = _mm_madd_epi16(c0##hi, c0); \
+      __m128i out1##_l = _mm_madd_epi16(c0##lo, c1); \
+      __m128i out1##_h = _mm_madd_epi16(c0##hi, c1)
+
+   // out = in << 12 (in 16-bit, out 32-bit)
+   #define dct_widen(out, in) \
+      __m128i out##_l = _mm_srai_epi32(_mm_unpacklo_epi16(_mm_setzero_si128(), (in)), 4); \
+      __m128i out##_h = _mm_srai_epi32(_mm_unpackhi_epi16(_mm_setzero_si128(), (in)), 4)
+
+   // wide add
+   #define dct_wadd(out, a, b) \
+      __m128i out##_l = _mm_add_epi32(a##_l, b##_l); \
+      __m128i out##_h = _mm_add_epi32(a##_h, b##_h)
+
+   // wide sub
+   #define dct_wsub(out, a, b) \
+      __m128i out##_l = _mm_sub_epi32(a##_l, b##_l); \
+      __m128i out##_h = _mm_sub_epi32(a##_h, b##_h)
+
+   // butterfly a/b, add bias, then shift by "s" and pack
+   #define dct_bfly32o(out0, out1, a,b,bias,s) \
+      { \
+         __m128i abiased_l = _mm_add_epi32(a##_l, bias); \
+         __m128i abiased_h = _mm_add_epi32(a##_h, bias); \
+         dct_wadd(sum, abiased, b); \
+         dct_wsub(dif, abiased, b); \
+         out0 = _mm_packs_epi32(_mm_srai_epi32(sum_l, s), _mm_srai_epi32(sum_h, s)); \
+         out1 = _mm_packs_epi32(_mm_srai_epi32(dif_l, s), _mm_srai_epi32(dif_h, s)); \
+      }
+
+   // 8-bit interleave step (for transposes)
+   #define dct_interleave8(a, b) \
+      tmp = a; \
+      a = _mm_unpacklo_epi8(a, b); \
+      b = _mm_unpackhi_epi8(tmp, b)
+
+   // 16-bit interleave step (for transposes)
+   #define dct_interleave16(a, b) \
+      tmp = a; \
+      a = _mm_unpacklo_epi16(a, b); \
+      b = _mm_unpackhi_epi16(tmp, b)
+
+   #define dct_pass(bias,shift) \
+      { \
+         /* even part */ \
+         dct_rot(t2e,t3e, row2,row6, rot0_0,rot0_1); \
+         __m128i sum04 = _mm_add_epi16(row0, row4); \
+         __m128i dif04 = _mm_sub_epi16(row0, row4); \
+         dct_widen(t0e, sum04); \
+         dct_widen(t1e, dif04); \
+         dct_wadd(x0, t0e, t3e); \
+         dct_wsub(x3, t0e, t3e); \
+         dct_wadd(x1, t1e, t2e); \
+         dct_wsub(x2, t1e, t2e); \
+         /* odd part */ \
+         dct_rot(y0o,y2o, row7,row3, rot2_0,rot2_1); \
+         dct_rot(y1o,y3o, row5,row1, rot3_0,rot3_1); \
+         __m128i sum17 = _mm_add_epi16(row1, row7); \
+         __m128i sum35 = _mm_add_epi16(row3, row5); \
+         dct_rot(y4o,y5o, sum17,sum35, rot1_0,rot1_1); \
+         dct_wadd(x4, y0o, y4o); \
+         dct_wadd(x5, y1o, y5o); \
+         dct_wadd(x6, y2o, y5o); \
+         dct_wadd(x7, y3o, y4o); \
+         dct_bfly32o(row0,row7, x0,x7,bias,shift); \
+         dct_bfly32o(row1,row6, x1,x6,bias,shift); \
+         dct_bfly32o(row2,row5, x2,x5,bias,shift); \
+         dct_bfly32o(row3,row4, x3,x4,bias,shift); \
+      }
+
+   // fixed-point rotation constants (built via stbi__f2f) for the even/odd parts
+   __m128i rot0_0 = dct_const(stbi__f2f(0.5411961f), stbi__f2f(0.5411961f) + stbi__f2f(-1.847759065f));
+   __m128i rot0_1 = dct_const(stbi__f2f(0.5411961f) + stbi__f2f( 0.765366865f), stbi__f2f(0.5411961f));
+   __m128i rot1_0 = dct_const(stbi__f2f(1.175875602f) + stbi__f2f(-0.899976223f), stbi__f2f(1.175875602f));
+   __m128i rot1_1 = dct_const(stbi__f2f(1.175875602f), stbi__f2f(1.175875602f) + stbi__f2f(-2.562915447f));
+   __m128i rot2_0 = dct_const(stbi__f2f(-1.961570560f) + stbi__f2f( 0.298631336f), stbi__f2f(-1.961570560f));
+   __m128i rot2_1 = dct_const(stbi__f2f(-1.961570560f), stbi__f2f(-1.961570560f) + stbi__f2f( 3.072711026f));
+   __m128i rot3_0 = dct_const(stbi__f2f(-0.390180644f) + stbi__f2f( 2.053119869f), stbi__f2f(-0.390180644f));
+   __m128i rot3_1 = dct_const(stbi__f2f(-0.390180644f), stbi__f2f(-0.390180644f) + stbi__f2f( 1.501321110f));
+
+   // rounding biases in column/row passes, see stbi__idct_block for explanation.
+   __m128i bias_0 = _mm_set1_epi32(512);
+   __m128i bias_1 = _mm_set1_epi32(65536 + (128<<17));
+
+   // load
+   row0 = _mm_load_si128((const __m128i *) (data + 0*8));
+   row1 = _mm_load_si128((const __m128i *) (data + 1*8));
+   row2 = _mm_load_si128((const __m128i *) (data + 2*8));
+   row3 = _mm_load_si128((const __m128i *) (data + 3*8));
+   row4 = _mm_load_si128((const __m128i *) (data + 4*8));
+   row5 = _mm_load_si128((const __m128i *) (data + 5*8));
+   row6 = _mm_load_si128((const __m128i *) (data + 6*8));
+   row7 = _mm_load_si128((const __m128i *) (data + 7*8));
+
+   // column pass
+   dct_pass(bias_0, 10);
+
+   {
+      // 16bit 8x8 transpose pass 1
+      dct_interleave16(row0, row4);
+      dct_interleave16(row1, row5);
+      dct_interleave16(row2, row6);
+      dct_interleave16(row3, row7);
+
+      // transpose pass 2
+      dct_interleave16(row0, row2);
+      dct_interleave16(row1, row3);
+      dct_interleave16(row4, row6);
+      dct_interleave16(row5, row7);
+
+      // transpose pass 3
+      dct_interleave16(row0, row1);
+      dct_interleave16(row2, row3);
+      dct_interleave16(row4, row5);
+      dct_interleave16(row6, row7);
+   }
+
+   // row pass
+   dct_pass(bias_1, 17);
+
+   {
+      // pack
+      __m128i p0 = _mm_packus_epi16(row0, row1); // a0a1a2a3...a7b0b1b2b3...b7
+      __m128i p1 = _mm_packus_epi16(row2, row3);
+      __m128i p2 = _mm_packus_epi16(row4, row5);
+      __m128i p3 = _mm_packus_epi16(row6, row7);
+
+      // 8bit 8x8 transpose pass 1
+      dct_interleave8(p0, p2); // a0e0a1e1...
+      dct_interleave8(p1, p3); // c0g0c1g1...
+
+      // transpose pass 2
+      dct_interleave8(p0, p1); // a0c0e0g0...
+      dct_interleave8(p2, p3); // b0d0f0h0...
+
+      // transpose pass 3
+      dct_interleave8(p0, p2); // a0b0c0d0...
+      dct_interleave8(p1, p3); // a4b4c4d4...
+
+      // store
+      _mm_storel_epi64((__m128i *) out, p0); out += out_stride;
+      _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p0, 0x4e)); out += out_stride;
+      _mm_storel_epi64((__m128i *) out, p2); out += out_stride;
+      _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p2, 0x4e)); out += out_stride;
+      _mm_storel_epi64((__m128i *) out, p1); out += out_stride;
+      _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p1, 0x4e)); out += out_stride;
+      _mm_storel_epi64((__m128i *) out, p3); out += out_stride;
+      _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p3, 0x4e));
+   }
+
+// remove the function-local helper macros
+#undef dct_const
+#undef dct_rot
+#undef dct_widen
+#undef dct_wadd
+#undef dct_wsub
+#undef dct_bfly32o
+#undef dct_interleave8
+#undef dct_interleave16
+#undef dct_pass
+}
+
+#endif // STBI_SSE2
+
+#ifdef STBI_NEON
+
+// NEON integer IDCT. should produce bit-identical
+// results to the generic C version.
+static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64])
+{
+   // same overall structure as the SSE2 path: column pass, 16-bit transpose,
+   // row pass, pack, 8-bit transpose, store
+   int16x8_t row0, row1, row2, row3, row4, row5, row6, row7;
+
+   // fixed-point rotation constants (via stbi__f2f) for the even/odd parts
+   int16x4_t rot0_0 = vdup_n_s16(stbi__f2f(0.5411961f));
+   int16x4_t rot0_1 = vdup_n_s16(stbi__f2f(-1.847759065f));
+   int16x4_t rot0_2 = vdup_n_s16(stbi__f2f( 0.765366865f));
+   int16x4_t rot1_0 = vdup_n_s16(stbi__f2f( 1.175875602f));
+   int16x4_t rot1_1 = vdup_n_s16(stbi__f2f(-0.899976223f));
+   int16x4_t rot1_2 = vdup_n_s16(stbi__f2f(-2.562915447f));
+   int16x4_t rot2_0 = vdup_n_s16(stbi__f2f(-1.961570560f));
+   int16x4_t rot2_1 = vdup_n_s16(stbi__f2f(-0.390180644f));
+   int16x4_t rot3_0 = vdup_n_s16(stbi__f2f( 0.298631336f));
+   int16x4_t rot3_1 = vdup_n_s16(stbi__f2f( 2.053119869f));
+   int16x4_t rot3_2 = vdup_n_s16(stbi__f2f( 3.072711026f));
+   int16x4_t rot3_3 = vdup_n_s16(stbi__f2f( 1.501321110f));
+
+// widening multiply: out (32-bit pair) = inq (16-bit) * coeff
+#define dct_long_mul(out, inq, coeff) \
+   int32x4_t out##_l = vmull_s16(vget_low_s16(inq), coeff); \
+   int32x4_t out##_h = vmull_s16(vget_high_s16(inq), coeff)
+
+// widening multiply-accumulate: out = acc + inq * coeff
+#define dct_long_mac(out, acc, inq, coeff) \
+   int32x4_t out##_l = vmlal_s16(acc##_l, vget_low_s16(inq), coeff); \
+   int32x4_t out##_h = vmlal_s16(acc##_h, vget_high_s16(inq), coeff)
+
+// out = inq << 12 (16-bit in, 32-bit out)
+#define dct_widen(out, inq) \
+   int32x4_t out##_l = vshll_n_s16(vget_low_s16(inq), 12); \
+   int32x4_t out##_h = vshll_n_s16(vget_high_s16(inq), 12)
+
+// wide add
+#define dct_wadd(out, a, b) \
+   int32x4_t out##_l = vaddq_s32(a##_l, b##_l); \
+   int32x4_t out##_h = vaddq_s32(a##_h, b##_h)
+
+// wide sub
+#define dct_wsub(out, a, b) \
+   int32x4_t out##_l = vsubq_s32(a##_l, b##_l); \
+   int32x4_t out##_h = vsubq_s32(a##_h, b##_h)
+
+// butterfly a/b, then shift using "shiftop" by "s" and pack
+#define dct_bfly32o(out0,out1, a,b,shiftop,s) \
+   { \
+      dct_wadd(sum, a, b); \
+      dct_wsub(dif, a, b); \
+      out0 = vcombine_s16(shiftop(sum_l, s), shiftop(sum_h, s)); \
+      out1 = vcombine_s16(shiftop(dif_l, s), shiftop(dif_h, s)); \
+   }
+
+#define dct_pass(shiftop, shift) \
+   { \
+      /* even part */ \
+      int16x8_t sum26 = vaddq_s16(row2, row6); \
+      dct_long_mul(p1e, sum26, rot0_0); \
+      dct_long_mac(t2e, p1e, row6, rot0_1); \
+      dct_long_mac(t3e, p1e, row2, rot0_2); \
+      int16x8_t sum04 = vaddq_s16(row0, row4); \
+      int16x8_t dif04 = vsubq_s16(row0, row4); \
+      dct_widen(t0e, sum04); \
+      dct_widen(t1e, dif04); \
+      dct_wadd(x0, t0e, t3e); \
+      dct_wsub(x3, t0e, t3e); \
+      dct_wadd(x1, t1e, t2e); \
+      dct_wsub(x2, t1e, t2e); \
+      /* odd part */ \
+      int16x8_t sum15 = vaddq_s16(row1, row5); \
+      int16x8_t sum17 = vaddq_s16(row1, row7); \
+      int16x8_t sum35 = vaddq_s16(row3, row5); \
+      int16x8_t sum37 = vaddq_s16(row3, row7); \
+      int16x8_t sumodd = vaddq_s16(sum17, sum35); \
+      dct_long_mul(p5o, sumodd, rot1_0); \
+      dct_long_mac(p1o, p5o, sum17, rot1_1); \
+      dct_long_mac(p2o, p5o, sum35, rot1_2); \
+      dct_long_mul(p3o, sum37, rot2_0); \
+      dct_long_mul(p4o, sum15, rot2_1); \
+      dct_wadd(sump13o, p1o, p3o); \
+      dct_wadd(sump24o, p2o, p4o); \
+      dct_wadd(sump23o, p2o, p3o); \
+      dct_wadd(sump14o, p1o, p4o); \
+      dct_long_mac(x4, sump13o, row7, rot3_0); \
+      dct_long_mac(x5, sump24o, row5, rot3_1); \
+      dct_long_mac(x6, sump23o, row3, rot3_2); \
+      dct_long_mac(x7, sump14o, row1, rot3_3); \
+      dct_bfly32o(row0,row7, x0,x7,shiftop,shift); \
+      dct_bfly32o(row1,row6, x1,x6,shiftop,shift); \
+      dct_bfly32o(row2,row5, x2,x5,shiftop,shift); \
+      dct_bfly32o(row3,row4, x3,x4,shiftop,shift); \
+   }
+
+   // load
+   row0 = vld1q_s16(data + 0*8);
+   row1 = vld1q_s16(data + 1*8);
+   row2 = vld1q_s16(data + 2*8);
+   row3 = vld1q_s16(data + 3*8);
+   row4 = vld1q_s16(data + 4*8);
+   row5 = vld1q_s16(data + 5*8);
+   row6 = vld1q_s16(data + 6*8);
+   row7 = vld1q_s16(data + 7*8);
+
+   // add DC bias
+   row0 = vaddq_s16(row0, vsetq_lane_s16(1024, vdupq_n_s16(0), 0));
+
+   // column pass
+   dct_pass(vrshrn_n_s32, 10);
+
+   // 16bit 8x8 transpose
+   {
+// these three map to a single VTRN.16, VTRN.32, and VSWP, respectively.
+// whether compilers actually get this is another story, sadly.
+#define dct_trn16(x, y) { int16x8x2_t t = vtrnq_s16(x, y); x = t.val[0]; y = t.val[1]; }
+#define dct_trn32(x, y) { int32x4x2_t t = vtrnq_s32(vreinterpretq_s32_s16(x), vreinterpretq_s32_s16(y)); x = vreinterpretq_s16_s32(t.val[0]); y = vreinterpretq_s16_s32(t.val[1]); }
+#define dct_trn64(x, y) { int16x8_t x0 = x; int16x8_t y0 = y; x = vcombine_s16(vget_low_s16(x0), vget_low_s16(y0)); y = vcombine_s16(vget_high_s16(x0), vget_high_s16(y0)); }
+
+      // pass 1
+      dct_trn16(row0, row1); // a0b0a2b2a4b4a6b6
+      dct_trn16(row2, row3);
+      dct_trn16(row4, row5);
+      dct_trn16(row6, row7);
+
+      // pass 2
+      dct_trn32(row0, row2); // a0b0c0d0a4b4c4d4
+      dct_trn32(row1, row3);
+      dct_trn32(row4, row6);
+      dct_trn32(row5, row7);
+
+      // pass 3
+      dct_trn64(row0, row4); // a0b0c0d0e0f0g0h0
+      dct_trn64(row1, row5);
+      dct_trn64(row2, row6);
+      dct_trn64(row3, row7);
+
+#undef dct_trn16
+#undef dct_trn32
+#undef dct_trn64
+   }
+
+   // row pass
+   // vrshrn_n_s32 only supports shifts up to 16, we need
+   // 17. so do a non-rounding shift of 16 first then follow
+   // up with a rounding shift by 1.
+   dct_pass(vshrn_n_s32, 16);
+
+   {
+      // pack and round
+      uint8x8_t p0 = vqrshrun_n_s16(row0, 1);
+      uint8x8_t p1 = vqrshrun_n_s16(row1, 1);
+      uint8x8_t p2 = vqrshrun_n_s16(row2, 1);
+      uint8x8_t p3 = vqrshrun_n_s16(row3, 1);
+      uint8x8_t p4 = vqrshrun_n_s16(row4, 1);
+      uint8x8_t p5 = vqrshrun_n_s16(row5, 1);
+      uint8x8_t p6 = vqrshrun_n_s16(row6, 1);
+      uint8x8_t p7 = vqrshrun_n_s16(row7, 1);
+
+      // again, these can translate into one instruction, but often don't.
+#define dct_trn8_8(x, y) { uint8x8x2_t t = vtrn_u8(x, y); x = t.val[0]; y = t.val[1]; }
+#define dct_trn8_16(x, y) { uint16x4x2_t t = vtrn_u16(vreinterpret_u16_u8(x), vreinterpret_u16_u8(y)); x = vreinterpret_u8_u16(t.val[0]); y = vreinterpret_u8_u16(t.val[1]); }
+#define dct_trn8_32(x, y) { uint32x2x2_t t = vtrn_u32(vreinterpret_u32_u8(x), vreinterpret_u32_u8(y)); x = vreinterpret_u8_u32(t.val[0]); y = vreinterpret_u8_u32(t.val[1]); }
+
+      // sadly can't use interleaved stores here since we only write
+      // 8 bytes to each scan line!
+
+      // 8x8 8-bit transpose pass 1
+      dct_trn8_8(p0, p1);
+      dct_trn8_8(p2, p3);
+      dct_trn8_8(p4, p5);
+      dct_trn8_8(p6, p7);
+
+      // pass 2
+      dct_trn8_16(p0, p2);
+      dct_trn8_16(p1, p3);
+      dct_trn8_16(p4, p6);
+      dct_trn8_16(p5, p7);
+
+      // pass 3
+      dct_trn8_32(p0, p4);
+      dct_trn8_32(p1, p5);
+      dct_trn8_32(p2, p6);
+      dct_trn8_32(p3, p7);
+
+      // store
+      vst1_u8(out, p0); out += out_stride;
+      vst1_u8(out, p1); out += out_stride;
+      vst1_u8(out, p2); out += out_stride;
+      vst1_u8(out, p3); out += out_stride;
+      vst1_u8(out, p4); out += out_stride;
+      vst1_u8(out, p5); out += out_stride;
+      vst1_u8(out, p6); out += out_stride;
+      vst1_u8(out, p7);
+
+#undef dct_trn8_8
+#undef dct_trn8_16
+#undef dct_trn8_32
+   }
+
+// remove the function-local helper macros
+#undef dct_long_mul
+#undef dct_long_mac
+#undef dct_widen
+#undef dct_wadd
+#undef dct_wsub
+#undef dct_bfly32o
+#undef dct_pass
+}
+
+#endif // STBI_NEON
+
+#define STBI__MARKER_none 0xff
+// if there's a pending marker from the entropy stream, return that
+// otherwise, fetch from the stream and get a marker. if there's no
+// marker, return 0xff, which is never a valid marker value
+static stbi_uc stbi__get_marker(stbi__jpeg *j)
+{
+   stbi_uc m;
+   // return (and clear) a marker cached by the entropy decoder, if any
+   if (j->marker != STBI__MARKER_none) {
+      m = j->marker;
+      j->marker = STBI__MARKER_none;
+      return m;
+   }
+   // markers begin with 0xff; anything else means no marker here
+   m = stbi__get8(j->s);
+   if (m != 0xff) return STBI__MARKER_none;
+   // skip repeated 0xff fill bytes to reach the marker code itself
+   while (m == 0xff)
+      m = stbi__get8(j->s);
+   return m;
+}
+
+// in each scan, we'll have scan_n components, and the order
+// of the components is specified by order[]
+#define STBI__RESTART(x) ((x) >= 0xd0 && (x) <= 0xd7)
+
+// after a restart interval, stbi__jpeg_reset the entropy decoder and
+// the dc prediction
+static void stbi__jpeg_reset(stbi__jpeg *j)
+{
+   // flush the bit buffer, cached marker, and any pending EOB run
+   j->code_buffer = 0;
+   j->code_bits = 0;
+   j->nomore = 0;
+   j->marker = STBI__MARKER_none;
+   j->eob_run = 0;
+   // DC prediction restarts from zero in every component
+   j->img_comp[0].dc_pred = 0;
+   j->img_comp[1].dc_pred = 0;
+   j->img_comp[2].dc_pred = 0;
+   j->img_comp[3].dc_pred = 0;
+   // with no restart interval, use a countdown so large it never fires:
+   // no more than 1<<31 MCUs is plenty safe since we don't even allow
+   // 1<<30 pixels
+   j->todo = j->restart_interval ? j->restart_interval : 0x7fffffff;
+}
+
+// decode one scan's entropy-coded data: baseline scans are decoded and
+// idct'd straight into each component's pixel buffer; progressive scans
+// only accumulate coefficients (idct happens later in stbi__jpeg_finish).
+// returns 0 on decode error, 1 otherwise.
+static int stbi__parse_entropy_coded_data(stbi__jpeg *z)
+{
+   stbi__jpeg_reset(z);
+   if (!z->progressive) {
+      if (z->scan_n == 1) {
+         int i,j;
+         STBI_SIMD_ALIGN(short, data[64]);
+         int n = z->order[0];
+         // non-interleaved data, we just need to process one block at a time,
+         // in trivial scanline order
+         // number of blocks to do just depends on how many actual "pixels" this
+         // component has, independent of interleaved MCU blocking and such
+         int w = (z->img_comp[n].x+7) >> 3;
+         int h = (z->img_comp[n].y+7) >> 3;
+         for (j=0; j < h; ++j) {
+            for (i=0; i < w; ++i) {
+               int ha = z->img_comp[n].ha;
+               if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0;
+               z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data);
+               // every data block is an MCU, so countdown the restart interval
+               if (--z->todo <= 0) {
+                  if (z->code_bits < 24) stbi__grow_buffer_unsafe(z);
+                  // if it's NOT a restart, then just bail, so we get corrupt data
+                  // rather than no data
+                  if (!STBI__RESTART(z->marker)) return 1;
+                  stbi__jpeg_reset(z);
+               }
+            }
+         }
+         return 1;
+      } else { // interleaved
+         int i,j,k,x,y;
+         STBI_SIMD_ALIGN(short, data[64]);
+         for (j=0; j < z->img_mcu_y; ++j) {
+            for (i=0; i < z->img_mcu_x; ++i) {
+               // scan an interleaved mcu... process scan_n components in order
+               for (k=0; k < z->scan_n; ++k) {
+                  int n = z->order[k];
+                  // scan out an mcu's worth of this component; that's just determined
+                  // by the basic H and V specified for the component
+                  for (y=0; y < z->img_comp[n].v; ++y) {
+                     for (x=0; x < z->img_comp[n].h; ++x) {
+                        int x2 = (i*z->img_comp[n].h + x)*8;
+                        int y2 = (j*z->img_comp[n].v + y)*8;
+                        int ha = z->img_comp[n].ha;
+                        if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0;
+                        z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*y2+x2, z->img_comp[n].w2, data);
+                     }
+                  }
+               }
+               // after all interleaved components, that's an interleaved MCU,
+               // so now count down the restart interval
+               if (--z->todo <= 0) {
+                  if (z->code_bits < 24) stbi__grow_buffer_unsafe(z);
+                  if (!STBI__RESTART(z->marker)) return 1;
+                  stbi__jpeg_reset(z);
+               }
+            }
+         }
+         return 1;
+      }
+   } else {
+      if (z->scan_n == 1) {
+         int i,j;
+         int n = z->order[0];
+         // non-interleaved data, we just need to process one block at a time,
+         // in trivial scanline order
+         // number of blocks to do just depends on how many actual "pixels" this
+         // component has, independent of interleaved MCU blocking and such
+         int w = (z->img_comp[n].x+7) >> 3;
+         int h = (z->img_comp[n].y+7) >> 3;
+         for (j=0; j < h; ++j) {
+            for (i=0; i < w; ++i) {
+               short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w);
+               if (z->spec_start == 0) {
+                  if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n))
+                     return 0;
+               } else {
+                  int ha = z->img_comp[n].ha;
+                  if (!stbi__jpeg_decode_block_prog_ac(z, data, &z->huff_ac[ha], z->fast_ac[ha]))
+                     return 0;
+               }
+               // every data block is an MCU, so countdown the restart interval
+               if (--z->todo <= 0) {
+                  if (z->code_bits < 24) stbi__grow_buffer_unsafe(z);
+                  if (!STBI__RESTART(z->marker)) return 1;
+                  stbi__jpeg_reset(z);
+               }
+            }
+         }
+         return 1;
+      } else { // interleaved
+         int i,j,k,x,y;
+         for (j=0; j < z->img_mcu_y; ++j) {
+            for (i=0; i < z->img_mcu_x; ++i) {
+               // scan an interleaved mcu... process scan_n components in order
+               for (k=0; k < z->scan_n; ++k) {
+                  int n = z->order[k];
+                  // scan out an mcu's worth of this component; that's just determined
+                  // by the basic H and V specified for the component
+                  for (y=0; y < z->img_comp[n].v; ++y) {
+                     for (x=0; x < z->img_comp[n].h; ++x) {
+                        int x2 = (i*z->img_comp[n].h + x);
+                        int y2 = (j*z->img_comp[n].v + y);
+                        short *data = z->img_comp[n].coeff + 64 * (x2 + y2 * z->img_comp[n].coeff_w);
+                        // interleaved progressive scans carry DC coefficients only
+                        if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n))
+                           return 0;
+                     }
+                  }
+               }
+               // after all interleaved components, that's an interleaved MCU,
+               // so now count down the restart interval
+               if (--z->todo <= 0) {
+                  if (z->code_bits < 24) stbi__grow_buffer_unsafe(z);
+                  if (!STBI__RESTART(z->marker)) return 1;
+                  stbi__jpeg_reset(z);
+               }
+            }
+         }
+         return 1;
+      }
+   }
+}
+
+static void stbi__jpeg_dequantize(short *data, stbi__uint16 *dequant)
+{
+   // undo quantization: scale each of the 64 coefficients in the block
+   // by the corresponding quantization-table entry
+   int k;
+   for (k=0; k < 64; ++k)
+      data[k] = (short)(data[k] * dequant[k]);
+}
+
+static void stbi__jpeg_finish(stbi__jpeg *z)
+{
+   int n, bx, by;
+   // only progressive decoding defers work: its scans accumulated raw
+   // coefficients, which still need dequantization and the idct
+   if (!z->progressive) return;
+   for (n=0; n < z->s->img_n; ++n) {
+      int bw = (z->img_comp[n].x+7) >> 3;   // blocks per row
+      int bh = (z->img_comp[n].y+7) >> 3;   // blocks per column
+      for (by=0; by < bh; ++by) {
+         for (bx=0; bx < bw; ++bx) {
+            short *data = z->img_comp[n].coeff + 64 * (bx + by * z->img_comp[n].coeff_w);
+            stbi__jpeg_dequantize(data, z->dequant[z->img_comp[n].tq]);
+            z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*by*8+bx*8, z->img_comp[n].w2, data);
+         }
+      }
+   }
+}
+
+// process one marker segment other than SOI/EOI/SOF/SOS: DRI, DQT, DHT,
+// APPn and COM. L tracks the remaining payload bytes of the segment.
+// returns 0 (with an error set) on corrupt data, 1 on success.
+static int stbi__process_marker(stbi__jpeg *z, int m)
+{
+   int L;
+   switch (m) {
+      case STBI__MARKER_none: // no marker found
+         return stbi__err("expected marker","Corrupt JPEG");
+
+      case 0xDD: // DRI - specify restart interval
+         if (stbi__get16be(z->s) != 4) return stbi__err("bad DRI len","Corrupt JPEG");
+         z->restart_interval = stbi__get16be(z->s);
+         return 1;
+
+      case 0xDB: // DQT - define quantization table
+         L = stbi__get16be(z->s)-2;
+         while (L > 0) {
+            int q = stbi__get8(z->s);
+            int p = q >> 4, sixteen = (p != 0);
+            int t = q & 15,i;
+            if (p != 0 && p != 1) return stbi__err("bad DQT type","Corrupt JPEG");
+            if (t > 3) return stbi__err("bad DQT table","Corrupt JPEG");
+
+            for (i=0; i < 64; ++i)
+               z->dequant[t][stbi__jpeg_dezigzag[i]] = (stbi__uint16)(sixteen ? stbi__get16be(z->s) : stbi__get8(z->s));
+            // one header byte plus 64 one- or two-byte entries per table
+            L -= (sixteen ? 129 : 65);
+         }
+         return L==0;
+
+      case 0xC4: // DHT - define huffman table
+         L = stbi__get16be(z->s)-2;
+         while (L > 0) {
+            stbi_uc *v;
+            int sizes[16],i,n=0;
+            int q = stbi__get8(z->s);
+            int tc = q >> 4;
+            int th = q & 15;
+            if (tc > 1 || th > 3) return stbi__err("bad DHT header","Corrupt JPEG");
+            for (i=0; i < 16; ++i) {
+               sizes[i] = stbi__get8(z->s);
+               n += sizes[i];
+            }
+            L -= 17;
+            if (tc == 0) {
+               if (!stbi__build_huffman(z->huff_dc+th, sizes)) return 0;
+               v = z->huff_dc[th].values;
+            } else {
+               if (!stbi__build_huffman(z->huff_ac+th, sizes)) return 0;
+               v = z->huff_ac[th].values;
+            }
+            for (i=0; i < n; ++i)
+               v[i] = stbi__get8(z->s);
+            if (tc != 0)
+               stbi__build_fast_ac(z->fast_ac[th], z->huff_ac + th);
+            L -= n;
+         }
+         return L==0;
+   }
+
+   // check for comment block or APP blocks
+   if ((m >= 0xE0 && m <= 0xEF) || m == 0xFE) {
+      L = stbi__get16be(z->s);
+      if (L < 2) {
+         if (m == 0xFE)
+            return stbi__err("bad COM len","Corrupt JPEG");
+         else
+            return stbi__err("bad APP len","Corrupt JPEG");
+      }
+      L -= 2;
+
+      if (m == 0xE0 && L >= 5) { // JFIF APP0 segment
+         static const unsigned char tag[5] = {'J','F','I','F','\0'};
+         int ok = 1;
+         int i;
+         for (i=0; i < 5; ++i)
+            if (stbi__get8(z->s) != tag[i])
+               ok = 0;
+         L -= 5;
+         if (ok)
+            z->jfif = 1;
+      } else if (m == 0xEE && L >= 12) { // Adobe APP14 segment
+         static const unsigned char tag[6] = {'A','d','o','b','e','\0'};
+         int ok = 1;
+         int i;
+         for (i=0; i < 6; ++i)
+            if (stbi__get8(z->s) != tag[i])
+               ok = 0;
+         L -= 6;
+         if (ok) {
+            stbi__get8(z->s); // version
+            stbi__get16be(z->s); // flags0
+            stbi__get16be(z->s); // flags1
+            z->app14_color_transform = stbi__get8(z->s); // color transform
+            L -= 6;
+         }
+      }
+
+      // skip whatever remains of the segment payload
+      stbi__skip(z->s, L);
+      return 1;
+   }
+
+   return stbi__err("unknown marker","Corrupt JPEG");
+}
+
+// after we see SOS
+// parse an SOS (start-of-scan) header: the component list for this scan,
+// plus the spectral-selection / successive-approximation parameters used
+// by progressive mode. returns 0 (with an error set) on corrupt data.
+static int stbi__process_scan_header(stbi__jpeg *z)
+{
+   int i;
+   int Ls = stbi__get16be(z->s);
+   z->scan_n = stbi__get8(z->s);
+   if (z->scan_n < 1 || z->scan_n > 4 || z->scan_n > (int) z->s->img_n) return stbi__err("bad SOS component count","Corrupt JPEG");
+   if (Ls != 6+2*z->scan_n) return stbi__err("bad SOS len","Corrupt JPEG");
+   for (i=0; i < z->scan_n; ++i) {
+      int id = stbi__get8(z->s), which;
+      int q = stbi__get8(z->s);
+      // map the scan's component id back to a frame-header component index
+      for (which = 0; which < z->s->img_n; ++which)
+         if (z->img_comp[which].id == id)
+            break;
+      if (which == z->s->img_n) return 0; // no match
+      z->img_comp[which].hd = q >> 4;   if (z->img_comp[which].hd > 3) return stbi__err("bad DC huff","Corrupt JPEG");
+      z->img_comp[which].ha = q & 15;   if (z->img_comp[which].ha > 3) return stbi__err("bad AC huff","Corrupt JPEG");
+      z->order[i] = which;
+   }
+
+   {
+      // spectral selection and successive approximation (meaningful for
+      // progressive scans; constrained to fixed values for baseline)
+      int aa;
+      z->spec_start = stbi__get8(z->s);
+      z->spec_end   = stbi__get8(z->s); // should be 63, but might be 0
+      aa = stbi__get8(z->s);
+      z->succ_high = (aa >> 4);
+      z->succ_low  = (aa & 15);
+      if (z->progressive) {
+         if (z->spec_start > 63 || z->spec_end > 63  || z->spec_start > z->spec_end || z->succ_high > 13 || z->succ_low > 13)
+            return stbi__err("bad SOS", "Corrupt JPEG");
+      } else {
+         if (z->spec_start != 0) return stbi__err("bad SOS","Corrupt JPEG");
+         if (z->succ_high != 0 || z->succ_low != 0) return stbi__err("bad SOS","Corrupt JPEG");
+         z->spec_end = 63;
+      }
+   }
+
+   return 1;
+}
+
+// free the per-component buffers of the first ncomp components; "why" is
+// passed straight through so callers can write
+// "return stbi__free_jpeg_components(z, n, stbi__err(...));"
+static int stbi__free_jpeg_components(stbi__jpeg *z, int ncomp, int why)
+{
+   int c;
+   for (c=0; c < ncomp; ++c) {
+      if (z->img_comp[c].raw_data) {
+         STBI_FREE(z->img_comp[c].raw_data);
+         z->img_comp[c].raw_data = NULL;
+         z->img_comp[c].data = NULL;      // pointed into raw_data
+      }
+      if (z->img_comp[c].raw_coeff) {
+         STBI_FREE(z->img_comp[c].raw_coeff);
+         z->img_comp[c].raw_coeff = 0;
+         z->img_comp[c].coeff = 0;        // pointed into raw_coeff
+      }
+      if (z->img_comp[c].linebuf) {
+         STBI_FREE(z->img_comp[c].linebuf);
+         z->img_comp[c].linebuf = NULL;
+      }
+   }
+   return why;
+}
+
+// parse the SOF (frame) header: dimensions, component count, per-component
+// sampling factors and quant-table ids. when scan == STBI__SCAN_load it also
+// allocates each component's (possibly oversized) decode buffers.
+static int stbi__process_frame_header(stbi__jpeg *z, int scan)
+{
+   stbi__context *s = z->s;
+   int Lf,p,i,q, h_max=1,v_max=1,c;
+   Lf = stbi__get16be(s);         if (Lf < 11) return stbi__err("bad SOF len","Corrupt JPEG"); // JPEG
+   p  = stbi__get8(s);            if (p != 8) return stbi__err("only 8-bit","JPEG format not supported: 8-bit only"); // JPEG baseline
+   s->img_y = stbi__get16be(s);   if (s->img_y == 0) return stbi__err("no header height", "JPEG format not supported: delayed height"); // Legal, but we don't handle it--but neither does IJG
+   s->img_x = stbi__get16be(s);   if (s->img_x == 0) return stbi__err("0 width","Corrupt JPEG"); // JPEG requires
+   c = stbi__get8(s);
+   if (c != 3 && c != 1 && c != 4) return stbi__err("bad component count","Corrupt JPEG");
+   s->img_n = c;
+   for (i=0; i < c; ++i) {
+      z->img_comp[i].data = NULL;
+      z->img_comp[i].linebuf = NULL;
+   }
+
+   if (Lf != 8+3*s->img_n) return stbi__err("bad SOF len","Corrupt JPEG");
+
+   z->rgb = 0;
+   for (i=0; i < s->img_n; ++i) {
+      static const unsigned char rgb[3] = { 'R', 'G', 'B' };
+      z->img_comp[i].id = stbi__get8(s);
+      // components literally named 'R','G','B' mark a non-YCbCr rgb JPEG
+      if (s->img_n == 3 && z->img_comp[i].id == rgb[i])
+         ++z->rgb;
+      q = stbi__get8(s);
+      z->img_comp[i].h = (q >> 4);  if (!z->img_comp[i].h || z->img_comp[i].h > 4) return stbi__err("bad H","Corrupt JPEG");
+      z->img_comp[i].v = q & 15;    if (!z->img_comp[i].v || z->img_comp[i].v > 4) return stbi__err("bad V","Corrupt JPEG");
+      z->img_comp[i].tq = stbi__get8(s);  if (z->img_comp[i].tq > 3) return stbi__err("bad TQ","Corrupt JPEG");
+   }
+
+   if (scan != STBI__SCAN_load) return 1;
+
+   if (!stbi__mad3sizes_valid(s->img_x, s->img_y, s->img_n, 0)) return stbi__err("too large", "Image too large to decode");
+
+   for (i=0; i < s->img_n; ++i) {
+      if (z->img_comp[i].h > h_max) h_max = z->img_comp[i].h;
+      if (z->img_comp[i].v > v_max) v_max = z->img_comp[i].v;
+   }
+
+   // compute interleaved mcu info
+   z->img_h_max = h_max;
+   z->img_v_max = v_max;
+   z->img_mcu_w = h_max * 8;
+   z->img_mcu_h = v_max * 8;
+   // these sizes can't be more than 17 bits
+   z->img_mcu_x = (s->img_x + z->img_mcu_w-1) / z->img_mcu_w;
+   z->img_mcu_y = (s->img_y + z->img_mcu_h-1) / z->img_mcu_h;
+
+   for (i=0; i < s->img_n; ++i) {
+      // number of effective pixels (e.g. for non-interleaved MCU)
+      z->img_comp[i].x = (s->img_x * z->img_comp[i].h + h_max-1) / h_max;
+      z->img_comp[i].y = (s->img_y * z->img_comp[i].v + v_max-1) / v_max;
+      // to simplify generation, we'll allocate enough memory to decode
+      // the bogus oversized data from using interleaved MCUs and their
+      // big blocks (e.g. a 16x16 iMCU on an image of width 33); we won't
+      // discard the extra data until colorspace conversion
+      //
+      // img_mcu_x, img_mcu_y: <=17 bits; comp[i].h and .v are <=4 (checked earlier)
+      // so these muls can't overflow with 32-bit ints (which we require)
+      z->img_comp[i].w2 = z->img_mcu_x * z->img_comp[i].h * 8;
+      z->img_comp[i].h2 = z->img_mcu_y * z->img_comp[i].v * 8;
+      z->img_comp[i].coeff = 0;
+      z->img_comp[i].raw_coeff = 0;
+      z->img_comp[i].linebuf = NULL;
+      z->img_comp[i].raw_data = stbi__malloc_mad2(z->img_comp[i].w2, z->img_comp[i].h2, 15);
+      if (z->img_comp[i].raw_data == NULL)
+         return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory"));
+      // align blocks for idct using mmx/sse
+      z->img_comp[i].data = (stbi_uc*) (((size_t) z->img_comp[i].raw_data + 15) & ~15);
+      if (z->progressive) {
+         // w2, h2 are multiples of 8 (see above)
+         z->img_comp[i].coeff_w = z->img_comp[i].w2 / 8;
+         z->img_comp[i].coeff_h = z->img_comp[i].h2 / 8;
+         z->img_comp[i].raw_coeff = stbi__malloc_mad3(z->img_comp[i].w2, z->img_comp[i].h2, sizeof(short), 15);
+         if (z->img_comp[i].raw_coeff == NULL)
+            return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory"));
+         z->img_comp[i].coeff = (short*) (((size_t) z->img_comp[i].raw_coeff + 15) & ~15);
+      }
+   }
+
+   return 1;
+}
+
+// use comparisons since in some cases we handle more than one case (e.g. SOF)
+#define stbi__DNL(x) ((x) == 0xdc)
+#define stbi__SOI(x) ((x) == 0xd8)
+#define stbi__EOI(x) ((x) == 0xd9)
+#define stbi__SOF(x) ((x) == 0xc0 || (x) == 0xc1 || (x) == 0xc2)
+#define stbi__SOS(x) ((x) == 0xda)
+
+#define stbi__SOF_progressive(x) ((x) == 0xc2)
+
+// read and validate markers from SOI up to and including the frame header
+// (SOF); on success the image dimensions and component setup in z are valid
+static int stbi__decode_jpeg_header(stbi__jpeg *z, int scan)
+{
+   int m;
+   z->jfif = 0;
+   z->app14_color_transform = -1; // valid values are 0,1,2
+   z->marker = STBI__MARKER_none; // initialize cached marker to empty
+   m = stbi__get_marker(z);
+   if (!stbi__SOI(m)) return stbi__err("no SOI","Corrupt JPEG");
+   // STBI__SCAN_type: caller only needed to confirm this is a JPEG
+   if (scan == STBI__SCAN_type) return 1;
+   m = stbi__get_marker(z);
+   while (!stbi__SOF(m)) {
+      if (!stbi__process_marker(z,m)) return 0;
+      m = stbi__get_marker(z);
+      while (m == STBI__MARKER_none) {
+         // some files have extra padding after their blocks, so ok, we'll scan
+         if (stbi__at_eof(z->s)) return stbi__err("no SOF", "Corrupt JPEG");
+         m = stbi__get_marker(z);
+      }
+   }
+   z->progressive = stbi__SOF_progressive(m);
+   if (!stbi__process_frame_header(z, scan)) return 0;
+   return 1;
+}
+
+// decode image to YCbCr format
+// decode every scan of the image until EOI; entropy data is parsed per-scan
+// and, for progressive files, the deferred idct is run at the end
+static int stbi__decode_jpeg_image(stbi__jpeg *j)
+{
+   int m;
+   for (m = 0; m < 4; m++) {
+      j->img_comp[m].raw_data = NULL;
+      j->img_comp[m].raw_coeff = NULL;
+   }
+   j->restart_interval = 0;
+   if (!stbi__decode_jpeg_header(j, STBI__SCAN_load)) return 0;
+   m = stbi__get_marker(j);
+   while (!stbi__EOI(m)) {
+      if (stbi__SOS(m)) {
+         if (!stbi__process_scan_header(j)) return 0;
+         if (!stbi__parse_entropy_coded_data(j)) return 0;
+         if (j->marker == STBI__MARKER_none ) {
+            // handle 0s at the end of image data from IP Kamera 9060
+            while (!stbi__at_eof(j->s)) {
+               int x = stbi__get8(j->s);
+               if (x == 255) {
+                  j->marker = stbi__get8(j->s);
+                  break;
+               }
+            }
+            // if we reach eof without hitting a marker, stbi__get_marker() below will fail and we'll eventually return 0
+         }
+      } else if (stbi__DNL(m)) {
+         // DNL: cross-check the advertised image height against the header
+         int Ld = stbi__get16be(j->s);
+         stbi__uint32 NL = stbi__get16be(j->s);
+         if (Ld != 4) return stbi__err("bad DNL len", "Corrupt JPEG");
+         if (NL != j->s->img_y) return stbi__err("bad DNL height", "Corrupt JPEG");
+      } else {
+         if (!stbi__process_marker(j, m)) return 0;
+      }
+      m = stbi__get_marker(j);
+   }
+   if (j->progressive)
+      stbi__jpeg_finish(j);
+   return 1;
+}
+
+// static jfif-centered resampling (across block boundaries)
+
+typedef stbi_uc *(*resample_row_func)(stbi_uc *out, stbi_uc *in0, stbi_uc *in1,
+ int w, int hs);
+
+#define stbi__div4(x) ((stbi_uc) ((x) >> 2))
+
+static stbi_uc *resample_row_1(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs)
+{
+   // identity resampler: the input row is already at full resolution,
+   // so just hand it straight back
+   STBI_NOTUSED(out);
+   STBI_NOTUSED(w);
+   STBI_NOTUSED(hs);
+   STBI_NOTUSED(in_far);
+   return in_near;
+}
+
+static stbi_uc* stbi__resample_row_v_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs)
+{
+   // 2x vertical upsample: each output sample is a 3:1 weighted blend of
+   // the nearer and farther source rows (rounded)
+   int k;
+   STBI_NOTUSED(hs);
+   for (k=0; k < w; ++k)
+      out[k] = stbi__div4(3*in_near[k] + in_far[k] + 2);
+   return out;
+}
+
+static stbi_uc* stbi__resample_row_h_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs)
+{
+   // 2x horizontal upsample: each input pixel yields two outputs, each a
+   // 3:1 blend with the neighbor on the corresponding side
+   int k;
+   stbi_uc *input = in_near;
+
+   STBI_NOTUSED(in_far);
+   STBI_NOTUSED(hs);
+
+   if (w == 1) {
+      // a single input pixel leaves nothing to interpolate with
+      out[0] = out[1] = input[0];
+      return out;
+   }
+
+   // left edge
+   out[0] = input[0];
+   out[1] = stbi__div4(input[0]*3 + input[1] + 2);
+   // interior pixels
+   for (k=1; k < w-1; ++k) {
+      int t = 3*input[k]+2;
+      out[k*2+0] = stbi__div4(t+input[k-1]);
+      out[k*2+1] = stbi__div4(t+input[k+1]);
+   }
+   // right edge (k == w-1 on loop exit)
+   out[(w-1)*2+0] = stbi__div4(input[w-2]*3 + input[w-1] + 2);
+   out[(w-1)*2+1] = input[w-1];
+
+   return out;
+}
+
+#define stbi__div16(x) ((stbi_uc) ((x) >> 4))
+
+static stbi_uc *stbi__resample_row_hv_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs)
+{
+   // 2x2 upsample: a vertical 3:1 blend is computed once per input pixel
+   // and fed through a rolling horizontal 3:1 blend
+   int k, prev, cur;
+
+   STBI_NOTUSED(hs);
+
+   if (w == 1) {
+      out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2);
+      return out;
+   }
+
+   cur = 3*in_near[0] + in_far[0];   // vertically filtered sample, scaled by 4
+   out[0] = stbi__div4(cur+2);
+   for (k=1; k < w; ++k) {
+      prev = cur;
+      cur  = 3*in_near[k]+in_far[k];
+      out[k*2-1] = stbi__div16(3*prev + cur + 8);
+      out[k*2  ] = stbi__div16(3*cur + prev + 8);
+   }
+   out[w*2-1] = stbi__div4(cur+2);
+
+   return out;
+}
+
+#if defined(STBI_SSE2) || defined(STBI_NEON)
+// SIMD (SSE2/NEON) variant of stbi__resample_row_hv_2: identical arithmetic,
+// but processes 8 input samples per iteration.  The scalar tail below the
+// loop handles the last partial group and the row-edge boundary conditions.
+static stbi_uc *stbi__resample_row_hv_2_simd(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs)
+{
+   // need to generate 2x2 samples for every one in input
+   int i=0,t0,t1;
+
+   if (w == 1) {
+      out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2);
+      return out;
+   }
+
+   t1 = 3*in_near[0] + in_far[0];
+   // process groups of 8 pixels for as long as we can.
+   // note we can't handle the last pixel in a row in this loop
+   // because we need to handle the filter boundary conditions.
+   for (; i < ((w-1) & ~7); i += 8) {
+#if defined(STBI_SSE2)
+      // load and perform the vertical filtering pass
+      // this uses 3*x + y = 4*x + (y - x)
+      __m128i zero  = _mm_setzero_si128();
+      __m128i farb  = _mm_loadl_epi64((__m128i *) (in_far + i));
+      __m128i nearb = _mm_loadl_epi64((__m128i *) (in_near + i));
+      __m128i farw  = _mm_unpacklo_epi8(farb, zero);
+      __m128i nearw = _mm_unpacklo_epi8(nearb, zero);
+      __m128i diff  = _mm_sub_epi16(farw, nearw);
+      __m128i nears = _mm_slli_epi16(nearw, 2);
+      __m128i curr  = _mm_add_epi16(nears, diff); // current row
+
+      // horizontal filter works the same based on shifted vers of current
+      // row. "prev" is current row shifted right by 1 pixel; we need to
+      // insert the previous pixel value (from t1).
+      // "next" is current row shifted left by 1 pixel, with first pixel
+      // of next block of 8 pixels added in.
+      __m128i prv0 = _mm_slli_si128(curr, 2);
+      __m128i nxt0 = _mm_srli_si128(curr, 2);
+      __m128i prev = _mm_insert_epi16(prv0, t1, 0);
+      __m128i next = _mm_insert_epi16(nxt0, 3*in_near[i+8] + in_far[i+8], 7);
+
+      // horizontal filter, polyphase implementation since it's convenient:
+      // even pixels = 3*cur + prev = cur*4 + (prev - cur)
+      // odd  pixels = 3*cur + next = cur*4 + (next - cur)
+      // note the shared term.
+      __m128i bias  = _mm_set1_epi16(8);
+      __m128i curs = _mm_slli_epi16(curr, 2);
+      __m128i prvd = _mm_sub_epi16(prev, curr);
+      __m128i nxtd = _mm_sub_epi16(next, curr);
+      __m128i curb = _mm_add_epi16(curs, bias);
+      __m128i even = _mm_add_epi16(prvd, curb);
+      __m128i odd  = _mm_add_epi16(nxtd, curb);
+
+      // interleave even and odd pixels, then undo scaling.
+      __m128i int0 = _mm_unpacklo_epi16(even, odd);
+      __m128i int1 = _mm_unpackhi_epi16(even, odd);
+      __m128i de0  = _mm_srli_epi16(int0, 4);
+      __m128i de1  = _mm_srli_epi16(int1, 4);
+
+      // pack and write output
+      __m128i outv = _mm_packus_epi16(de0, de1);
+      _mm_storeu_si128((__m128i *) (out + i*2), outv);
+#elif defined(STBI_NEON)
+      // load and perform the vertical filtering pass
+      // this uses 3*x + y = 4*x + (y - x)
+      uint8x8_t farb  = vld1_u8(in_far + i);
+      uint8x8_t nearb = vld1_u8(in_near + i);
+      int16x8_t diff  = vreinterpretq_s16_u16(vsubl_u8(farb, nearb));
+      int16x8_t nears = vreinterpretq_s16_u16(vshll_n_u8(nearb, 2));
+      int16x8_t curr  = vaddq_s16(nears, diff); // current row
+
+      // horizontal filter works the same based on shifted vers of current
+      // row. "prev" is current row shifted right by 1 pixel; we need to
+      // insert the previous pixel value (from t1).
+      // "next" is current row shifted left by 1 pixel, with first pixel
+      // of next block of 8 pixels added in.
+      int16x8_t prv0 = vextq_s16(curr, curr, 7);
+      int16x8_t nxt0 = vextq_s16(curr, curr, 1);
+      int16x8_t prev = vsetq_lane_s16(t1, prv0, 0);
+      int16x8_t next = vsetq_lane_s16(3*in_near[i+8] + in_far[i+8], nxt0, 7);
+
+      // horizontal filter, polyphase implementation since it's convenient:
+      // even pixels = 3*cur + prev = cur*4 + (prev - cur)
+      // odd  pixels = 3*cur + next = cur*4 + (next - cur)
+      // note the shared term.
+      int16x8_t curs = vshlq_n_s16(curr, 2);
+      int16x8_t prvd = vsubq_s16(prev, curr);
+      int16x8_t nxtd = vsubq_s16(next, curr);
+      int16x8_t even = vaddq_s16(curs, prvd);
+      int16x8_t odd  = vaddq_s16(curs, nxtd);
+
+      // undo scaling and round, then store with even/odd phases interleaved
+      uint8x8x2_t o;
+      o.val[0] = vqrshrun_n_s16(even, 4);
+      o.val[1] = vqrshrun_n_s16(odd,  4);
+      vst2_u8(out + i*2, o);
+#endif
+
+      // "previous" value for next iter
+      t1 = 3*in_near[i+7] + in_far[i+7];
+   }
+
+   // scalar epilogue: first pixel after the SIMD chunk, then the remainder
+   t0 = t1;
+   t1 = 3*in_near[i] + in_far[i];
+   out[i*2] = stbi__div16(3*t1 + t0 + 8);
+
+   for (++i; i < w; ++i) {
+      t0 = t1;
+      t1 = 3*in_near[i]+in_far[i];
+      out[i*2-1] = stbi__div16(3*t0 + t1 + 8);
+      out[i*2  ] = stbi__div16(3*t1 + t0 + 8);
+   }
+   out[w*2-1] = stbi__div4(t1+2);
+
+   STBI_NOTUSED(hs);
+
+   return out;
+}
+#endif
+
+// resample with nearest-neighbor: replicate each input sample hs times
+static stbi_uc *stbi__resample_row_generic(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs)
+{
+   int col, rep;
+   STBI_NOTUSED(in_far);
+   for (col=0; col < w; ++col) {
+      stbi_uc sample = in_near[col];
+      for (rep=0; rep < hs; ++rep)
+         out[col*hs+rep] = sample;
+   }
+   return out;
+}
+
+// this is a reduced-precision calculation of YCbCr-to-RGB introduced
+// to make sure the code produces the same results in both SIMD and scalar
+#define stbi__float2fixed(x)  (((int) ((x) * 4096.0f + 0.5f)) << 8)
+// Scalar JPEG (JFIF) YCbCr -> RGB conversion for one row, 20.12-ish fixed
+// point.  Writes R,G,B and an opaque alpha byte, advancing by 'step' bytes
+// per pixel (so step==3 simply overwrites the alpha slot with the next R).
+static void stbi__YCbCr_to_RGB_row(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step)
+{
+   int i;
+   for (i=0; i < count; ++i) {
+      int y_fixed = (y[i] << 20) + (1<<19); // rounding
+      int r,g,b;
+      int cr = pcr[i] - 128;
+      int cb = pcb[i] - 128;
+      r = y_fixed +  cr* stbi__float2fixed(1.40200f);
+      g = y_fixed + (cr*-stbi__float2fixed(0.71414f)) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000);
+      b = y_fixed                                     +   cb* stbi__float2fixed(1.77200f);
+      r >>= 20;
+      g >>= 20;
+      b >>= 20;
+      // clamp to 0..255 (single unsigned compare catches both ends)
+      if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; }
+      if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; }
+      if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; }
+      out[0] = (stbi_uc)r;
+      out[1] = (stbi_uc)g;
+      out[2] = (stbi_uc)b;
+      out[3] = 255;
+      out += step;
+   }
+}
+
+#if defined(STBI_SSE2) || defined(STBI_NEON)
+// SIMD YCbCr -> RGB(A) conversion.  Only the step == 4 (RGBA) layout is
+// accelerated, 8 pixels at a time; the scalar loop at the bottom handles
+// step == 3 entirely and any leftover pixels, using the same fixed-point
+// constants so results match the scalar path bit-for-bit.
+static void stbi__YCbCr_to_RGB_simd(stbi_uc *out, stbi_uc const *y, stbi_uc const *pcb, stbi_uc const *pcr, int count, int step)
+{
+   int i = 0;
+
+#ifdef STBI_SSE2
+   // step == 3 is pretty ugly on the final interleave, and i'm not convinced
+   // it's useful in practice (you wouldn't use it for textures, for example).
+   // so just accelerate step == 4 case.
+   if (step == 4) {
+      // this is a fairly straightforward implementation and not super-optimized.
+      __m128i signflip  = _mm_set1_epi8(-0x80);
+      __m128i cr_const0 = _mm_set1_epi16(   (short) ( 1.40200f*4096.0f+0.5f));
+      __m128i cr_const1 = _mm_set1_epi16( - (short) ( 0.71414f*4096.0f+0.5f));
+      __m128i cb_const0 = _mm_set1_epi16( - (short) ( 0.34414f*4096.0f+0.5f));
+      __m128i cb_const1 = _mm_set1_epi16(   (short) ( 1.77200f*4096.0f+0.5f));
+      __m128i y_bias = _mm_set1_epi8((char) (unsigned char) 128);
+      __m128i xw = _mm_set1_epi16(255); // alpha channel
+
+      for (; i+7 < count; i += 8) {
+         // load
+         __m128i y_bytes = _mm_loadl_epi64((__m128i *) (y+i));
+         __m128i cr_bytes = _mm_loadl_epi64((__m128i *) (pcr+i));
+         __m128i cb_bytes = _mm_loadl_epi64((__m128i *) (pcb+i));
+         __m128i cr_biased = _mm_xor_si128(cr_bytes, signflip); // -128
+         __m128i cb_biased = _mm_xor_si128(cb_bytes, signflip); // -128
+
+         // unpack to short (and left-shift cr,cb by 8)
+         __m128i yw  = _mm_unpacklo_epi8(y_bias, y_bytes);
+         __m128i crw = _mm_unpacklo_epi8(_mm_setzero_si128(), cr_biased);
+         __m128i cbw = _mm_unpacklo_epi8(_mm_setzero_si128(), cb_biased);
+
+         // color transform
+         __m128i yws = _mm_srli_epi16(yw, 4);
+         __m128i cr0 = _mm_mulhi_epi16(cr_const0, crw);
+         __m128i cb0 = _mm_mulhi_epi16(cb_const0, cbw);
+         __m128i cb1 = _mm_mulhi_epi16(cbw, cb_const1);
+         __m128i cr1 = _mm_mulhi_epi16(crw, cr_const1);
+         __m128i rws = _mm_add_epi16(cr0, yws);
+         __m128i gwt = _mm_add_epi16(cb0, yws);
+         __m128i bws = _mm_add_epi16(yws, cb1);
+         __m128i gws = _mm_add_epi16(gwt, cr1);
+
+         // descale
+         __m128i rw = _mm_srai_epi16(rws, 4);
+         __m128i bw = _mm_srai_epi16(bws, 4);
+         __m128i gw = _mm_srai_epi16(gws, 4);
+
+         // back to byte, set up for transpose
+         __m128i brb = _mm_packus_epi16(rw, bw);
+         __m128i gxb = _mm_packus_epi16(gw, xw);
+
+         // transpose to interleave channels
+         __m128i t0 = _mm_unpacklo_epi8(brb, gxb);
+         __m128i t1 = _mm_unpackhi_epi8(brb, gxb);
+         __m128i o0 = _mm_unpacklo_epi16(t0, t1);
+         __m128i o1 = _mm_unpackhi_epi16(t0, t1);
+
+         // store
+         _mm_storeu_si128((__m128i *) (out + 0), o0);
+         _mm_storeu_si128((__m128i *) (out + 16), o1);
+         out += 32;
+      }
+   }
+#endif
+
+#ifdef STBI_NEON
+   // in this version, step=3 support would be easy to add. but is there demand?
+   if (step == 4) {
+      // this is a fairly straightforward implementation and not super-optimized.
+      uint8x8_t signflip = vdup_n_u8(0x80);
+      int16x8_t cr_const0 = vdupq_n_s16(   (short) ( 1.40200f*4096.0f+0.5f));
+      int16x8_t cr_const1 = vdupq_n_s16( - (short) ( 0.71414f*4096.0f+0.5f));
+      int16x8_t cb_const0 = vdupq_n_s16( - (short) ( 0.34414f*4096.0f+0.5f));
+      int16x8_t cb_const1 = vdupq_n_s16(   (short) ( 1.77200f*4096.0f+0.5f));
+
+      for (; i+7 < count; i += 8) {
+         // load
+         uint8x8_t y_bytes  = vld1_u8(y + i);
+         uint8x8_t cr_bytes = vld1_u8(pcr + i);
+         uint8x8_t cb_bytes = vld1_u8(pcb + i);
+         int8x8_t cr_biased = vreinterpret_s8_u8(vsub_u8(cr_bytes, signflip));
+         int8x8_t cb_biased = vreinterpret_s8_u8(vsub_u8(cb_bytes, signflip));
+
+         // expand to s16
+         int16x8_t yws = vreinterpretq_s16_u16(vshll_n_u8(y_bytes, 4));
+         int16x8_t crw = vshll_n_s8(cr_biased, 7);
+         int16x8_t cbw = vshll_n_s8(cb_biased, 7);
+
+         // color transform
+         int16x8_t cr0 = vqdmulhq_s16(crw, cr_const0);
+         int16x8_t cb0 = vqdmulhq_s16(cbw, cb_const0);
+         int16x8_t cr1 = vqdmulhq_s16(crw, cr_const1);
+         int16x8_t cb1 = vqdmulhq_s16(cbw, cb_const1);
+         int16x8_t rws = vaddq_s16(yws, cr0);
+         int16x8_t gws = vaddq_s16(vaddq_s16(yws, cb0), cr1);
+         int16x8_t bws = vaddq_s16(yws, cb1);
+
+         // undo scaling, round, convert to byte
+         uint8x8x4_t o;
+         o.val[0] = vqrshrun_n_s16(rws, 4);
+         o.val[1] = vqrshrun_n_s16(gws, 4);
+         o.val[2] = vqrshrun_n_s16(bws, 4);
+         o.val[3] = vdup_n_u8(255);
+
+         // store, interleaving r/g/b/a
+         vst4_u8(out, o);
+         out += 8*4;
+      }
+   }
+#endif
+
+   // scalar tail: remaining pixels (and the whole row when step != 4)
+   for (; i < count; ++i) {
+      int y_fixed = (y[i] << 20) + (1<<19); // rounding
+      int r,g,b;
+      int cr = pcr[i] - 128;
+      int cb = pcb[i] - 128;
+      r = y_fixed + cr* stbi__float2fixed(1.40200f);
+      g = y_fixed + cr*-stbi__float2fixed(0.71414f) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000);
+      b = y_fixed                                   +   cb* stbi__float2fixed(1.77200f);
+      r >>= 20;
+      g >>= 20;
+      b >>= 20;
+      if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; }
+      if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; }
+      if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; }
+      out[0] = (stbi_uc)r;
+      out[1] = (stbi_uc)g;
+      out[2] = (stbi_uc)b;
+      out[3] = 255;
+      out += step;
+   }
+}
+#endif
+
+// set up the kernels
+// Installs the scalar kernels, then overrides them with SIMD versions:
+// SSE2 only after a runtime availability check, NEON unconditionally
+// (NEON presence is a compile-time decision on ARM).
+static void stbi__setup_jpeg(stbi__jpeg *j)
+{
+   j->idct_block_kernel = stbi__idct_block;
+   j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_row;
+   j->resample_row_hv_2_kernel = stbi__resample_row_hv_2;
+
+#ifdef STBI_SSE2
+   if (stbi__sse2_available()) {
+      j->idct_block_kernel = stbi__idct_simd;
+      j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd;
+      j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd;
+   }
+#endif
+
+#ifdef STBI_NEON
+   j->idct_block_kernel = stbi__idct_simd;
+   j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd;
+   j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd;
+#endif
+}
+
+// clean up the temporary component buffers
+// (frees the per-component decode scratch, not the stbi__jpeg struct itself)
+static void stbi__cleanup_jpeg(stbi__jpeg *j)
+{
+   stbi__free_jpeg_components(j, j->s->img_n, 0);
+}
+
+// Per-component chroma upsampling state used by load_jpeg_image.
+typedef struct
+{
+   resample_row_func resample; // kernel selected from the hs/vs factors
+   stbi_uc *line0,*line1; // the two source rows blended by the kernel
+   int hs,vs;   // expansion factor in each axis
+   int w_lores; // horizontal pixels pre-expansion
+   int ystep;   // how far through vertical expansion we are
+   int ypos;    // which pre-expansion row we're on
+} stbi__resample;
+
+// fast 0..255 * 0..255 => 0..255 rounded multiplication
+static stbi_uc stbi__blinn_8x8(stbi_uc x, stbi_uc y)
+{
+   // (t + (t >> 8)) >> 8 is the exact replacement for t / 255 here
+   unsigned int prod = x*y + 128;
+   return (stbi_uc) ((prod + (prod >> 8)) >> 8);
+}
+
+// Decode a JPEG, then upsample the chroma planes and color-convert into an
+// interleaved 8-bit buffer with n channels (req_comp, or 3/1 by default).
+// Frees all decode scratch state before returning; on failure returns NULL
+// with the stbi error set.  Caller owns (and frees) the returned buffer.
+static stbi_uc *load_jpeg_image(stbi__jpeg *z, int *out_x, int *out_y, int *comp, int req_comp)
+{
+   int n, decode_n, is_rgb;
+   z->s->img_n = 0; // make stbi__cleanup_jpeg safe
+
+   // validate req_comp
+   if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error");
+
+   // load a jpeg image from whichever source, but leave in YCbCr format
+   if (!stbi__decode_jpeg_image(z)) { stbi__cleanup_jpeg(z); return NULL; }
+
+   // determine actual number of components to generate
+   n = req_comp ? req_comp : z->s->img_n >= 3 ? 3 : 1;
+
+   // three channels stored as literal RGB (Adobe transform 0, or an "RGB"
+   // component-id tag) rather than the usual YCbCr
+   is_rgb = z->s->img_n == 3 && (z->rgb == 3 || (z->app14_color_transform == 0 && !z->jfif));
+
+   // grayscale output from YCbCr only needs the luma plane decoded
+   if (z->s->img_n == 3 && n < 3 && !is_rgb)
+      decode_n = 1;
+   else
+      decode_n = z->s->img_n;
+
+   // resample and color-convert
+   {
+      int k;
+      unsigned int i,j;
+      stbi_uc *output;
+      stbi_uc *coutput[4];
+
+      stbi__resample res_comp[4];
+
+      for (k=0; k < decode_n; ++k) {
+         stbi__resample *r = &res_comp[k];
+
+         // allocate line buffer big enough for upsampling off the edges
+         // with upsample factor of 4
+         z->img_comp[k].linebuf = (stbi_uc *) stbi__malloc(z->s->img_x + 3);
+         if (!z->img_comp[k].linebuf) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); }
+
+         r->hs      = z->img_h_max / z->img_comp[k].h;
+         r->vs      = z->img_v_max / z->img_comp[k].v;
+         r->ystep   = r->vs >> 1;
+         r->w_lores = (z->s->img_x + r->hs-1) / r->hs;
+         r->ypos    = 0;
+         r->line0   = r->line1 = z->img_comp[k].data;
+
+         if      (r->hs == 1 && r->vs == 1) r->resample = resample_row_1;
+         else if (r->hs == 1 && r->vs == 2) r->resample = stbi__resample_row_v_2;
+         else if (r->hs == 2 && r->vs == 1) r->resample = stbi__resample_row_h_2;
+         else if (r->hs == 2 && r->vs == 2) r->resample = z->resample_row_hv_2_kernel;
+         else                               r->resample = stbi__resample_row_generic;
+      }
+
+      // can't error after this so, this is safe
+      output = (stbi_uc *) stbi__malloc_mad3(n, z->s->img_x, z->s->img_y, 1);
+      if (!output) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); }
+
+      // now go ahead and resample
+      for (j=0; j < z->s->img_y; ++j) {
+         stbi_uc *out = output + n * z->s->img_x * j;
+         for (k=0; k < decode_n; ++k) {
+            stbi__resample *r = &res_comp[k];
+            int y_bot = r->ystep >= (r->vs >> 1);
+            coutput[k] = r->resample(z->img_comp[k].linebuf,
+                                     y_bot ? r->line1 : r->line0,
+                                     y_bot ? r->line0 : r->line1,
+                                     r->w_lores, r->hs);
+            if (++r->ystep >= r->vs) {
+               r->ystep = 0;
+               r->line0 = r->line1;
+               if (++r->ypos < z->img_comp[k].y)
+                  r->line1 += z->img_comp[k].w2;
+            }
+         }
+         if (n >= 3) {
+            stbi_uc *y = coutput[0];
+            if (z->s->img_n == 3) {
+               if (is_rgb) {
+                  for (i=0; i < z->s->img_x; ++i) {
+                     out[0] = y[i];
+                     out[1] = coutput[1][i];
+                     out[2] = coutput[2][i];
+                     out[3] = 255;
+                     out += n;
+                  }
+               } else {
+                  z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n);
+               }
+            } else if (z->s->img_n == 4) {
+               if (z->app14_color_transform == 0) { // CMYK
+                  for (i=0; i < z->s->img_x; ++i) {
+                     stbi_uc m = coutput[3][i];
+                     out[0] = stbi__blinn_8x8(coutput[0][i], m);
+                     out[1] = stbi__blinn_8x8(coutput[1][i], m);
+                     out[2] = stbi__blinn_8x8(coutput[2][i], m);
+                     out[3] = 255;
+                     out += n;
+                  }
+               } else if (z->app14_color_transform == 2) { // YCCK
+                  z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n);
+                  for (i=0; i < z->s->img_x; ++i) {
+                     stbi_uc m = coutput[3][i];
+                     out[0] = stbi__blinn_8x8(255 - out[0], m);
+                     out[1] = stbi__blinn_8x8(255 - out[1], m);
+                     out[2] = stbi__blinn_8x8(255 - out[2], m);
+                     out += n;
+                  }
+               } else { // YCbCr + alpha?  Ignore the fourth channel for now
+                  z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n);
+               }
+            } else
+               for (i=0; i < z->s->img_x; ++i) {
+                  out[0] = out[1] = out[2] = y[i];
+                  out[3] = 255; // not used if n==3
+                  out += n;
+               }
+         } else {
+            if (is_rgb) {
+               if (n == 1)
+                  for (i=0; i < z->s->img_x; ++i)
+                     *out++ = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]);
+               else {
+                  for (i=0; i < z->s->img_x; ++i, out += 2) {
+                     out[0] = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]);
+                     out[1] = 255;
+                  }
+               }
+            } else if (z->s->img_n == 4 && z->app14_color_transform == 0) {
+               for (i=0; i < z->s->img_x; ++i) {
+                  stbi_uc m = coutput[3][i];
+                  stbi_uc r = stbi__blinn_8x8(coutput[0][i], m);
+                  stbi_uc g = stbi__blinn_8x8(coutput[1][i], m);
+                  stbi_uc b = stbi__blinn_8x8(coutput[2][i], m);
+                  out[0] = stbi__compute_y(r, g, b);
+                  out[1] = 255;
+                  out += n;
+               }
+            } else if (z->s->img_n == 4 && z->app14_color_transform == 2) {
+               for (i=0; i < z->s->img_x; ++i) {
+                  out[0] = stbi__blinn_8x8(255 - coutput[0][i], coutput[3][i]);
+                  out[1] = 255;
+                  out += n;
+               }
+            } else {
+               stbi_uc *y = coutput[0];
+               if (n == 1)
+                  for (i=0; i < z->s->img_x; ++i) out[i] = y[i];
+               else
+                  for (i=0; i < z->s->img_x; ++i) *out++ = y[i], *out++ = 255;
+            }
+         }
+      }
+      stbi__cleanup_jpeg(z);
+      *out_x = z->s->img_x;
+      *out_y = z->s->img_y;
+      if (comp) *comp = z->s->img_n >= 3 ? 3 : 1; // report original components, not output
+      return output;
+   }
+}
+
+// Top-level JPEG loader: allocates decoder state, decodes, and frees it.
+static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
+{
+   unsigned char* result;
+   stbi__jpeg* j = (stbi__jpeg*) stbi__malloc(sizeof(stbi__jpeg));
+   // allocation can fail; without this check "j->s = s" below would
+   // dereference NULL (stbi__jpeg_info already guards the same way)
+   if (!j) return stbi__errpuc("outofmem", "Out of memory");
+   STBI_NOTUSED(ri);
+   j->s = s;
+   stbi__setup_jpeg(j);
+   result = load_jpeg_image(j, x,y,comp,req_comp);
+   STBI_FREE(j);
+   return result;
+}
+
+// Probe whether the stream looks like a JPEG (header-only scan, then rewind).
+static int stbi__jpeg_test(stbi__context *s)
+{
+   int r;
+   stbi__jpeg* j = (stbi__jpeg*)stbi__malloc(sizeof(stbi__jpeg));
+   // guard against malloc failure, matching stbi__jpeg_info; stbi__err
+   // records the message and returns 0
+   if (!j) return stbi__err("outofmem", "Out of memory");
+   j->s = s;
+   stbi__setup_jpeg(j);
+   r = stbi__decode_jpeg_header(j, STBI__SCAN_type);
+   stbi__rewind(s);
+   STBI_FREE(j);
+   return r;
+}
+
+// Parse only the JPEG header to report dimensions/components; on failure
+// rewind the stream so other format probes can start from the beginning.
+static int stbi__jpeg_info_raw(stbi__jpeg *j, int *x, int *y, int *comp)
+{
+   if (stbi__decode_jpeg_header(j, STBI__SCAN_header)) {
+      if (x)    *x    = j->s->img_x;
+      if (y)    *y    = j->s->img_y;
+      if (comp) *comp = (j->s->img_n >= 3) ? 3 : 1;
+      return 1;
+   }
+   stbi__rewind(j->s);
+   return 0;
+}
+
+// Public-facing JPEG info: allocate temporary decoder state, query the
+// header, and free the state again.
+static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp)
+{
+   int ok = 0;
+   stbi__jpeg *j = (stbi__jpeg *) stbi__malloc(sizeof(stbi__jpeg));
+   if (j) {
+      j->s = s;
+      ok = stbi__jpeg_info_raw(j, x, y, comp);
+      STBI_FREE(j);
+   } else {
+      stbi__errpuc("outofmem", "Out of memory");
+   }
+   return ok;
+}
+#endif
+
+// public domain zlib decode v0.2 Sean Barrett 2006-11-18
+// simple implementation
+// - all input must be provided in an upfront buffer
+// - all output is written to a single output buffer (can malloc/realloc)
+// performance
+// - fast huffman
+
+#ifndef STBI_NO_ZLIB
+
+// fast-way is faster to check than jpeg huffman, but slow way is slower
+#define STBI__ZFAST_BITS 9 // accelerate all cases in default tables
+#define STBI__ZFAST_MASK ((1 << STBI__ZFAST_BITS) - 1)
+
+// zlib-style huffman encoding
+// (jpegs packs from left, zlib from right, so can't share code)
+// Decode tables for one canonical huffman alphabet (lengths or distances).
+typedef struct
+{
+   stbi__uint16 fast[1 << STBI__ZFAST_BITS]; // (code length << 9) | symbol, indexed by the next 9 input bits; 0 = not in table
+   stbi__uint16 firstcode[16]; // first canonical code of each length
+   int maxcode[17]; // one past the last code of each length, preshifted to 16 bits
+   stbi__uint16 firstsymbol[16]; // symbol index of the first code of each length
+   stbi_uc  size[288]; // code length per symbol slot
+   stbi__uint16 value[288]; // symbol per slot
+} stbi__zhuffman;
+
+// Reverse the low 16 bits of n by swapping progressively larger groups
+// (1-bit pairs, then 2-bit, 4-bit, and finally the two bytes).
+stbi_inline static int stbi__bitreverse16(int n)
+{
+  n = ((n >> 1) & 0x5555) | ((n << 1) & 0xAAAA);
+  n = ((n >> 2) & 0x3333) | ((n << 2) & 0xCCCC);
+  n = ((n >> 4) & 0x0F0F) | ((n << 4) & 0xF0F0);
+  n = ((n >> 8) & 0x00FF) | ((n << 8) & 0xFF00);
+  return n;
+}
+
+// Reverse the low 'bits' bits of v (zlib codes arrive LSB-first, the
+// canonical-code comparisons below want them MSB-first).
+stbi_inline static int stbi__bit_reverse(int v, int bits)
+{
+   STBI_ASSERT(bits <= 16);
+   // to bit reverse n bits, reverse 16 and shift
+   // e.g. 11 bits, bit reverse and shift away 5
+   return stbi__bitreverse16(v) >> (16-bits);
+}
+
+// Build decode tables for one huffman alphabet from its per-symbol code
+// lengths, using the canonical-code construction of RFC 1951 (3.2.2).
+// Also fills the 9-bit 'fast' lookup table for short codes.
+// Returns 0 (with error set) if the lengths are not a valid code.
+static int stbi__zbuild_huffman(stbi__zhuffman *z, const stbi_uc *sizelist, int num)
+{
+   int i,k=0;
+   int code, next_code[16], sizes[17];
+
+   // DEFLATE spec for generating codes
+   memset(sizes, 0, sizeof(sizes));
+   memset(z->fast, 0, sizeof(z->fast));
+   for (i=0; i < num; ++i)
+      ++sizes[sizelist[i]];
+   sizes[0] = 0;
+   for (i=1; i < 16; ++i)
+      if (sizes[i] > (1 << i))
+         return stbi__err("bad sizes", "Corrupt PNG");
+   code = 0;
+   for (i=1; i < 16; ++i) {
+      next_code[i] = code;
+      z->firstcode[i] = (stbi__uint16) code;
+      z->firstsymbol[i] = (stbi__uint16) k;
+      code = (code + sizes[i]);
+      if (sizes[i])
+         if (code-1 >= (1 << i)) return stbi__err("bad codelengths","Corrupt PNG");
+      z->maxcode[i] = code << (16-i); // preshift for inner loop
+      code <<= 1;
+      k += sizes[i];
+   }
+   z->maxcode[16] = 0x10000; // sentinel
+   for (i=0; i < num; ++i) {
+      int s = sizelist[i];
+      if (s) {
+         int c = next_code[s] - z->firstcode[s] + z->firstsymbol[s];
+         stbi__uint16 fastv = (stbi__uint16) ((s << 9) | i);
+         z->size [c] = (stbi_uc     ) s;
+         z->value[c] = (stbi__uint16) i;
+         if (s <= STBI__ZFAST_BITS) {
+            // replicate the (bit-reversed) code into every fast-table slot
+            // whose low s bits match it
+            int j = stbi__bit_reverse(next_code[s],s);
+            while (j < (1 << STBI__ZFAST_BITS)) {
+               z->fast[j] = fastv;
+               j += (1 << s);
+            }
+         }
+         ++next_code[s];
+      }
+   }
+   return 1;
+}
+
+// zlib-from-memory implementation for PNG reading
+// because PNG allows splitting the zlib stream arbitrarily,
+// and it's annoying structurally to have PNG call ZLIB call PNG,
+// we require PNG read all the IDATs and combine them into a single
+// memory buffer
+
+// State for the in-memory zlib decoder.
+typedef struct
+{
+   stbi_uc *zbuffer, *zbuffer_end; // compressed input [zbuffer, zbuffer_end)
+   int num_bits; // number of valid bits currently in code_buffer
+   stbi__uint32 code_buffer; // bit accumulator, next bit is the LSB
+
+   char *zout; // next output write position
+   char *zout_start; // start of the output buffer
+   char *zout_end; // one past the end of the output buffer
+   int   z_expandable; // nonzero => output buffer may be realloc'd
+
+   stbi__zhuffman z_length, z_distance;
+} stbi__zbuf;
+
+// Fetch one byte of compressed input; reads past the end yield 0 (the
+// resulting corruption is detected by later checks).
+stbi_inline static stbi_uc stbi__zget8(stbi__zbuf *z)
+{
+   return (z->zbuffer < z->zbuffer_end) ? *z->zbuffer++ : 0;
+}
+
+// Top up the bit accumulator: shift in whole input bytes above the bits
+// already buffered until at least 25 bits are available.
+static void stbi__fill_bits(stbi__zbuf *z)
+{
+   do {
+      STBI_ASSERT(z->code_buffer < (1U << z->num_bits));
+      z->code_buffer |= (unsigned int) stbi__zget8(z) << z->num_bits;
+      z->num_bits += 8;
+   } while (z->num_bits <= 24);
+}
+
+// Consume and return the next n bits of the stream (zlib packs LSB-first,
+// so we mask off the low bits and shift the buffer down).
+stbi_inline static unsigned int stbi__zreceive(stbi__zbuf *z, int n)
+{
+   unsigned int k;
+   if (z->num_bits < n) stbi__fill_bits(z);
+   k = z->code_buffer & ((1 << n) - 1);
+   z->code_buffer >>= n;
+   z->num_bits -= n;
+   return k;
+}
+
+// Decode a symbol whose code is longer than the fast-table width: bit-
+// reverse the buffered bits to MSB-first order, find the code's length by
+// scanning maxcode[], then index the canonical tables.  Returns -1 on an
+// invalid code.
+static int stbi__zhuffman_decode_slowpath(stbi__zbuf *a, stbi__zhuffman *z)
+{
+   int b,s,k;
+   // not resolved by fast table, so compute it the slow way
+   // use jpeg approach, which requires MSbits at top
+   k = stbi__bit_reverse(a->code_buffer, 16);
+   for (s=STBI__ZFAST_BITS+1; ; ++s)
+      if (k < z->maxcode[s])
+         break;
+   if (s == 16) return -1; // invalid code!
+   // code size is s, so:
+   b = (k >> (16-s)) - z->firstcode[s] + z->firstsymbol[s];
+   STBI_ASSERT(z->size[b] == s);
+   a->code_buffer >>= s;
+   a->num_bits -= s;
+   return z->value[b];
+}
+
+// Decode one huffman symbol: try the 9-bit fast table first (entry packs
+// length<<9 | symbol), falling back to the slow path for longer codes.
+stbi_inline static int stbi__zhuffman_decode(stbi__zbuf *a, stbi__zhuffman *z)
+{
+   int b,s;
+   if (a->num_bits < 16) stbi__fill_bits(a);
+   b = z->fast[a->code_buffer & STBI__ZFAST_MASK];
+   if (b) {
+      s = b >> 9;
+      a->code_buffer >>= s;
+      a->num_bits -= s;
+      return b & 511;
+   }
+   return stbi__zhuffman_decode_slowpath(a, z);
+}
+
+// Grow the (expandable) zlib output buffer until at least n more bytes fit,
+// doubling its size each step.  Returns 0 with the error set on failure.
+static int stbi__zexpand(stbi__zbuf *z, char *zout, int n)  // need to make room for n bytes
+{
+   char *q;
+   int cur, limit, old_limit;
+   z->zout = zout;
+   if (!z->z_expandable) return stbi__err("output buffer limit","Corrupt PNG");
+   cur   = (int) (z->zout     - z->zout_start);
+   limit = old_limit = (int) (z->zout_end - z->zout_start);
+   // guard the doubling loop: without these checks a huge request makes
+   // cur + n and "limit *= 2" overflow (signed-int UB / infinite loop)
+   if (UINT_MAX - (unsigned) cur < (unsigned) n) return stbi__err("outofmem", "Out of memory");
+   while (cur + n > limit) {
+      if (limit > INT_MAX / 2) return stbi__err("outofmem", "Out of memory");
+      limit *= 2;
+   }
+   q = (char *) STBI_REALLOC_SIZED(z->zout_start, old_limit, limit);
+   STBI_NOTUSED(old_limit);
+   if (q == NULL) return stbi__err("outofmem", "Out of memory");
+   z->zout_start = q;
+   z->zout       = q + cur;
+   z->zout_end   = q + limit;
+   return 1;
+}
+
+// DEFLATE length/distance base values and extra-bit counts per RFC 1951
+// section 3.2.5 (trailing zeros pad the tables past the last valid code).
+static const int stbi__zlength_base[31] = {
+   3,4,5,6,7,8,9,10,11,13,
+   15,17,19,23,27,31,35,43,51,59,
+   67,83,99,115,131,163,195,227,258,0,0 };
+
+static const int stbi__zlength_extra[31]=
+{ 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0 };
+
+static const int stbi__zdist_base[32] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,
+257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0};
+
+static const int stbi__zdist_extra[32] =
+{ 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13};
+
+// Decode one huffman-coded DEFLATE block into z->zout: literals are copied
+// straight through, symbol 256 ends the block, and 257+ are length codes
+// followed by a distance code for an LZ77 back-reference copy.  Grows the
+// output buffer as needed; returns 0 on corrupt data or OOM.
+static int stbi__parse_huffman_block(stbi__zbuf *a)
+{
+   char *zout = a->zout;
+   for(;;) {
+      int z = stbi__zhuffman_decode(a, &a->z_length);
+      if (z < 256) {
+         if (z < 0) return stbi__err("bad huffman code","Corrupt PNG"); // error in huffman codes
+         if (zout >= a->zout_end) {
+            if (!stbi__zexpand(a, zout, 1)) return 0;
+            zout = a->zout;
+         }
+         *zout++ = (char) z;
+      } else {
+         stbi_uc *p;
+         int len,dist;
+         if (z == 256) {
+            a->zout = zout;
+            return 1;
+         }
+         z -= 257;
+         len = stbi__zlength_base[z];
+         if (stbi__zlength_extra[z]) len += stbi__zreceive(a, stbi__zlength_extra[z]);
+         z = stbi__zhuffman_decode(a, &a->z_distance);
+         if (z < 0) return stbi__err("bad huffman code","Corrupt PNG");
+         dist = stbi__zdist_base[z];
+         if (stbi__zdist_extra[z]) dist += stbi__zreceive(a, stbi__zdist_extra[z]);
+         // back-reference may not start before the beginning of the output
+         if (zout - a->zout_start < dist) return stbi__err("bad dist","Corrupt PNG");
+         if (zout + len > a->zout_end) {
+            if (!stbi__zexpand(a, zout, len)) return 0;
+            zout = a->zout;
+         }
+         p = (stbi_uc *) (zout - dist);
+         if (dist == 1) { // run of one byte; common in images.
+            stbi_uc v = *p;
+            if (len) { do *zout++ = v; while (--len); }
+         } else {
+            // byte-by-byte copy is required: source and dest may overlap
+            if (len) { do *zout++ = *p++; while (--len); }
+         }
+      }
+   }
+}
+
+// Read a dynamic huffman table definition (DEFLATE block type 2): first the
+// code-length code, then the run-length-encoded literal/length and distance
+// code lengths, and finally build both decoders from them.
+static int stbi__compute_huffman_codes(stbi__zbuf *a)
+{
+   static const stbi_uc length_dezigzag[19] = { 16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15 };
+   stbi__zhuffman z_codelength;
+   stbi_uc lencodes[286+32+137];//padding for maximum single op
+   stbi_uc codelength_sizes[19];
+   int i,n;
+
+   int hlit  = stbi__zreceive(a,5) + 257;
+   int hdist = stbi__zreceive(a,5) + 1;
+   int hclen = stbi__zreceive(a,4) + 4;
+   int ntot  = hlit + hdist;
+
+   memset(codelength_sizes, 0, sizeof(codelength_sizes));
+   for (i=0; i < hclen; ++i) {
+      int s = stbi__zreceive(a,3);
+      codelength_sizes[length_dezigzag[i]] = (stbi_uc) s;
+   }
+   if (!stbi__zbuild_huffman(&z_codelength, codelength_sizes, 19)) return 0;
+
+   n = 0;
+   while (n < ntot) {
+      int c = stbi__zhuffman_decode(a, &z_codelength);
+      if (c < 0 || c >= 19) return stbi__err("bad codelengths", "Corrupt PNG");
+      if (c < 16)
+         lencodes[n++] = (stbi_uc) c;
+      else {
+         // 16 = repeat previous length, 17/18 = runs of zero lengths
+         stbi_uc fill = 0;
+         if (c == 16) {
+            c = stbi__zreceive(a,2)+3;
+            if (n == 0) return stbi__err("bad codelengths", "Corrupt PNG");
+            fill = lencodes[n-1];
+         } else if (c == 17)
+            c = stbi__zreceive(a,3)+3;
+         else {
+            STBI_ASSERT(c == 18);
+            c = stbi__zreceive(a,7)+11;
+         }
+         if (ntot - n < c) return stbi__err("bad codelengths", "Corrupt PNG");
+         memset(lencodes+n, fill, c);
+         n += c;
+      }
+   }
+   if (n != ntot) return stbi__err("bad codelengths","Corrupt PNG");
+   if (!stbi__zbuild_huffman(&a->z_length, lencodes, hlit)) return 0;
+   if (!stbi__zbuild_huffman(&a->z_distance, lencodes+hlit, hdist)) return 0;
+   return 1;
+}
+
+// Handle a stored (uncompressed) DEFLATE block: discard bits to the next
+// byte boundary, read the LEN/NLEN header (NLEN is LEN's complement), and
+// copy the raw bytes straight to the output.
+static int stbi__parse_uncompressed_block(stbi__zbuf *a)
+{
+   stbi_uc header[4];
+   int len,nlen,k;
+   if (a->num_bits & 7)
+      stbi__zreceive(a, a->num_bits & 7); // discard
+   // drain the bit-packed data into header
+   k = 0;
+   while (a->num_bits > 0) {
+      header[k++] = (stbi_uc) (a->code_buffer & 255); // suppress MSVC run-time check
+      a->code_buffer >>= 8;
+      a->num_bits -= 8;
+   }
+   STBI_ASSERT(a->num_bits == 0);
+   // now fill header the normal way
+   while (k < 4)
+      header[k++] = stbi__zget8(a);
+   len  = header[1] * 256 + header[0];
+   nlen = header[3] * 256 + header[2];
+   if (nlen != (len ^ 0xffff)) return stbi__err("zlib corrupt","Corrupt PNG");
+   if (a->zbuffer + len > a->zbuffer_end) return stbi__err("read past buffer","Corrupt PNG");
+   if (a->zout + len > a->zout_end)
+      if (!stbi__zexpand(a, a->zout, len)) return 0;
+   memcpy(a->zout, a->zbuffer, len);
+   a->zbuffer += len;
+   a->zout += len;
+   return 1;
+}
+
+// Validate the two-byte zlib stream header (RFC 1950): checksum of
+// CMF/FLG, no preset dictionary, and DEFLATE as the compression method.
+static int stbi__parse_zlib_header(stbi__zbuf *a)
+{
+   int cmf   = stbi__zget8(a);
+   int cm    = cmf & 15;
+   /* int cinfo = cmf >> 4; */
+   int flg   = stbi__zget8(a);
+   if ((cmf*256+flg) % 31 != 0) return stbi__err("bad zlib header","Corrupt PNG"); // zlib spec
+   if (flg & 32) return stbi__err("no preset dict","Corrupt PNG"); // preset dictionary not allowed in png
+   if (cm != 8) return stbi__err("bad compression","Corrupt PNG"); // DEFLATE required for png
+   // window = 1 << (8 + cinfo)... but who cares, we fully buffer output
+   return 1;
+}
+
+// Code lengths for the fixed huffman codes of DEFLATE block type 1
+// (RFC 1951 3.2.6); see the "Init algorithm" comment below for how these
+// constants are derived.
+static const stbi_uc stbi__zdefault_length[288] =
+{
+   8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+   8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+   8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+   8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+   8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
+   9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
+   9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
+   9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
+   7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8
+};
+static const stbi_uc stbi__zdefault_distance[32] =
+{
+   5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5
+};
+/*
+Init algorithm:
+{
+ int i; // use <= to match clearly with spec
+ for (i=0; i <= 143; ++i) stbi__zdefault_length[i] = 8;
+ for ( ; i <= 255; ++i) stbi__zdefault_length[i] = 9;
+ for ( ; i <= 279; ++i) stbi__zdefault_length[i] = 7;
+ for ( ; i <= 287; ++i) stbi__zdefault_length[i] = 8;
+
+ for (i=0; i <= 31; ++i) stbi__zdefault_distance[i] = 5;
+}
+*/
+
+// Decode an entire zlib/DEFLATE stream: optionally check the zlib header,
+// then process blocks until one with the 'final' bit set has been handled.
+// Block type 3 is reserved and therefore invalid.
+static int stbi__parse_zlib(stbi__zbuf *a, int parse_header)
+{
+   int final, type;
+   if (parse_header)
+      if (!stbi__parse_zlib_header(a)) return 0;
+   a->num_bits = 0;
+   a->code_buffer = 0;
+   do {
+      final = stbi__zreceive(a,1);
+      type = stbi__zreceive(a,2);
+      if (type == 0) {
+         if (!stbi__parse_uncompressed_block(a)) return 0;
+      } else if (type == 3) {
+         return 0;
+      } else {
+         if (type == 1) {
+            // use fixed code lengths
+            if (!stbi__zbuild_huffman(&a->z_length  , stbi__zdefault_length  , 288)) return 0;
+            if (!stbi__zbuild_huffman(&a->z_distance, stbi__zdefault_distance,  32)) return 0;
+         } else {
+            if (!stbi__compute_huffman_codes(a)) return 0;
+         }
+         if (!stbi__parse_huffman_block(a)) return 0;
+      }
+   } while (!final);
+   return 1;
+}
+
+// Bind an output buffer to the decoder state and run the zlib parser.
+// 'exp' selects whether the buffer may be grown via realloc on overflow.
+static int stbi__do_zlib(stbi__zbuf *a, char *obuf, int olen, int exp, int parse_header)
+{
+   a->zout_start = obuf;
+   a->zout       = obuf;
+   a->zout_end   = obuf + olen;
+   a->z_expandable = exp;
+
+   return stbi__parse_zlib(a, parse_header);
+}
+
+// Decompress a zlib stream into a malloc'd buffer that starts at
+// initial_size bytes and grows as needed.  On success returns the buffer
+// (caller frees) and stores the decoded length in *outlen; NULL on failure.
+STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen)
+{
+   stbi__zbuf a;
+   char *p = (char *) stbi__malloc(initial_size);
+   if (p == NULL) return NULL;
+   a.zbuffer = (stbi_uc *) buffer;
+   a.zbuffer_end = (stbi_uc *) buffer + len;
+   if (stbi__do_zlib(&a, p, initial_size, 1, 1)) {
+      if (outlen) *outlen = (int) (a.zout - a.zout_start);
+      return a.zout_start;
+   } else {
+      STBI_FREE(a.zout_start);
+      return NULL;
+   }
+}
+
+// Convenience wrapper with a 16KB initial guess for the decoded size.
+STBIDEF char *stbi_zlib_decode_malloc(char const *buffer, int len, int *outlen)
+{
+   return stbi_zlib_decode_malloc_guesssize(buffer, len, 16384, outlen);
+}
+
+// Same as stbi_zlib_decode_malloc_guesssize, but lets the caller choose
+// whether to expect (and validate) the 2-byte zlib header.
+STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header)
+{
+   stbi__zbuf a;
+   char *p = (char *) stbi__malloc(initial_size);
+   if (p == NULL) return NULL;
+   a.zbuffer = (stbi_uc *) buffer;
+   a.zbuffer_end = (stbi_uc *) buffer + len;
+   if (stbi__do_zlib(&a, p, initial_size, 1, parse_header)) {
+      if (outlen) *outlen = (int) (a.zout - a.zout_start);
+      return a.zout_start;
+   } else {
+      STBI_FREE(a.zout_start);
+      return NULL;
+   }
+}
+
+// Decompress into a caller-supplied, fixed-size buffer (not expandable).
+// Returns the number of decoded bytes, or -1 on failure/overflow.
+STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, char const *ibuffer, int ilen)
+{
+   stbi__zbuf a;
+   a.zbuffer = (stbi_uc *) ibuffer;
+   a.zbuffer_end = a.zbuffer + ilen;
+   if (!stbi__do_zlib(&a, obuffer, olen, 0, 1))
+      return -1;
+   return (int) (a.zout - a.zout_start);
+}
+
+// Decompress a raw DEFLATE stream (no zlib header) into a growable
+// malloc'd buffer; caller frees the result, *outlen gets the length.
+STBIDEF char *stbi_zlib_decode_noheader_malloc(char const *buffer, int len, int *outlen)
+{
+   stbi__zbuf a;
+   char *p = (char *) stbi__malloc(16384);
+   if (p == NULL) return NULL;
+   a.zbuffer = (stbi_uc *) buffer;
+   a.zbuffer_end = (stbi_uc *) buffer+len;
+   if (stbi__do_zlib(&a, p, 16384, 1, 0)) {
+      if (outlen) *outlen = (int) (a.zout - a.zout_start);
+      return a.zout_start;
+   } else {
+      STBI_FREE(a.zout_start);
+      return NULL;
+   }
+}
+
+// Decompress a raw DEFLATE stream (no zlib header) into a caller-supplied,
+// fixed-size buffer.  Returns decoded byte count, or -1 on failure.
+STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen)
+{
+   stbi__zbuf a;
+   a.zbuffer = (stbi_uc *) ibuffer;
+   a.zbuffer_end = a.zbuffer + ilen;
+   if (!stbi__do_zlib(&a, obuffer, olen, 0, 0))
+      return -1;
+   return (int) (a.zout - a.zout_start);
+}
+#endif
+
+// public domain "baseline" PNG decoder v0.10 Sean Barrett 2006-11-18
+// simple implementation
+// - only 8-bit samples
+// - no CRC checking
+// - allocates lots of intermediate memory
+// - avoids problem of streaming data between subsystems
+// - avoids explicit window management
+// performance
+// - uses stb_zlib, a PD zlib implementation with fast huffman decoding
+
+#ifndef STBI_NO_PNG
+// A PNG chunk header: data length in bytes plus the 4-character type code.
+typedef struct
+{
+   stbi__uint32 length;
+   stbi__uint32 type;
+} stbi__pngchunk;
+
+// Read the next chunk's length and type (both big-endian) from the stream.
+static stbi__pngchunk stbi__get_chunk_header(stbi__context *s)
+{
+   stbi__pngchunk chunk;
+   chunk.length = stbi__get32be(s);
+   chunk.type   = stbi__get32be(s);
+   return chunk;
+}
+
// Verify the 8-byte PNG file signature, consuming it from the stream.
// Returns 1 on match, 0 (via stbi__err) on mismatch; on mismatch the read
// stops at the first non-matching byte.
static int stbi__check_png_header(stbi__context *s)
{
   static const stbi_uc png_sig[8] = { 137,80,78,71,13,10,26,10 };
   int i;
   for (i=0; i < 8; ++i)
      if (stbi__get8(s) != png_sig[i]) return stbi__err("bad png sig","Not a PNG");
   return 1;
}
+
// Per-image PNG decoding state.
typedef struct
{
   stbi__context *s;                  // source stream
   stbi_uc *idata, *expanded, *out;   // concatenated IDAT bytes, inflated scanlines, final pixels
   int depth;                         // bit depth from IHDR (1/2/4/8/16)
} stbi__png;


// PNG scanline filter types (values 0-4 per the PNG spec), plus two
// synthetic variants applied on the first scanline so the filter code
// never needs a dummy all-zero "previous row".
enum {
   STBI__F_none=0,
   STBI__F_sub=1,
   STBI__F_up=2,
   STBI__F_avg=3,
   STBI__F_paeth=4,
   // synthetic filters used for first scanline to avoid needing a dummy row of 0s
   STBI__F_avg_first,
   STBI__F_paeth_first
};

// Maps each real filter type to the variant used on row 0 (no prior row):
// "up" degenerates to "none"; "avg"/"paeth" use their first-row forms.
static stbi_uc first_row_filter[5] =
{
   STBI__F_none,
   STBI__F_sub,
   STBI__F_none,
   STBI__F_avg_first,
   STBI__F_paeth_first
};
+
+static int stbi__paeth(int a, int b, int c)
+{
+ int p = a + b - c;
+ int pa = abs(p-a);
+ int pb = abs(p-b);
+ int pc = abs(p-c);
+ if (pa <= pb && pa <= pc) return a;
+ if (pb <= pc) return b;
+ return c;
+}
+
// Multipliers that expand a 1/2/4/8-bit sample to the full 0..255 range,
// indexed by bit depth (entries for unused depths are 0).
static const stbi_uc stbi__depth_scale_table[9] = { 0, 0xff, 0x55, 0, 0x11, 0,0,0, 0x01 };
+
+// create the png data from post-deflated data
+static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 raw_len, int out_n, stbi__uint32 x, stbi__uint32 y, int depth, int color)
+{
+ int bytes = (depth == 16? 2 : 1);
+ stbi__context *s = a->s;
+ stbi__uint32 i,j,stride = x*out_n*bytes;
+ stbi__uint32 img_len, img_width_bytes;
+ int k;
+ int img_n = s->img_n; // copy it into a local for later
+
+ int output_bytes = out_n*bytes;
+ int filter_bytes = img_n*bytes;
+ int width = x;
+
+ STBI_ASSERT(out_n == s->img_n || out_n == s->img_n+1);
+ a->out = (stbi_uc *) stbi__malloc_mad3(x, y, output_bytes, 0); // extra bytes to write off the end into
+ if (!a->out) return stbi__err("outofmem", "Out of memory");
+
+ if (!stbi__mad3sizes_valid(img_n, x, depth, 7)) return stbi__err("too large", "Corrupt PNG");
+ img_width_bytes = (((img_n * x * depth) + 7) >> 3);
+ img_len = (img_width_bytes + 1) * y;
+
+ // we used to check for exact match between raw_len and img_len on non-interlaced PNGs,
+ // but issue #276 reported a PNG in the wild that had extra data at the end (all zeros),
+ // so just check for raw_len < img_len always.
+ if (raw_len < img_len) return stbi__err("not enough pixels","Corrupt PNG");
+
+ for (j=0; j < y; ++j) {
+ stbi_uc *cur = a->out + stride*j;
+ stbi_uc *prior;
+ int filter = *raw++;
+
+ if (filter > 4)
+ return stbi__err("invalid filter","Corrupt PNG");
+
+ if (depth < 8) {
+ STBI_ASSERT(img_width_bytes <= x);
+ cur += x*out_n - img_width_bytes; // store output to the rightmost img_len bytes, so we can decode in place
+ filter_bytes = 1;
+ width = img_width_bytes;
+ }
+ prior = cur - stride; // bugfix: need to compute this after 'cur +=' computation above
+
+ // if first row, use special filter that doesn't sample previous row
+ if (j == 0) filter = first_row_filter[filter];
+
+ // handle first byte explicitly
+ for (k=0; k < filter_bytes; ++k) {
+ switch (filter) {
+ case STBI__F_none : cur[k] = raw[k]; break;
+ case STBI__F_sub : cur[k] = raw[k]; break;
+ case STBI__F_up : cur[k] = STBI__BYTECAST(raw[k] + prior[k]); break;
+ case STBI__F_avg : cur[k] = STBI__BYTECAST(raw[k] + (prior[k]>>1)); break;
+ case STBI__F_paeth : cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(0,prior[k],0)); break;
+ case STBI__F_avg_first : cur[k] = raw[k]; break;
+ case STBI__F_paeth_first: cur[k] = raw[k]; break;
+ }
+ }
+
+ if (depth == 8) {
+ if (img_n != out_n)
+ cur[img_n] = 255; // first pixel
+ raw += img_n;
+ cur += out_n;
+ prior += out_n;
+ } else if (depth == 16) {
+ if (img_n != out_n) {
+ cur[filter_bytes] = 255; // first pixel top byte
+ cur[filter_bytes+1] = 255; // first pixel bottom byte
+ }
+ raw += filter_bytes;
+ cur += output_bytes;
+ prior += output_bytes;
+ } else {
+ raw += 1;
+ cur += 1;
+ prior += 1;
+ }
+
+ // this is a little gross, so that we don't switch per-pixel or per-component
+ if (depth < 8 || img_n == out_n) {
+ int nk = (width - 1)*filter_bytes;
+ #define STBI__CASE(f) \
+ case f: \
+ for (k=0; k < nk; ++k)
+ switch (filter) {
+ // "none" filter turns into a memcpy here; make that explicit.
+ case STBI__F_none: memcpy(cur, raw, nk); break;
+ STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k-filter_bytes]); } break;
+ STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break;
+ STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k-filter_bytes])>>1)); } break;
+ STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],prior[k],prior[k-filter_bytes])); } break;
+ STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k-filter_bytes] >> 1)); } break;
+ STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],0,0)); } break;
+ }
+ #undef STBI__CASE
+ raw += nk;
+ } else {
+ STBI_ASSERT(img_n+1 == out_n);
+ #define STBI__CASE(f) \
+ case f: \
+ for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes) \
+ for (k=0; k < filter_bytes; ++k)
+ switch (filter) {
+ STBI__CASE(STBI__F_none) { cur[k] = raw[k]; } break;
+ STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k- output_bytes]); } break;
+ STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break;
+ STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k- output_bytes])>>1)); } break;
+ STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],prior[k],prior[k- output_bytes])); } break;
+ STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k- output_bytes] >> 1)); } break;
+ STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],0,0)); } break;
+ }
+ #undef STBI__CASE
+
+ // the loop above sets the high byte of the pixels' alpha, but for
+ // 16 bit png files we also need the low byte set. we'll do that here.
+ if (depth == 16) {
+ cur = a->out + stride*j; // start at the beginning of the row again
+ for (i=0; i < x; ++i,cur+=output_bytes) {
+ cur[filter_bytes+1] = 255;
+ }
+ }
+ }
+ }
+
+ // we make a separate pass to expand bits to pixels; for performance,
+ // this could run two scanlines behind the above code, so it won't
+ // intefere with filtering but will still be in the cache.
+ if (depth < 8) {
+ for (j=0; j < y; ++j) {
+ stbi_uc *cur = a->out + stride*j;
+ stbi_uc *in = a->out + stride*j + x*out_n - img_width_bytes;
+ // unpack 1/2/4-bit into a 8-bit buffer. allows us to keep the common 8-bit path optimal at minimal cost for 1/2/4-bit
+ // png guarante byte alignment, if width is not multiple of 8/4/2 we'll decode dummy trailing data that will be skipped in the later loop
+ stbi_uc scale = (color == 0) ? stbi__depth_scale_table[depth] : 1; // scale grayscale values to 0..255 range
+
+ // note that the final byte might overshoot and write more data than desired.
+ // we can allocate enough data that this never writes out of memory, but it
+ // could also overwrite the next scanline. can it overwrite non-empty data
+ // on the next scanline? yes, consider 1-pixel-wide scanlines with 1-bit-per-pixel.
+ // so we need to explicitly clamp the final ones
+
+ if (depth == 4) {
+ for (k=x*img_n; k >= 2; k-=2, ++in) {
+ *cur++ = scale * ((*in >> 4) );
+ *cur++ = scale * ((*in ) & 0x0f);
+ }
+ if (k > 0) *cur++ = scale * ((*in >> 4) );
+ } else if (depth == 2) {
+ for (k=x*img_n; k >= 4; k-=4, ++in) {
+ *cur++ = scale * ((*in >> 6) );
+ *cur++ = scale * ((*in >> 4) & 0x03);
+ *cur++ = scale * ((*in >> 2) & 0x03);
+ *cur++ = scale * ((*in ) & 0x03);
+ }
+ if (k > 0) *cur++ = scale * ((*in >> 6) );
+ if (k > 1) *cur++ = scale * ((*in >> 4) & 0x03);
+ if (k > 2) *cur++ = scale * ((*in >> 2) & 0x03);
+ } else if (depth == 1) {
+ for (k=x*img_n; k >= 8; k-=8, ++in) {
+ *cur++ = scale * ((*in >> 7) );
+ *cur++ = scale * ((*in >> 6) & 0x01);
+ *cur++ = scale * ((*in >> 5) & 0x01);
+ *cur++ = scale * ((*in >> 4) & 0x01);
+ *cur++ = scale * ((*in >> 3) & 0x01);
+ *cur++ = scale * ((*in >> 2) & 0x01);
+ *cur++ = scale * ((*in >> 1) & 0x01);
+ *cur++ = scale * ((*in ) & 0x01);
+ }
+ if (k > 0) *cur++ = scale * ((*in >> 7) );
+ if (k > 1) *cur++ = scale * ((*in >> 6) & 0x01);
+ if (k > 2) *cur++ = scale * ((*in >> 5) & 0x01);
+ if (k > 3) *cur++ = scale * ((*in >> 4) & 0x01);
+ if (k > 4) *cur++ = scale * ((*in >> 3) & 0x01);
+ if (k > 5) *cur++ = scale * ((*in >> 2) & 0x01);
+ if (k > 6) *cur++ = scale * ((*in >> 1) & 0x01);
+ }
+ if (img_n != out_n) {
+ int q;
+ // insert alpha = 255
+ cur = a->out + stride*j;
+ if (img_n == 1) {
+ for (q=x-1; q >= 0; --q) {
+ cur[q*2+1] = 255;
+ cur[q*2+0] = cur[q];
+ }
+ } else {
+ STBI_ASSERT(img_n == 3);
+ for (q=x-1; q >= 0; --q) {
+ cur[q*4+3] = 255;
+ cur[q*4+2] = cur[q*3+2];
+ cur[q*4+1] = cur[q*3+1];
+ cur[q*4+0] = cur[q*3+0];
+ }
+ }
+ }
+ }
+ } else if (depth == 16) {
+ // force the image data from big-endian to platform-native.
+ // this is done in a separate pass due to the decoding relying
+ // on the data being untouched, but could probably be done
+ // per-line during decode if care is taken.
+ stbi_uc *cur = a->out;
+ stbi__uint16 *cur16 = (stbi__uint16*)cur;
+
+ for(i=0; i < x*y*out_n; ++i,cur16++,cur+=2) {
+ *cur16 = (cur[0] << 8) | cur[1];
+ }
+ }
+
+ return 1;
+}
+
+static int stbi__create_png_image(stbi__png *a, stbi_uc *image_data, stbi__uint32 image_data_len, int out_n, int depth, int color, int interlaced)
+{
+ int bytes = (depth == 16 ? 2 : 1);
+ int out_bytes = out_n * bytes;
+ stbi_uc *final;
+ int p;
+ if (!interlaced)
+ return stbi__create_png_image_raw(a, image_data, image_data_len, out_n, a->s->img_x, a->s->img_y, depth, color);
+
+ // de-interlacing
+ final = (stbi_uc *) stbi__malloc_mad3(a->s->img_x, a->s->img_y, out_bytes, 0);
+ for (p=0; p < 7; ++p) {
+ int xorig[] = { 0,4,0,2,0,1,0 };
+ int yorig[] = { 0,0,4,0,2,0,1 };
+ int xspc[] = { 8,8,4,4,2,2,1 };
+ int yspc[] = { 8,8,8,4,4,2,2 };
+ int i,j,x,y;
+ // pass1_x[4] = 0, pass1_x[5] = 1, pass1_x[12] = 1
+ x = (a->s->img_x - xorig[p] + xspc[p]-1) / xspc[p];
+ y = (a->s->img_y - yorig[p] + yspc[p]-1) / yspc[p];
+ if (x && y) {
+ stbi__uint32 img_len = ((((a->s->img_n * x * depth) + 7) >> 3) + 1) * y;
+ if (!stbi__create_png_image_raw(a, image_data, image_data_len, out_n, x, y, depth, color)) {
+ STBI_FREE(final);
+ return 0;
+ }
+ for (j=0; j < y; ++j) {
+ for (i=0; i < x; ++i) {
+ int out_y = j*yspc[p]+yorig[p];
+ int out_x = i*xspc[p]+xorig[p];
+ memcpy(final + out_y*a->s->img_x*out_bytes + out_x*out_bytes,
+ a->out + (j*x+i)*out_bytes, out_bytes);
+ }
+ }
+ STBI_FREE(a->out);
+ a->out = NULL;
+ image_data += img_len;
+ image_data_len -= img_len;
+ }
+ }
+ a->out = final;
+
+ return 1;
+}
+
// Apply a tRNS color key to an 8-bit image: any pixel whose color exactly
// matches tc[] gets alpha 0; everything else keeps the 255 already written
// by the expansion pass. out_n is 2 (gray+alpha) or 4 (RGBA). Returns 1.
static int stbi__compute_transparency(stbi__png *z, stbi_uc tc[3], int out_n)
{
   stbi__context *s = z->s;
   stbi__uint32 i, pixel_count = s->img_x * s->img_y;
   stbi_uc *p = z->out;

   // compute color-based transparency, assuming we've
   // already got 255 as the alpha value in the output
   STBI_ASSERT(out_n == 2 || out_n == 4);

   if (out_n == 2) {
      for (i=0; i < pixel_count; ++i) {
         p[1] = (p[0] == tc[0] ? 0 : 255);
         p += 2;
      }
   } else {
      for (i=0; i < pixel_count; ++i) {
         if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2])
            p[3] = 0;
         p += 4;
      }
   }
   return 1;
}
+
// 16-bit variant of stbi__compute_transparency: apply a tRNS color key to
// a 16-bit-per-component image, zeroing alpha where the color matches
// tc[]. out_n is 2 or 4. Returns 1.
static int stbi__compute_transparency16(stbi__png *z, stbi__uint16 tc[3], int out_n)
{
   stbi__context *s = z->s;
   stbi__uint32 i, pixel_count = s->img_x * s->img_y;
   stbi__uint16 *p = (stbi__uint16*) z->out;

   // compute color-based transparency, assuming we've
   // already got 65535 as the alpha value in the output
   STBI_ASSERT(out_n == 2 || out_n == 4);

   if (out_n == 2) {
      for (i = 0; i < pixel_count; ++i) {
         p[1] = (p[0] == tc[0] ? 0 : 65535);
         p += 2;
      }
   } else {
      for (i = 0; i < pixel_count; ++i) {
         if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2])
            p[3] = 0;
         p += 4;
      }
   }
   return 1;
}
+
// Replace a->out (one palette index per pixel) with the expanded RGB or
// RGBA image looked up from 'palette' (stored as RGBA quads). pal_img_n
// selects 3 or 4 output components. Frees the old a->out on success.
// Returns 1 on success, 0 (via stbi__err) on allocation failure.
static int stbi__expand_png_palette(stbi__png *a, stbi_uc *palette, int len, int pal_img_n)
{
   stbi__uint32 i, pixel_count = a->s->img_x * a->s->img_y;
   stbi_uc *p, *temp_out, *orig = a->out;

   p = (stbi_uc *) stbi__malloc_mad2(pixel_count, pal_img_n, 0);
   if (p == NULL) return stbi__err("outofmem", "Out of memory");

   // between here and free(out) below, exitting would leak
   temp_out = p;

   if (pal_img_n == 3) {
      for (i=0; i < pixel_count; ++i) {
         int n = orig[i]*4;   // palette entries are 4 bytes apart even for RGB output
         p[0] = palette[n  ];
         p[1] = palette[n+1];
         p[2] = palette[n+2];
         p += 3;
      }
   } else {
      for (i=0; i < pixel_count; ++i) {
         int n = orig[i]*4;
         p[0] = palette[n  ];
         p[1] = palette[n+1];
         p[2] = palette[n+2];
         p[3] = palette[n+3];
         p += 4;
      }
   }
   STBI_FREE(a->out);
   a->out = temp_out;

   STBI_NOTUSED(len);

   return 1;
}
+
// Global decode options for iPhone (CgBI) PNGs. NOTE(review): plain
// globals — not threadsafe if toggled while another thread is decoding.
static int stbi__unpremultiply_on_load = 0;
static int stbi__de_iphone_flag = 0;

// Enable/disable undoing premultiplied alpha when loading iPhone PNGs.
STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply)
{
   stbi__unpremultiply_on_load = flag_true_if_should_unpremultiply;
}

// Enable/disable converting iPhone BGR PNGs back to RGB on load.
STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert)
{
   stbi__de_iphone_flag = flag_true_if_should_convert;
}
+
// Convert an iPhone (CgBI) PNG's pixels in place from BGR(A) to RGB(A),
// optionally undoing premultiplied alpha when
// stbi__unpremultiply_on_load is set. Operates on z->out, which must hold
// img_out_n == 3 or 4 components per pixel.
static void stbi__de_iphone(stbi__png *z)
{
   stbi__context *s = z->s;
   stbi__uint32 i, pixel_count = s->img_x * s->img_y;
   stbi_uc *p = z->out;

   if (s->img_out_n == 3) {  // convert bgr to rgb
      for (i=0; i < pixel_count; ++i) {
         stbi_uc t = p[0];
         p[0] = p[2];
         p[2] = t;
         p += 3;
      }
   } else {
      STBI_ASSERT(s->img_out_n == 4);
      if (stbi__unpremultiply_on_load) {
         // convert bgr to rgb and unpremultiply
         for (i=0; i < pixel_count; ++i) {
            stbi_uc a = p[3];
            stbi_uc t = p[0];
            if (a) {
               stbi_uc half = a / 2;  // +a/2 rounds to nearest instead of truncating
               p[0] = (p[2] * 255 + half) / a;
               p[1] = (p[1] * 255 + half) / a;
               p[2] = ( t   * 255 + half) / a;
            } else {
               // fully transparent: nothing to unpremultiply, just swap channels
               p[0] = p[2];
               p[2] = t;
            }
            p += 4;
         }
      } else {
         // convert bgr to rgb
         for (i=0; i < pixel_count; ++i) {
            stbi_uc t = p[0];
            p[0] = p[2];
            p[2] = t;
            p += 4;
         }
      }
   }
}
+
// Pack a 4-character chunk type into the big-endian stbi__uint32 form
// returned by stbi__get_chunk_header, for switch-case matching.
#define STBI__PNG_TYPE(a,b,c,d)  (((unsigned) (a) << 24) + ((unsigned) (b) << 16) + ((unsigned) (c) << 8) + (unsigned) (d))

// Walk the PNG chunk stream and drive the whole decode. 'scan' controls
// depth: STBI__SCAN_type stops after the signature, STBI__SCAN_header
// stops once dimensions/components are known, STBI__SCAN_load decodes the
// full image into z->out (at IEND: inflate IDAT, reverse filters,
// apply tRNS, de-iphone, and expand the palette). Returns 1 on success,
// 0 (via stbi__err) on failure.
static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp)
{
   stbi_uc palette[1024], pal_img_n=0;     // palette stored as up to 256 RGBA quads
   stbi_uc has_trans=0, tc[3];
   stbi__uint16 tc16[3];
   stbi__uint32 ioff=0, idata_limit=0, i, pal_len=0;
   int first=1,k,interlace=0, color=0, is_iphone=0;
   stbi__context *s = z->s;

   z->expanded = NULL;
   z->idata = NULL;
   z->out = NULL;

   if (!stbi__check_png_header(s)) return 0;

   if (scan == STBI__SCAN_type) return 1;

   for (;;) {
      stbi__pngchunk c = stbi__get_chunk_header(s);
      switch (c.type) {
         case STBI__PNG_TYPE('C','g','B','I'):
            // Apple's proprietary marker chunk: remember it so IDAT is
            // inflated headerless and channels can be un-BGR'd later
            is_iphone = 1;
            stbi__skip(s, c.length);
            break;
         case STBI__PNG_TYPE('I','H','D','R'): {
            int comp,filter;
            if (!first) return stbi__err("multiple IHDR","Corrupt PNG");
            first = 0;
            if (c.length != 13) return stbi__err("bad IHDR len","Corrupt PNG");
            s->img_x = stbi__get32be(s); if (s->img_x > (1 << 24)) return stbi__err("too large","Very large image (corrupt?)");
            s->img_y = stbi__get32be(s); if (s->img_y > (1 << 24)) return stbi__err("too large","Very large image (corrupt?)");
            z->depth = stbi__get8(s);  if (z->depth != 1 && z->depth != 2 && z->depth != 4 && z->depth != 8 && z->depth != 16)  return stbi__err("1/2/4/8/16-bit only","PNG not supported: 1/2/4/8/16-bit only");
            color = stbi__get8(s);  if (color > 6)         return stbi__err("bad ctype","Corrupt PNG");
            if (color == 3 && z->depth == 16)                  return stbi__err("bad ctype","Corrupt PNG");
            if (color == 3) pal_img_n = 3; else if (color & 1) return stbi__err("bad ctype","Corrupt PNG");
            comp  = stbi__get8(s);  if (comp) return stbi__err("bad comp method","Corrupt PNG");
            filter= stbi__get8(s);  if (filter) return stbi__err("bad filter method","Corrupt PNG");
            interlace = stbi__get8(s); if (interlace>1) return stbi__err("bad interlace method","Corrupt PNG");
            if (!s->img_x || !s->img_y) return stbi__err("0-pixel image","Corrupt PNG");
            if (!pal_img_n) {
               s->img_n = (color & 2 ? 3 : 1) + (color & 4 ? 1 : 0);
               if ((1 << 30) / s->img_x / s->img_n < s->img_y) return stbi__err("too large", "Image too large to decode");
               if (scan == STBI__SCAN_header) return 1;
            } else {
               // if paletted, then pal_n is our final components, and
               // img_n is # components to decompress/filter.
               s->img_n = 1;
               if ((1 << 30) / s->img_x / 4 < s->img_y) return stbi__err("too large","Corrupt PNG");
               // if SCAN_header, have to scan to see if we have a tRNS
            }
            break;
         }

         case STBI__PNG_TYPE('P','L','T','E'):  {
            if (first) return stbi__err("first not IHDR", "Corrupt PNG");
            if (c.length > 256*3) return stbi__err("invalid PLTE","Corrupt PNG");
            pal_len = c.length / 3;
            if (pal_len * 3 != c.length) return stbi__err("invalid PLTE","Corrupt PNG");
            for (i=0; i < pal_len; ++i) {
               palette[i*4+0] = stbi__get8(s);
               palette[i*4+1] = stbi__get8(s);
               palette[i*4+2] = stbi__get8(s);
               palette[i*4+3] = 255;   // opaque unless a later tRNS overrides it
            }
            break;
         }

         case STBI__PNG_TYPE('t','R','N','S'): {
            if (first) return stbi__err("first not IHDR", "Corrupt PNG");
            if (z->idata) return stbi__err("tRNS after IDAT","Corrupt PNG");
            if (pal_img_n) {
               // paletted: tRNS supplies per-entry alpha values
               if (scan == STBI__SCAN_header) { s->img_n = 4; return 1; }
               if (pal_len == 0) return stbi__err("tRNS before PLTE","Corrupt PNG");
               if (c.length > pal_len) return stbi__err("bad tRNS len","Corrupt PNG");
               pal_img_n = 4;
               for (i=0; i < c.length; ++i)
                  palette[i*4+3] = stbi__get8(s);
            } else {
               // non-paletted: tRNS supplies a single transparent color key
               if (!(s->img_n & 1)) return stbi__err("tRNS with alpha","Corrupt PNG");
               if (c.length != (stbi__uint32) s->img_n*2) return stbi__err("bad tRNS len","Corrupt PNG");
               has_trans = 1;
               if (z->depth == 16) {
                  for (k = 0; k < s->img_n; ++k) tc16[k] = (stbi__uint16)stbi__get16be(s); // copy the values as-is
               } else {
                  for (k = 0; k < s->img_n; ++k) tc[k] = (stbi_uc)(stbi__get16be(s) & 255) * stbi__depth_scale_table[z->depth]; // non 8-bit images will be larger
               }
            }
            break;
         }

         case STBI__PNG_TYPE('I','D','A','T'): {
            if (first) return stbi__err("first not IHDR", "Corrupt PNG");
            if (pal_img_n && !pal_len) return stbi__err("no PLTE","Corrupt PNG");
            if (scan == STBI__SCAN_header) { s->img_n = pal_img_n; return 1; }
            if ((int)(ioff + c.length) < (int)ioff) return 0;   // overflow guard on total IDAT size
            if (ioff + c.length > idata_limit) {
               // grow the concatenated-IDAT buffer geometrically
               stbi__uint32 idata_limit_old = idata_limit;
               stbi_uc *p;
               if (idata_limit == 0) idata_limit = c.length > 4096 ? c.length : 4096;
               while (ioff + c.length > idata_limit)
                  idata_limit *= 2;
               STBI_NOTUSED(idata_limit_old);
               p = (stbi_uc *) STBI_REALLOC_SIZED(z->idata, idata_limit_old, idata_limit); if (p == NULL) return stbi__err("outofmem", "Out of memory");
               z->idata = p;
            }
            if (!stbi__getn(s, z->idata+ioff,c.length)) return stbi__err("outofdata","Corrupt PNG");
            ioff += c.length;
            break;
         }

         case STBI__PNG_TYPE('I','E','N','D'): {
            stbi__uint32 raw_len, bpl;
            if (first) return stbi__err("first not IHDR", "Corrupt PNG");
            if (scan != STBI__SCAN_load) return 1;
            if (z->idata == NULL) return stbi__err("no IDAT","Corrupt PNG");
            // initial guess for decoded data size to avoid unnecessary reallocs
            bpl = (s->img_x * z->depth + 7) / 8; // bytes per line, per component
            raw_len = bpl * s->img_y * s->img_n /* pixels */ + s->img_y /* filter mode per row */;
            z->expanded = (stbi_uc *) stbi_zlib_decode_malloc_guesssize_headerflag((char *) z->idata, ioff, raw_len, (int *) &raw_len, !is_iphone);
            if (z->expanded == NULL) return 0; // zlib should set error
            STBI_FREE(z->idata); z->idata = NULL;
            // decide whether an alpha channel gets synthesized during filtering
            if ((req_comp == s->img_n+1 && req_comp != 3 && !pal_img_n) || has_trans)
               s->img_out_n = s->img_n+1;
            else
               s->img_out_n = s->img_n;
            if (!stbi__create_png_image(z, z->expanded, raw_len, s->img_out_n, z->depth, color, interlace)) return 0;
            if (has_trans) {
               if (z->depth == 16) {
                  if (!stbi__compute_transparency16(z, tc16, s->img_out_n)) return 0;
               } else {
                  if (!stbi__compute_transparency(z, tc, s->img_out_n)) return 0;
               }
            }
            if (is_iphone && stbi__de_iphone_flag && s->img_out_n > 2)
               stbi__de_iphone(z);
            if (pal_img_n) {
               // pal_img_n == 3 or 4
               s->img_n = pal_img_n; // record the actual colors we had
               s->img_out_n = pal_img_n;
               if (req_comp >= 3) s->img_out_n = req_comp;
               if (!stbi__expand_png_palette(z, palette, pal_len, s->img_out_n))
                  return 0;
            } else if (has_trans) {
               // non-paletted image with tRNS -> source image has (constant) alpha
               ++s->img_n;
            }
            STBI_FREE(z->expanded); z->expanded = NULL;
            return 1;
         }

         default:
            // if critical, fail
            if (first) return stbi__err("first not IHDR", "Corrupt PNG");
            if ((c.type & (1 << 29)) == 0) {   // bit 5 of the first type byte: 0 = critical chunk
               #ifndef STBI_NO_FAILURE_STRINGS
               // not threadsafe
               static char invalid_chunk[] = "XXXX PNG chunk not known";
               invalid_chunk[0] = STBI__BYTECAST(c.type >> 24);
               invalid_chunk[1] = STBI__BYTECAST(c.type >> 16);
               invalid_chunk[2] = STBI__BYTECAST(c.type >>  8);
               invalid_chunk[3] = STBI__BYTECAST(c.type >>  0);
               #endif
               return stbi__err(invalid_chunk, "PNG not supported: unknown PNG chunk type");
            }
            stbi__skip(s, c.length);   // ancillary chunk: safe to ignore
            break;
      }
      // end of PNG chunk, read and skip CRC
      stbi__get32be(s);
   }
}
+
// Top-level PNG decode: parse the file, transfer ownership of the pixel
// buffer out of 'p', convert to req_comp components if requested, and
// report dimensions/components through x/y/n. Frees all intermediate
// buffers regardless of outcome. Returns the pixel buffer (caller frees)
// or NULL on failure.
static void *stbi__do_png(stbi__png *p, int *x, int *y, int *n, int req_comp, stbi__result_info *ri)
{
   void *result=NULL;
   if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error");
   if (stbi__parse_png_file(p, STBI__SCAN_load, req_comp)) {
      if (p->depth < 8)
         ri->bits_per_channel = 8;   // sub-byte depths were expanded to 8-bit during decode
      else
         ri->bits_per_channel = p->depth;
      result = p->out;
      p->out = NULL;   // ownership transferred to 'result'
      if (req_comp && req_comp != p->s->img_out_n) {
         if (ri->bits_per_channel == 8)
            result = stbi__convert_format((unsigned char *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y);
         else
            result = stbi__convert_format16((stbi__uint16 *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y);
         p->s->img_out_n = req_comp;
         if (result == NULL) return result;
      }
      *x = p->s->img_x;
      *y = p->s->img_y;
      if (n) *n = p->s->img_n;
   }
   STBI_FREE(p->out);      p->out      = NULL;
   STBI_FREE(p->expanded); p->expanded = NULL;
   STBI_FREE(p->idata);    p->idata    = NULL;

   return result;
}
+
+static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
+{
+ stbi__png p;
+ p.s = s;
+ return stbi__do_png(&p, x,y,comp,req_comp, ri);
+}
+
+static int stbi__png_test(stbi__context *s)
+{
+ int r;
+ r = stbi__check_png_header(s);
+ stbi__rewind(s);
+ return r;
+}
+
// Header-only parse: fill in x/y/comp (each optional) from the PNG
// header without decoding pixels. Rewinds the stream and returns 0 on
// failure; returns 1 on success (stream left after the header scan).
static int stbi__png_info_raw(stbi__png *p, int *x, int *y, int *comp)
{
   if (!stbi__parse_png_file(p, STBI__SCAN_header, 0)) {
      stbi__rewind( p->s );
      return 0;
   }
   if (x) *x = p->s->img_x;
   if (y) *y = p->s->img_y;
   if (comp) *comp = p->s->img_n;
   return 1;
}
+
+static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp)
+{
+ stbi__png p;
+ p.s = s;
+ return stbi__png_info_raw(&p, x, y, comp);
+}
+
// Returns 1 if the stream is a PNG with 16-bit channel depth, else 0.
// Rewinds the stream on the "not 16-bit" paths so other probes can run.
static int stbi__png_is16(stbi__context *s)
{
   stbi__png p;
   p.s = s;
   if (!stbi__png_info_raw(&p, NULL, NULL, NULL))
	   return 0;
   if (p.depth != 16) {
      stbi__rewind(p.s);
      return 0;
   }
   return 1;
}
+#endif
+
+// Microsoft/Windows BMP image
+
+#ifndef STBI_NO_BMP
// Probe for a BMP file: check the "BM" magic, skip the rest of the file
// header, and accept only the known info-header sizes (BITMAPCOREHEADER
// through BITMAPV5HEADER). Leaves the stream position where it stops.
static int stbi__bmp_test_raw(stbi__context *s)
{
   int r;
   int sz;
   if (stbi__get8(s) != 'B') return 0;
   if (stbi__get8(s) != 'M') return 0;
   stbi__get32le(s); // discard filesize
   stbi__get16le(s); // discard reserved
   stbi__get16le(s); // discard reserved
   stbi__get32le(s); // discard data offset
   sz = stbi__get32le(s);
   r = (sz == 12 || sz == 40 || sz == 56 || sz == 108 || sz == 124);
   return r;
}

// Format probe wrapper: run the raw test, then rewind the stream.
static int stbi__bmp_test(stbi__context *s)
{
   int r = stbi__bmp_test_raw(s);
   stbi__rewind(s);
   return r;
}
+
+
+// returns 0..31 for the highest set bit
+static int stbi__high_bit(unsigned int z)
+{
+ int n=0;
+ if (z == 0) return -1;
+ if (z >= 0x10000) n += 16, z >>= 16;
+ if (z >= 0x00100) n += 8, z >>= 8;
+ if (z >= 0x00010) n += 4, z >>= 4;
+ if (z >= 0x00004) n += 2, z >>= 2;
+ if (z >= 0x00002) n += 1, z >>= 1;
+ return n;
+}
+
// Population count: number of set bits in 'a' (0..32). Uses Kernighan's
// trick of repeatedly clearing the lowest set bit, which runs once per
// set bit rather than once per word width.
static int stbi__bitcount(unsigned int a)
{
   int count = 0;
   while (a) {
      a &= a - 1; // clear the lowest set bit
      ++count;
   }
   return count;
}
+
+// extract an arbitrarily-aligned N-bit value (N=bits)
+// from v, and then make it 8-bits long and fractionally
+// extend it to full full range.
+static int stbi__shiftsigned(int v, int shift, int bits)
+{
+ static unsigned int mul_table[9] = {
+ 0,
+ 0xff/*0b11111111*/, 0x55/*0b01010101*/, 0x49/*0b01001001*/, 0x11/*0b00010001*/,
+ 0x21/*0b00100001*/, 0x41/*0b01000001*/, 0x81/*0b10000001*/, 0x01/*0b00000001*/,
+ };
+ static unsigned int shift_table[9] = {
+ 0, 0,0,1,0,2,4,6,0,
+ };
+ if (shift < 0)
+ v <<= -shift;
+ else
+ v >>= shift;
+ STBI_ASSERT(v >= 0 && v < 256);
+ v >>= (8-bits);
+ STBI_ASSERT(bits >= 0 && bits <= 8);
+ return (int) ((unsigned) v * mul_table[bits]) >> shift_table[bits];
+}
+
// Parsed BMP header fields: bits per pixel, pixel-data offset, info
// header size, and the R/G/B/A channel bitmasks (for 16/32-bpp BI_BITFIELDS).
typedef struct
{
   int bpp, offset, hsz;
   unsigned int mr,mg,mb,ma, all_a;
} stbi__bmp_data;

// Parse the BMP file header plus info header (core/v1/v3/v4/v5 variants,
// distinguished by hsz = 12/40/56/108/124), filling 'info' and setting
// s->img_x/img_y. Returns a non-NULL dummy pointer on success, NULL (via
// stbi__errpuc) on failure. RLE-compressed BMPs are rejected.
static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info)
{
   int hsz;
   if (stbi__get8(s) != 'B' || stbi__get8(s) != 'M') return stbi__errpuc("not BMP", "Corrupt BMP");
   stbi__get32le(s); // discard filesize
   stbi__get16le(s); // discard reserved
   stbi__get16le(s); // discard reserved
   info->offset = stbi__get32le(s);
   info->hsz = hsz = stbi__get32le(s);
   info->mr = info->mg = info->mb = info->ma = 0;

   if (hsz != 12 && hsz != 40 && hsz != 56 && hsz != 108 && hsz != 124) return stbi__errpuc("unknown BMP", "BMP type not supported: unknown");
   if (hsz == 12) {
      // BITMAPCOREHEADER: 16-bit dimensions
      s->img_x = stbi__get16le(s);
      s->img_y = stbi__get16le(s);
   } else {
      s->img_x = stbi__get32le(s);
      s->img_y = stbi__get32le(s);
   }
   if (stbi__get16le(s) != 1) return stbi__errpuc("bad BMP", "bad BMP");   // planes must be 1
   info->bpp = stbi__get16le(s);
   if (hsz != 12) {
      int compress = stbi__get32le(s);
      if (compress == 1 || compress == 2) return stbi__errpuc("BMP RLE", "BMP type not supported: RLE");
      stbi__get32le(s); // discard sizeof
      stbi__get32le(s); // discard hres
      stbi__get32le(s); // discard vres
      stbi__get32le(s); // discard colorsused
      stbi__get32le(s); // discard max important
      if (hsz == 40 || hsz == 56) {
         if (hsz == 56) {
            stbi__get32le(s);
            stbi__get32le(s);
            stbi__get32le(s);
            stbi__get32le(s);
         }
         if (info->bpp == 16 || info->bpp == 32) {
            if (compress == 0) {
               // BI_RGB: fixed default channel layouts
               if (info->bpp == 32) {
                  info->mr = 0xffu << 16;
                  info->mg = 0xffu <<  8;
                  info->mb = 0xffu <<  0;
                  info->ma = 0xffu << 24;
                  info->all_a = 0; // if all_a is 0 at end, then we loaded alpha channel but it was all 0
               } else {
                  info->mr = 31u << 10;
                  info->mg = 31u <<  5;
                  info->mb = 31u <<  0;
               }
            } else if (compress == 3) {
               // BI_BITFIELDS: explicit channel masks follow the header
               info->mr = stbi__get32le(s);
               info->mg = stbi__get32le(s);
               info->mb = stbi__get32le(s);
               // not documented, but generated by photoshop and handled by mspaint
               if (info->mr == info->mg && info->mg == info->mb) {
                  // ?!?!?
                  return stbi__errpuc("bad BMP", "bad BMP");
               }
            } else
               return stbi__errpuc("bad BMP", "bad BMP");
         }
      } else {
         // V4/V5 headers always carry explicit masks plus color-space data
         int i;
         if (hsz != 108 && hsz != 124)
            return stbi__errpuc("bad BMP", "bad BMP");
         info->mr = stbi__get32le(s);
         info->mg = stbi__get32le(s);
         info->mb = stbi__get32le(s);
         info->ma = stbi__get32le(s);
         stbi__get32le(s); // discard color space
         for (i=0; i < 12; ++i)
            stbi__get32le(s); // discard color space parameters
         if (hsz == 124) {
            stbi__get32le(s); // discard rendering intent
            stbi__get32le(s); // discard offset of profile data
            stbi__get32le(s); // discard size of profile data
            stbi__get32le(s); // discard reserved
         }
      }
   }
   return (void *) 1;   // dummy non-NULL success marker; only NULL-ness is checked
}
+
+
// Loader entry point for BMP: parse the header, then decode either the
// palettized path (bpp < 16, expanding indices through the color table)
// or the direct path (16/24/32 bpp, via fixed layouts or channel masks).
// Handles bottom-up row order, row padding to 4 bytes, all-zero alpha
// channels, and optional conversion to req_comp components. Returns the
// pixel buffer (caller frees) or NULL on failure.
static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
{
   stbi_uc *out;
   unsigned int mr=0,mg=0,mb=0,ma=0, all_a;
   stbi_uc pal[256][4];
   int psize=0,i,j,width;
   int flip_vertically, pad, target;
   stbi__bmp_data info;
   STBI_NOTUSED(ri);

   info.all_a = 255;
   if (stbi__bmp_parse_header(s, &info) == NULL)
      return NULL; // error code already set

   flip_vertically = ((int) s->img_y) > 0;   // positive height = bottom-up rows
   s->img_y = abs((int) s->img_y);

   mr = info.mr;
   mg = info.mg;
   mb = info.mb;
   ma = info.ma;
   all_a = info.all_a;

   if (info.hsz == 12) {
      // core header: palette entries are 3 bytes each
      if (info.bpp < 24)
         psize = (info.offset - 14 - 24) / 3;
   } else {
      // later headers: palette entries are 4 bytes each
      if (info.bpp < 16)
         psize = (info.offset - 14 - info.hsz) >> 2;
   }

   s->img_n = ma ? 4 : 3;
   if (req_comp && req_comp >= 3) // we can directly decode 3 or 4
      target = req_comp;
   else
      target = s->img_n; // if they want monochrome, we'll post-convert

   // sanity-check size
   if (!stbi__mad3sizes_valid(target, s->img_x, s->img_y, 0))
      return stbi__errpuc("too large", "Corrupt BMP");

   out = (stbi_uc *) stbi__malloc_mad3(target, s->img_x, s->img_y, 0);
   if (!out) return stbi__errpuc("outofmem", "Out of memory");
   if (info.bpp < 16) {
      // palettized path: read the color table, then expand indices
      int z=0;
      if (psize == 0 || psize > 256) { STBI_FREE(out); return stbi__errpuc("invalid", "Corrupt BMP"); }
      for (i=0; i < psize; ++i) {
         pal[i][2] = stbi__get8(s);   // BMP palettes are stored BGR
         pal[i][1] = stbi__get8(s);
         pal[i][0] = stbi__get8(s);
         if (info.hsz != 12) stbi__get8(s);
         pal[i][3] = 255;
      }
      stbi__skip(s, info.offset - 14 - info.hsz - psize * (info.hsz == 12 ? 3 : 4));
      if (info.bpp == 1) width = (s->img_x + 7) >> 3;
      else if (info.bpp == 4) width = (s->img_x + 1) >> 1;
      else if (info.bpp == 8) width = s->img_x;
      else { STBI_FREE(out); return stbi__errpuc("bad bpp", "Corrupt BMP"); }
      pad = (-width)&3;   // each row is padded to a 4-byte boundary
      if (info.bpp == 1) {
         for (j=0; j < (int) s->img_y; ++j) {
            int bit_offset = 7, v = stbi__get8(s);
            for (i=0; i < (int) s->img_x; ++i) {
               int color = (v>>bit_offset)&0x1;
               out[z++] = pal[color][0];
               out[z++] = pal[color][1];
               out[z++] = pal[color][2];
               if((--bit_offset) < 0) {
                  bit_offset = 7;
                  v = stbi__get8(s);
               }
            }
            stbi__skip(s, pad);
         }
      } else {
         // 4/8-bpp: two pixels per loop iteration (one byte at 4 bpp)
         for (j=0; j < (int) s->img_y; ++j) {
            for (i=0; i < (int) s->img_x; i += 2) {
               int v=stbi__get8(s),v2=0;
               if (info.bpp == 4) {
                  v2 = v & 15;
                  v >>= 4;
               }
               out[z++] = pal[v][0];
               out[z++] = pal[v][1];
               out[z++] = pal[v][2];
               if (target == 4) out[z++] = 255;
               if (i+1 == (int) s->img_x) break;
               v = (info.bpp == 8) ? stbi__get8(s) : v2;
               out[z++] = pal[v][0];
               out[z++] = pal[v][1];
               out[z++] = pal[v][2];
               if (target == 4) out[z++] = 255;
            }
            stbi__skip(s, pad);
         }
      }
   } else {
      // direct-color path (16/24/32 bpp)
      int rshift=0,gshift=0,bshift=0,ashift=0,rcount=0,gcount=0,bcount=0,acount=0;
      int z = 0;
      int easy=0;
      stbi__skip(s, info.offset - 14 - info.hsz);
      if (info.bpp == 24) width = 3 * s->img_x;
      else if (info.bpp == 16) width = 2*s->img_x;
      else /* bpp = 32 and pad = 0 */ width=0;
      pad = (-width) & 3;
      if (info.bpp == 24) {
         easy = 1;   // plain BGR bytes
      } else if (info.bpp == 32) {
         if (mb == 0xff && mg == 0xff00 && mr == 0x00ff0000 && ma == 0xff000000)
            easy = 2;   // standard BGRA byte order, no mask math needed
      }
      if (!easy) {
         if (!mr || !mg || !mb) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); }
         // right shift amt to put high bit in position #7
         rshift = stbi__high_bit(mr)-7; rcount = stbi__bitcount(mr);
         gshift = stbi__high_bit(mg)-7; gcount = stbi__bitcount(mg);
         bshift = stbi__high_bit(mb)-7; bcount = stbi__bitcount(mb);
         ashift = stbi__high_bit(ma)-7; acount = stbi__bitcount(ma);
      }
      for (j=0; j < (int) s->img_y; ++j) {
         if (easy) {
            for (i=0; i < (int) s->img_x; ++i) {
               unsigned char a;
               out[z+2] = stbi__get8(s);   // file order is B,G,R(,A)
               out[z+1] = stbi__get8(s);
               out[z+0] = stbi__get8(s);
               z += 3;
               a = (easy == 2 ? stbi__get8(s) : 255);
               all_a |= a;
               if (target == 4) out[z++] = a;
            }
         } else {
            int bpp = info.bpp;
            for (i=0; i < (int) s->img_x; ++i) {
               stbi__uint32 v = (bpp == 16 ? (stbi__uint32) stbi__get16le(s) : stbi__get32le(s));
               unsigned int a;
               out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mr, rshift, rcount));
               out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mg, gshift, gcount));
               out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mb, bshift, bcount));
               a = (ma ? stbi__shiftsigned(v & ma, ashift, acount) : 255);
               all_a |= a;
               if (target == 4) out[z++] = STBI__BYTECAST(a);
            }
         }
         stbi__skip(s, pad);
      }
   }

   // if alpha channel is all 0s, replace with all 255s
   if (target == 4 && all_a == 0)
      for (i=4*s->img_x*s->img_y-1; i >= 0; i -= 4)
         out[i] = 255;

   if (flip_vertically) {
      stbi_uc t;
      for (j=0; j < (int) s->img_y>>1; ++j) {
         stbi_uc *p1 = out +      j     *s->img_x*target;
         stbi_uc *p2 = out + (s->img_y-1-j)*s->img_x*target;
         for (i=0; i < (int) s->img_x*target; ++i) {
            t = p1[i], p1[i] = p2[i], p2[i] = t;
         }
      }
   }

   if (req_comp && req_comp != target) {
      out = stbi__convert_format(out, target, req_comp, s->img_x, s->img_y);
      if (out == NULL) return out; // stbi__convert_format frees input on failure
   }

   *x = s->img_x;
   *y = s->img_y;
   if (comp) *comp = s->img_n;
   return out;
}
+#endif
+
+// Targa Truevision - TGA
+// by Jonathan Dummer
+#ifndef STBI_NO_TGA
+// returns STBI_rgb or whatever, 0 on error
+// Map a TGA bits-per-pixel value to an stb component count.
+// is_grey selects greyscale interpretation for 16bpp (grey+alpha vs RGB555).
+// If is_rgb16 is non-NULL it is set to 1 when the pixel data is packed
+// 15/16-bit RGB that must be expanded via stbi__tga_read_rgb16.
+// Returns STBI_grey/STBI_grey_alpha/STBI_rgb/4, or 0 for an unsupported depth.
+static int stbi__tga_get_comp(int bits_per_pixel, int is_grey, int* is_rgb16)
+{
+ // only RGB or RGBA (incl. 16bit) or grey allowed
+ if (is_rgb16) *is_rgb16 = 0;
+ switch(bits_per_pixel) {
+ case 8: return STBI_grey;
+ case 16: if(is_grey) return STBI_grey_alpha;
+ // fallthrough
+ case 15: if(is_rgb16) *is_rgb16 = 1;
+ return STBI_rgb;
+ case 24: // fallthrough
+ case 32: return bits_per_pixel/8;
+ default: return 0;
+ }
+}
+
+// Parse just enough of a TGA header to report width/height/components.
+// On any validation failure the stream is rewound and 0 is returned;
+// on success returns 1 and fills *x/*y/*comp (each pointer may be NULL).
+static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp)
+{
+ int tga_w, tga_h, tga_comp, tga_image_type, tga_bits_per_pixel, tga_colormap_bpp;
+ int sz, tga_colormap_type;
+ stbi__get8(s); // discard Offset
+ tga_colormap_type = stbi__get8(s); // colormap type
+ if( tga_colormap_type > 1 ) {
+ stbi__rewind(s);
+ return 0; // only RGB or indexed allowed
+ }
+ tga_image_type = stbi__get8(s); // image type
+ if ( tga_colormap_type == 1 ) { // colormapped (paletted) image
+ // paletted images must be image type 1 (raw) or 9 (RLE)
+ if (tga_image_type != 1 && tga_image_type != 9) {
+ stbi__rewind(s);
+ return 0;
+ }
+ stbi__skip(s,4); // skip index of first colormap entry and number of entries
+ sz = stbi__get8(s); // check bits per palette color entry
+ if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) {
+ stbi__rewind(s);
+ return 0;
+ }
+ stbi__skip(s,4); // skip image x and y origin
+ tga_colormap_bpp = sz;
+ } else { // "normal" image w/o colormap - only RGB or grey allowed, +/- RLE
+ if ( (tga_image_type != 2) && (tga_image_type != 3) && (tga_image_type != 10) && (tga_image_type != 11) ) {
+ stbi__rewind(s);
+ return 0; // only RGB or grey allowed, +/- RLE
+ }
+ stbi__skip(s,9); // skip colormap specification and image x/y origin
+ tga_colormap_bpp = 0;
+ }
+ tga_w = stbi__get16le(s);
+ if( tga_w < 1 ) {
+ stbi__rewind(s);
+ return 0; // test width
+ }
+ tga_h = stbi__get16le(s);
+ if( tga_h < 1 ) {
+ stbi__rewind(s);
+ return 0; // test height
+ }
+ tga_bits_per_pixel = stbi__get8(s); // bits per pixel
+ stbi__get8(s); // ignore alpha bits
+ if (tga_colormap_bpp != 0) {
+ if((tga_bits_per_pixel != 8) && (tga_bits_per_pixel != 16)) {
+ // when using a colormap, tga_bits_per_pixel is the size of the indexes
+ // I don't think anything but 8 or 16bit indexes makes sense
+ stbi__rewind(s);
+ return 0;
+ }
+ // component count comes from the palette entry size, not the index size
+ tga_comp = stbi__tga_get_comp(tga_colormap_bpp, 0, NULL);
+ } else {
+ tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3) || (tga_image_type == 11), NULL);
+ }
+ if(!tga_comp) {
+ stbi__rewind(s);
+ return 0;
+ }
+ if (x) *x = tga_w;
+ if (y) *y = tga_h;
+ if (comp) *comp = tga_comp;
+ return 1; // seems to have passed everything
+}
+
+// Probe whether the stream looks like a TGA file (TGA has no magic number,
+// so this sanity-checks the header fields). Always rewinds the stream;
+// returns 1 if the header is plausible, 0 otherwise.
+static int stbi__tga_test(stbi__context *s)
+{
+ int res = 0;
+ int sz, tga_color_type;
+ stbi__get8(s); // discard Offset
+ tga_color_type = stbi__get8(s); // color type
+ if ( tga_color_type > 1 ) goto errorEnd; // only RGB or indexed allowed
+ sz = stbi__get8(s); // image type
+ if ( tga_color_type == 1 ) { // colormapped (paletted) image
+ if (sz != 1 && sz != 9) goto errorEnd; // colortype 1 demands image type 1 or 9
+ stbi__skip(s,4); // skip index of first colormap entry and number of entries
+ sz = stbi__get8(s); // check bits per palette color entry
+ if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd;
+ stbi__skip(s,4); // skip image x and y origin
+ } else { // "normal" image w/o colormap
+ if ( (sz != 2) && (sz != 3) && (sz != 10) && (sz != 11) ) goto errorEnd; // only RGB or grey allowed, +/- RLE
+ stbi__skip(s,9); // skip colormap specification and image x/y origin
+ }
+ if ( stbi__get16le(s) < 1 ) goto errorEnd; // test width
+ if ( stbi__get16le(s) < 1 ) goto errorEnd; // test height
+ sz = stbi__get8(s); // bits per pixel
+ if ( (tga_color_type == 1) && (sz != 8) && (sz != 16) ) goto errorEnd; // for colormapped images, bpp is size of an index
+ if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd;
+
+ res = 1; // if we got this far, everything's good and we can return 1 instead of 0
+
+errorEnd:
+ stbi__rewind(s);
+ return res;
+}
+
+// read 16bit value and convert to 24bit RGB
+// Read one little-endian 16-bit (RGB555) pixel from the stream and expand
+// it to three 8-bit channels in out[0..2], scaling each 5-bit value to 0..255.
+static void stbi__tga_read_rgb16(stbi__context *s, stbi_uc* out)
+{
+ stbi__uint16 px = (stbi__uint16)stbi__get16le(s);
+ stbi__uint16 fiveBitMask = 31;
+ // we have 3 channels with 5bits each
+ int r = (px >> 10) & fiveBitMask;
+ int g = (px >> 5) & fiveBitMask;
+ int b = px & fiveBitMask;
+ // Note that this saves the data in RGB(A) order, so it doesn't need to be swapped later
+ out[0] = (stbi_uc)((r * 255)/31);
+ out[1] = (stbi_uc)((g * 255)/31);
+ out[2] = (stbi_uc)((b * 255)/31);
+
+ // some people claim that the most significant bit might be used for alpha
+ // (possibly if an alpha-bit is set in the "image descriptor byte")
+ // but that only made 16bit test images completely translucent..
+ // so let's treat all 15 and 16bit TGAs as RGB with no alpha.
+}
+
+// Decode a full TGA image (raw or RLE, paletted or truecolor, 8/15/16/24/32bpp).
+// Returns a malloc'd pixel buffer of tga_comp (or req_comp) channels, or NULL
+// via stbi__errpuc on failure. *x/*y receive the dimensions; *comp (if non-NULL)
+// receives the native component count before any req_comp conversion.
+static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
+{
+ // read in the TGA header stuff
+ int tga_offset = stbi__get8(s);
+ int tga_indexed = stbi__get8(s);
+ int tga_image_type = stbi__get8(s);
+ int tga_is_RLE = 0;
+ int tga_palette_start = stbi__get16le(s);
+ int tga_palette_len = stbi__get16le(s);
+ int tga_palette_bits = stbi__get8(s);
+ int tga_x_origin = stbi__get16le(s);
+ int tga_y_origin = stbi__get16le(s);
+ int tga_width = stbi__get16le(s);
+ int tga_height = stbi__get16le(s);
+ int tga_bits_per_pixel = stbi__get8(s);
+ int tga_comp, tga_rgb16=0;
+ int tga_inverted = stbi__get8(s);
+ // int tga_alpha_bits = tga_inverted & 15; // the 4 lowest bits - unused (useless?)
+ // image data
+ unsigned char *tga_data;
+ unsigned char *tga_palette = NULL;
+ int i, j;
+ unsigned char raw_data[4] = {0};
+ int RLE_count = 0;
+ int RLE_repeating = 0;
+ int read_next_pixel = 1;
+ STBI_NOTUSED(ri);
+
+ // do a tiny bit of precessing
+ // image types 8..11 are the RLE variants of types 0..3
+ if ( tga_image_type >= 8 )
+ {
+ tga_image_type -= 8;
+ tga_is_RLE = 1;
+ }
+ // bit 5 of the descriptor byte: 0 = bottom-left origin (needs vertical flip)
+ tga_inverted = 1 - ((tga_inverted >> 5) & 1);
+
+ // If I'm paletted, then I'll use the number of bits from the palette
+ if ( tga_indexed ) tga_comp = stbi__tga_get_comp(tga_palette_bits, 0, &tga_rgb16);
+ else tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3), &tga_rgb16);
+
+ if(!tga_comp) // shouldn't really happen, stbi__tga_test() should have ensured basic consistency
+ return stbi__errpuc("bad format", "Can't find out TGA pixelformat");
+
+ // tga info
+ *x = tga_width;
+ *y = tga_height;
+ if (comp) *comp = tga_comp;
+
+ // overflow-checked w*h*comp before allocating
+ if (!stbi__mad3sizes_valid(tga_width, tga_height, tga_comp, 0))
+ return stbi__errpuc("too large", "Corrupt TGA");
+
+ tga_data = (unsigned char*)stbi__malloc_mad3(tga_width, tga_height, tga_comp, 0);
+ if (!tga_data) return stbi__errpuc("outofmem", "Out of memory");
+
+ // skip to the data's starting position (offset usually = 0)
+ stbi__skip(s, tga_offset );
+
+ if ( !tga_indexed && !tga_is_RLE && !tga_rgb16 ) {
+ // fast path: raw 8/24/32bpp rows can be read straight into the output
+ for (i=0; i < tga_height; ++i) {
+ int row = tga_inverted ? tga_height -i - 1 : i;
+ stbi_uc *tga_row = tga_data + row*tga_width*tga_comp;
+ // NOTE(review): stbi__getn's return value is ignored here; a truncated
+ // file yields a partially-filled (but allocated) row rather than an error
+ stbi__getn(s, tga_row, tga_width * tga_comp);
+ }
+ } else {
+ // do I need to load a palette?
+ if ( tga_indexed)
+ {
+ // any data to skip? (offset usually = 0)
+ stbi__skip(s, tga_palette_start );
+ // load the palette
+ tga_palette = (unsigned char*)stbi__malloc_mad2(tga_palette_len, tga_comp, 0);
+ if (!tga_palette) {
+ STBI_FREE(tga_data);
+ return stbi__errpuc("outofmem", "Out of memory");
+ }
+ if (tga_rgb16) {
+ // 16-bit palette entries are expanded to 24-bit RGB on load
+ stbi_uc *pal_entry = tga_palette;
+ STBI_ASSERT(tga_comp == STBI_rgb);
+ for (i=0; i < tga_palette_len; ++i) {
+ stbi__tga_read_rgb16(s, pal_entry);
+ pal_entry += tga_comp;
+ }
+ } else if (!stbi__getn(s, tga_palette, tga_palette_len * tga_comp)) {
+ STBI_FREE(tga_data);
+ STBI_FREE(tga_palette);
+ return stbi__errpuc("bad palette", "Corrupt TGA");
+ }
+ }
+ // load the data
+ // RLE state machine: RLE_count pixels remain in the current packet;
+ // RLE_repeating distinguishes run packets (reuse raw_data) from raw packets
+ for (i=0; i < tga_width * tga_height; ++i)
+ {
+ // if I'm in RLE mode, do I need to get a RLE stbi__pngchunk?
+ if ( tga_is_RLE )
+ {
+ if ( RLE_count == 0 )
+ {
+ // yep, get the next byte as a RLE command
+ int RLE_cmd = stbi__get8(s);
+ RLE_count = 1 + (RLE_cmd & 127);
+ RLE_repeating = RLE_cmd >> 7;
+ read_next_pixel = 1;
+ } else if ( !RLE_repeating )
+ {
+ read_next_pixel = 1;
+ }
+ } else
+ {
+ read_next_pixel = 1;
+ }
+ // OK, if I need to read a pixel, do it now
+ if ( read_next_pixel )
+ {
+ // load however much data we did have
+ if ( tga_indexed )
+ {
+ // read in index, then perform the lookup
+ int pal_idx = (tga_bits_per_pixel == 8) ? stbi__get8(s) : stbi__get16le(s);
+ if ( pal_idx >= tga_palette_len ) {
+ // invalid index
+ pal_idx = 0;
+ }
+ pal_idx *= tga_comp;
+ for (j = 0; j < tga_comp; ++j) {
+ raw_data[j] = tga_palette[pal_idx+j];
+ }
+ } else if(tga_rgb16) {
+ STBI_ASSERT(tga_comp == STBI_rgb);
+ stbi__tga_read_rgb16(s, raw_data);
+ } else {
+ // read in the data raw
+ for (j = 0; j < tga_comp; ++j) {
+ raw_data[j] = stbi__get8(s);
+ }
+ }
+ // clear the reading flag for the next pixel
+ read_next_pixel = 0;
+ } // end of reading a pixel
+
+ // copy data
+ for (j = 0; j < tga_comp; ++j)
+ tga_data[i*tga_comp+j] = raw_data[j];
+
+ // in case we're in RLE mode, keep counting down
+ --RLE_count;
+ }
+ // do I need to invert the image?
+ // (the raw fast path flips while reading; this path flips afterwards)
+ if ( tga_inverted )
+ {
+ for (j = 0; j*2 < tga_height; ++j)
+ {
+ int index1 = j * tga_width * tga_comp;
+ int index2 = (tga_height - 1 - j) * tga_width * tga_comp;
+ for (i = tga_width * tga_comp; i > 0; --i)
+ {
+ unsigned char temp = tga_data[index1];
+ tga_data[index1] = tga_data[index2];
+ tga_data[index2] = temp;
+ ++index1;
+ ++index2;
+ }
+ }
+ }
+ // clear my palette, if I had one
+ if ( tga_palette != NULL )
+ {
+ STBI_FREE( tga_palette );
+ }
+ }
+
+ // swap RGB - if the source data was RGB16, it already is in the right order
+ // (TGA stores BGR on disk; stb returns RGB)
+ if (tga_comp >= 3 && !tga_rgb16)
+ {
+ unsigned char* tga_pixel = tga_data;
+ for (i=0; i < tga_width * tga_height; ++i)
+ {
+ unsigned char temp = tga_pixel[0];
+ tga_pixel[0] = tga_pixel[2];
+ tga_pixel[2] = temp;
+ tga_pixel += tga_comp;
+ }
+ }
+
+ // convert to target component count
+ if (req_comp && req_comp != tga_comp)
+ tga_data = stbi__convert_format(tga_data, tga_comp, req_comp, tga_width, tga_height);
+
+ // the things I do to get rid of an error message, and yet keep
+ // Microsoft's C compilers happy... [8^(
+ tga_palette_start = tga_palette_len = tga_palette_bits =
+ tga_x_origin = tga_y_origin = 0;
+ // OK, done
+ return tga_data;
+}
+#endif
+
+// *************************************************************************************************
+// Photoshop PSD loader -- PD by Thatcher Ulrich, integration by Nicolas Schulz, tweaked by STB
+
+#ifndef STBI_NO_PSD
+// Probe for a PSD file: the first four bytes must be the big-endian
+// magic 0x38425053 ("8BPS"). Rewinds the stream before returning.
+static int stbi__psd_test(stbi__context *s)
+{
+ int r = (stbi__get32be(s) == 0x38425053);
+ stbi__rewind(s);
+ return r;
+}
+
+// Decode one channel of PackBits-style RLE data into p, writing every 4th
+// byte (the output is interleaved RGBA). pixelCount is the number of values
+// expected. Returns 1 on success, 0 if a run would overrun the output.
+static int stbi__psd_decode_rle(stbi__context *s, stbi_uc *p, int pixelCount)
+{
+ int count, nleft, len;
+
+ count = 0;
+ while ((nleft = pixelCount - count) > 0) {
+ len = stbi__get8(s);
+ if (len == 128) {
+ // No-op.
+ } else if (len < 128) {
+ // Copy next len+1 bytes literally.
+ len++;
+ if (len > nleft) return 0; // corrupt data
+ count += len;
+ while (len) {
+ *p = stbi__get8(s);
+ p += 4; // stride 4: output is interleaved RGBA
+ len--;
+ }
+ } else if (len > 128) {
+ stbi_uc val;
+ // Next -len+1 bytes in the dest are replicated from next source byte.
+ // (Interpret len as a negative 8-bit int.)
+ len = 257 - len;
+ if (len > nleft) return 0; // corrupt data
+ val = stbi__get8(s);
+ count += len;
+ while (len) {
+ *p = val;
+ p += 4;
+ len--;
+ }
+ }
+ }
+
+ return 1;
+}
+
+// Decode a Photoshop PSD file (RGB color mode only, 8 or 16 bits per channel,
+// raw or RLE-compressed). Output is always 4 channels (RGBA) before any
+// req_comp conversion; bpc selects whether 16-bit input is kept as 16-bit
+// output (ri->bits_per_channel = 16) or truncated to 8-bit.
+// Returns a malloc'd buffer or NULL via stbi__errpuc.
+static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc)
+{
+ int pixelCount;
+ int channelCount, compression;
+ int channel, i;
+ int bitdepth;
+ int w,h;
+ stbi_uc *out;
+ STBI_NOTUSED(ri);
+
+ // Check identifier
+ if (stbi__get32be(s) != 0x38425053) // "8BPS"
+ return stbi__errpuc("not PSD", "Corrupt PSD image");
+
+ // Check file type version.
+ if (stbi__get16be(s) != 1)
+ return stbi__errpuc("wrong version", "Unsupported version of PSD image");
+
+ // Skip 6 reserved bytes.
+ stbi__skip(s, 6 );
+
+ // Read the number of channels (R, G, B, A, etc).
+ channelCount = stbi__get16be(s);
+ if (channelCount < 0 || channelCount > 16)
+ return stbi__errpuc("wrong channel count", "Unsupported number of channels in PSD image");
+
+ // Read the rows and columns of the image.
+ h = stbi__get32be(s);
+ w = stbi__get32be(s);
+
+ // Make sure the depth is 8 bits.
+ bitdepth = stbi__get16be(s);
+ if (bitdepth != 8 && bitdepth != 16)
+ return stbi__errpuc("unsupported bit depth", "PSD bit depth is not 8 or 16 bit");
+
+ // Make sure the color mode is RGB.
+ // Valid options are:
+ // 0: Bitmap
+ // 1: Grayscale
+ // 2: Indexed color
+ // 3: RGB color
+ // 4: CMYK color
+ // 7: Multichannel
+ // 8: Duotone
+ // 9: Lab color
+ if (stbi__get16be(s) != 3)
+ return stbi__errpuc("wrong color format", "PSD is not in RGB color format");
+
+ // Skip the Mode Data. (It's the palette for indexed color; other info for other modes.)
+ stbi__skip(s,stbi__get32be(s) );
+
+ // Skip the image resources. (resolution, pen tool paths, etc)
+ stbi__skip(s, stbi__get32be(s) );
+
+ // Skip the reserved data.
+ stbi__skip(s, stbi__get32be(s) );
+
+ // Find out if the data is compressed.
+ // Known values:
+ // 0: no compression
+ // 1: RLE compressed
+ compression = stbi__get16be(s);
+ if (compression > 1)
+ return stbi__errpuc("bad compression", "PSD has an unknown compression format");
+
+ // Check size
+ if (!stbi__mad3sizes_valid(4, w, h, 0))
+ return stbi__errpuc("too large", "Corrupt PSD");
+
+ // Create the destination image.
+
+ // 16-bit output is only produced for uncompressed 16-bit input when the
+ // caller asked for 16 bpc; RLE data is always decoded to 8-bit.
+ if (!compression && bitdepth == 16 && bpc == 16) {
+ out = (stbi_uc *) stbi__malloc_mad3(8, w, h, 0);
+ ri->bits_per_channel = 16;
+ } else
+ out = (stbi_uc *) stbi__malloc(4 * w*h);
+
+ if (!out) return stbi__errpuc("outofmem", "Out of memory");
+ pixelCount = w*h;
+
+ // Initialize the data to zero.
+ //memset( out, 0, pixelCount * 4 );
+
+ // Finally, the image data.
+ if (compression) {
+ // RLE as used by .PSD and .TIFF
+ // Loop until you get the number of unpacked bytes you are expecting:
+ // Read the next source byte into n.
+ // If n is between 0 and 127 inclusive, copy the next n+1 bytes literally.
+ // Else if n is between -127 and -1 inclusive, copy the next byte -n+1 times.
+ // Else if n is 128, noop.
+ // Endloop
+
+ // The RLE-compressed data is preceeded by a 2-byte data count for each row in the data,
+ // which we're going to just skip.
+ stbi__skip(s, h * channelCount * 2 );
+
+ // Read the RLE data by channel.
+ for (channel = 0; channel < 4; channel++) {
+ stbi_uc *p;
+
+ p = out+channel;
+ if (channel >= channelCount) {
+ // Fill this channel with default data.
+ // (opaque alpha for channel 3, zero for missing color channels)
+ for (i = 0; i < pixelCount; i++, p += 4)
+ *p = (channel == 3 ? 255 : 0);
+ } else {
+ // Read the RLE data.
+ if (!stbi__psd_decode_rle(s, p, pixelCount)) {
+ STBI_FREE(out);
+ return stbi__errpuc("corrupt", "bad RLE data");
+ }
+ }
+ }
+
+ } else {
+ // We're at the raw image data. It's each channel in order (Red, Green, Blue, Alpha, ...)
+ // where each channel consists of an 8-bit (or 16-bit) value for each pixel in the image.
+
+ // Read the data by channel.
+ for (channel = 0; channel < 4; channel++) {
+ if (channel >= channelCount) {
+ // Fill this channel with default data.
+ if (bitdepth == 16 && bpc == 16) {
+ stbi__uint16 *q = ((stbi__uint16 *) out) + channel;
+ stbi__uint16 val = channel == 3 ? 65535 : 0;
+ for (i = 0; i < pixelCount; i++, q += 4)
+ *q = val;
+ } else {
+ stbi_uc *p = out+channel;
+ stbi_uc val = channel == 3 ? 255 : 0;
+ for (i = 0; i < pixelCount; i++, p += 4)
+ *p = val;
+ }
+ } else {
+ if (ri->bits_per_channel == 16) { // output bpc
+ stbi__uint16 *q = ((stbi__uint16 *) out) + channel;
+ for (i = 0; i < pixelCount; i++, q += 4)
+ *q = (stbi__uint16) stbi__get16be(s);
+ } else {
+ stbi_uc *p = out+channel;
+ if (bitdepth == 16) { // input bpc
+ // 16-bit input, 8-bit output: keep the high byte
+ for (i = 0; i < pixelCount; i++, p += 4)
+ *p = (stbi_uc) (stbi__get16be(s) >> 8);
+ } else {
+ for (i = 0; i < pixelCount; i++, p += 4)
+ *p = stbi__get8(s);
+ }
+ }
+ }
+ }
+ }
+
+ // remove weird white matte from PSD
+ // (un-premultiply against white for partially transparent pixels)
+ if (channelCount >= 4) {
+ if (ri->bits_per_channel == 16) {
+ for (i=0; i < w*h; ++i) {
+ stbi__uint16 *pixel = (stbi__uint16 *) out + 4*i;
+ if (pixel[3] != 0 && pixel[3] != 65535) {
+ float a = pixel[3] / 65535.0f;
+ float ra = 1.0f / a;
+ float inv_a = 65535.0f * (1 - ra);
+ pixel[0] = (stbi__uint16) (pixel[0]*ra + inv_a);
+ pixel[1] = (stbi__uint16) (pixel[1]*ra + inv_a);
+ pixel[2] = (stbi__uint16) (pixel[2]*ra + inv_a);
+ }
+ }
+ } else {
+ for (i=0; i < w*h; ++i) {
+ unsigned char *pixel = out + 4*i;
+ if (pixel[3] != 0 && pixel[3] != 255) {
+ float a = pixel[3] / 255.0f;
+ float ra = 1.0f / a;
+ float inv_a = 255.0f * (1 - ra);
+ pixel[0] = (unsigned char) (pixel[0]*ra + inv_a);
+ pixel[1] = (unsigned char) (pixel[1]*ra + inv_a);
+ pixel[2] = (unsigned char) (pixel[2]*ra + inv_a);
+ }
+ }
+ }
+ }
+
+ // convert to desired output format
+ if (req_comp && req_comp != 4) {
+ if (ri->bits_per_channel == 16)
+ out = (stbi_uc *) stbi__convert_format16((stbi__uint16 *) out, 4, req_comp, w, h);
+ else
+ out = stbi__convert_format(out, 4, req_comp, w, h);
+ if (out == NULL) return out; // stbi__convert_format frees input on failure
+ }
+
+ if (comp) *comp = 4;
+ *y = h;
+ *x = w;
+
+ return out;
+}
+#endif
+
+// *************************************************************************************************
+// Softimage PIC loader
+// by Tom Seddon
+//
+// See http://softimage.wiki.softimage.com/index.php/INFO:_PIC_file_format
+// See http://ozviz.wasp.uwa.edu.au/~pbourke/dataformats/softimagepic/
+
+#ifndef STBI_NO_PIC
+// Consume 4 bytes from the stream and compare them to str.
+// Returns 1 on match, 0 otherwise (bytes are consumed either way).
+static int stbi__pic_is4(stbi__context *s,const char *str)
+{
+ int i;
+ for (i=0; i<4; ++i)
+ if (stbi__get8(s) != (stbi_uc)str[i])
+ return 0;
+
+ return 1;
+}
+
+// Core of the Softimage PIC sniff: check the 4-byte magic, skip the next
+// 84 header bytes, then require the "PICT" id. Does NOT rewind; the public
+// stbi__pic_test wrapper handles that.
+static int stbi__pic_test_core(stbi__context *s)
+{
+ int i;
+
+ if (!stbi__pic_is4(s,"\x53\x80\xF6\x34"))
+ return 0;
+
+ for(i=0;i<84;++i)
+ stbi__get8(s);
+
+ if (!stbi__pic_is4(s,"PICT"))
+ return 0;
+
+ return 1;
+}
+
+// One PIC channel-packet descriptor: chained size/type/channel-mask triple
+// read from the header (size is bits per channel, type is the compression
+// mode, channel is a bitmask of which RGBA channels the packet carries).
+typedef struct
+{
+ stbi_uc size,type,channel;
+} stbi__pic_packet;
+
+// Read one pixel's worth of values for the channels selected by the mask:
+// bit 0x80 = dest[0] (R) down to 0x10 = dest[3] (A). Unselected entries are
+// left untouched. Returns dest, or NULL (via stbi__errpuc) on premature EOF.
+static stbi_uc *stbi__readval(stbi__context *s, int channel, stbi_uc *dest)
+{
+ int mask=0x80, i;
+
+ for (i=0; i<4; ++i, mask>>=1) {
+ if (channel & mask) {
+ if (stbi__at_eof(s)) return stbi__errpuc("bad file","PIC file too short");
+ dest[i]=stbi__get8(s);
+ }
+ }
+
+ return dest;
+}
+
+// Copy src[i] to dest[i] for each channel selected by the mask
+// (same 0x80..0x10 bit convention as stbi__readval).
+static void stbi__copyval(int channel,stbi_uc *dest,const stbi_uc *src)
+{
+ int mask=0x80,i;
+
+ for (i=0;i<4; ++i, mask>>=1)
+ if (channel&mask)
+ dest[i]=src[i];
+}
+
+// Read the PIC packet chain, then decode every scanline for every packet.
+// result must be a caller-allocated width*height*4 RGBA buffer; *comp is set
+// to 3 or 4 depending on whether any packet carries alpha (bit 0x10).
+// Returns result on success, NULL/0 on error (caller frees result).
+static stbi_uc *stbi__pic_load_core(stbi__context *s,int width,int height,int *comp, stbi_uc *result)
+{
+ int act_comp=0,num_packets=0,y,chained;
+ stbi__pic_packet packets[10];
+
+ // this will (should...) cater for even some bizarre stuff like having data
+ // for the same channel in multiple packets.
+ do {
+ stbi__pic_packet *packet;
+
+ if (num_packets==sizeof(packets)/sizeof(packets[0]))
+ return stbi__errpuc("bad format","too many packets");
+
+ packet = &packets[num_packets++];
+
+ chained = stbi__get8(s);
+ packet->size = stbi__get8(s);
+ packet->type = stbi__get8(s);
+ packet->channel = stbi__get8(s);
+
+ act_comp |= packet->channel;
+
+ if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (reading packets)");
+ if (packet->size != 8) return stbi__errpuc("bad format","packet isn't 8bpp");
+ } while (chained);
+
+ *comp = (act_comp & 0x10 ? 4 : 3); // has alpha channel?
+
+ for(y=0; y<height; ++y) {
+ int packet_idx;
+
+ for(packet_idx=0; packet_idx < num_packets; ++packet_idx) {
+ stbi__pic_packet *packet = &packets[packet_idx];
+ stbi_uc *dest = result+y*width*4;
+
+ switch (packet->type) {
+ default:
+ return stbi__errpuc("bad format","packet has bad compression type");
+
+ case 0: {//uncompressed
+ int x;
+
+ for(x=0;x<width;++x, dest+=4)
+ if (!stbi__readval(s,packet->channel,dest))
+ return 0;
+ break;
+ }
+
+ case 1://Pure RLE
+ {
+ int left=width, i;
+
+ while (left>0) {
+ stbi_uc count,value[4];
+
+ count=stbi__get8(s);
+ if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pure read count)");
+
+ // clamp runs that overshoot the scanline instead of erroring
+ if (count > left)
+ count = (stbi_uc) left;
+
+ if (!stbi__readval(s,packet->channel,value)) return 0;
+
+ for(i=0; i<count; ++i,dest+=4)
+ stbi__copyval(packet->channel,dest,value);
+ left -= count;
+ }
+ }
+ break;
+
+ case 2: {//Mixed RLE
+ int left=width;
+ while (left>0) {
+ int count = stbi__get8(s), i;
+ if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (mixed read count)");
+
+ if (count >= 128) { // Repeated
+ stbi_uc value[4];
+
+ // count==128 means a 16-bit extended run length follows
+ if (count==128)
+ count = stbi__get16be(s);
+ else
+ count -= 127;
+ if (count > left)
+ return stbi__errpuc("bad file","scanline overrun");
+
+ if (!stbi__readval(s,packet->channel,value))
+ return 0;
+
+ for(i=0;i<count;++i, dest += 4)
+ stbi__copyval(packet->channel,dest,value);
+ } else { // Raw
+ ++count;
+ if (count>left) return stbi__errpuc("bad file","scanline overrun");
+
+ for(i=0;i<count;++i, dest+=4)
+ if (!stbi__readval(s,packet->channel,dest))
+ return 0;
+ }
+ left-=count;
+ }
+ break;
+ }
+ }
+ }
+ }
+
+ return result;
+}
+
+// Top-level Softimage PIC loader: skips the 92-byte header preamble, reads
+// dimensions, decodes into an RGBA intermediate buffer (pre-filled with
+// opaque white), then converts to req_comp. Returns NULL on failure.
+static void *stbi__pic_load(stbi__context *s,int *px,int *py,int *comp,int req_comp, stbi__result_info *ri)
+{
+ stbi_uc *result;
+ int i, x,y, internal_comp;
+ STBI_NOTUSED(ri);
+
+ if (!comp) comp = &internal_comp;
+
+ for (i=0; i<92; ++i)
+ stbi__get8(s);
+
+ x = stbi__get16be(s);
+ y = stbi__get16be(s);
+ if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pic header)");
+ if (!stbi__mad3sizes_valid(x, y, 4, 0)) return stbi__errpuc("too large", "PIC image too large to decode");
+
+ stbi__get32be(s); //skip `ratio'
+ stbi__get16be(s); //skip `fields'
+ stbi__get16be(s); //skip `pad'
+
+ // intermediate buffer is RGBA
+ result = (stbi_uc *) stbi__malloc_mad3(x, y, 4, 0);
+ // NOTE(review): the allocation result is not NULL-checked before memset;
+ // an out-of-memory condition here would dereference NULL
+ memset(result, 0xff, x*y*4);
+
+ if (!stbi__pic_load_core(s,x,y,comp, result)) {
+ STBI_FREE(result);
+ result=0;
+ }
+ *px = x;
+ *py = y;
+ if (req_comp == 0) req_comp = *comp;
+ result=stbi__convert_format(result,4,req_comp,x,y);
+
+ return result;
+}
+
+// Public PIC sniff: run the core test, then rewind the stream.
+static int stbi__pic_test(stbi__context *s)
+{
+ int r = stbi__pic_test_core(s);
+ stbi__rewind(s);
+ return r;
+}
+#endif
+
+// *************************************************************************************************
+// GIF loader -- public domain by Jean-Marc Lienher -- simplified/shrunk by stb
+
+#ifndef STBI_NO_GIF
+// One GIF LZW dictionary entry: prefix is the index of the previous code in
+// the chain (-1 terminates), first is the first byte of the expanded string,
+// suffix is the final byte this code appends.
+typedef struct
+{
+ stbi__int16 prefix;
+ stbi_uc first;
+ stbi_uc suffix;
+} stbi__gif_lzw;
+
+// Decoder state carried across frames of a (possibly animated) GIF.
+typedef struct
+{
+ int w,h;
+ stbi_uc *out; // output buffer (always 4 components)
+ stbi_uc *background; // The current "background" as far as a gif is concerned
+ stbi_uc *history; // per-pixel flag: written during the current frame
+ int flags, bgindex, ratio, transparent, eflags;
+ stbi_uc pal[256][4]; // global palette (RGBA)
+ stbi_uc lpal[256][4]; // local (per-frame) palette
+ stbi__gif_lzw codes[8192]; // LZW dictionary (12-bit code space)
+ stbi_uc *color_table; // palette in effect for the current frame
+ int parse, step; // interlace pass bookkeeping
+ int lflags; // local image descriptor flags
+ int start_x, start_y; // frame rectangle, in byte offsets into out
+ int max_x, max_y;
+ int cur_x, cur_y; // write cursor, in byte offsets
+ int line_size; // bytes per output row (w * 4)
+ int delay; // frame delay, in 1/1000ths of a second
+} stbi__gif;
+
+// Check for a "GIF87a" or "GIF89a" signature without rewinding.
+static int stbi__gif_test_raw(stbi__context *s)
+{
+ int sz;
+ if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') return 0;
+ sz = stbi__get8(s);
+ if (sz != '9' && sz != '7') return 0;
+ if (stbi__get8(s) != 'a') return 0;
+ return 1;
+}
+
+// Public GIF sniff: signature check followed by a rewind.
+static int stbi__gif_test(stbi__context *s)
+{
+ int r = stbi__gif_test_raw(s);
+ stbi__rewind(s);
+ return r;
+}
+
+// Read num_entries RGB palette entries from the stream into pal, stored
+// B,G,R order in pal[i][2..0] with alpha 255, except the entry matching
+// transp which gets alpha 0. Pass transp = -1 for no transparent index.
+static void stbi__gif_parse_colortable(stbi__context *s, stbi_uc pal[256][4], int num_entries, int transp)
+{
+ int i;
+ for (i=0; i < num_entries; ++i) {
+ pal[i][2] = stbi__get8(s);
+ pal[i][1] = stbi__get8(s);
+ pal[i][0] = stbi__get8(s);
+ pal[i][3] = transp == i ? 0 : 255;
+ }
+}
+
+// Parse the GIF logical screen descriptor into g. If is_info is set, stop
+// after the fixed header (used by stbi__gif_info_raw); otherwise also load
+// the global color table when flag bit 0x80 is present.
+// Returns 1 on success, 0 via stbi__err on a bad signature.
+static int stbi__gif_header(stbi__context *s, stbi__gif *g, int *comp, int is_info)
+{
+ stbi_uc version;
+ if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8')
+ return stbi__err("not GIF", "Corrupt GIF");
+
+ version = stbi__get8(s);
+ if (version != '7' && version != '9') return stbi__err("not GIF", "Corrupt GIF");
+ if (stbi__get8(s) != 'a') return stbi__err("not GIF", "Corrupt GIF");
+
+ stbi__g_failure_reason = "";
+ g->w = stbi__get16le(s);
+ g->h = stbi__get16le(s);
+ g->flags = stbi__get8(s);
+ g->bgindex = stbi__get8(s);
+ g->ratio = stbi__get8(s);
+ g->transparent = -1;
+
+ if (comp != 0) *comp = 4; // can't actually tell whether it's 3 or 4 until we parse the comments
+
+ if (is_info) return 1;
+
+ // global color table present; size is 2^(N+1) entries where N = low 3 bits
+ if (g->flags & 0x80)
+ stbi__gif_parse_colortable(s,g->pal, 2 << (g->flags & 7), -1);
+
+ return 1;
+}
+
+// Report GIF dimensions/components without decoding pixel data.
+// Allocates a temporary stbi__gif (it is large: contains an 8192-entry code
+// table and two palettes), frees it before returning. Rewinds on failure.
+static int stbi__gif_info_raw(stbi__context *s, int *x, int *y, int *comp)
+{
+ // NOTE(review): the stbi__malloc result is not NULL-checked before use
+ stbi__gif* g = (stbi__gif*) stbi__malloc(sizeof(stbi__gif));
+ if (!stbi__gif_header(s, g, comp, 1)) {
+ STBI_FREE(g);
+ stbi__rewind( s );
+ return 0;
+ }
+ if (x) *x = g->w;
+ if (y) *y = g->h;
+ STBI_FREE(g);
+ return 1;
+}
+
+// Emit the expanded byte string for one LZW code into g->out at the current
+// cursor, advancing the cursor and handling interlaced row stepping.
+// Recurses through the prefix chain so bytes come out in forward order.
+static void stbi__out_gif_code(stbi__gif *g, stbi__uint16 code)
+{
+ stbi_uc *p, *c;
+ int idx;
+
+ // recurse to decode the prefixes, since the linked-list is backwards,
+ // and working backwards through an interleaved image would be nasty
+ if (g->codes[code].prefix >= 0)
+ stbi__out_gif_code(g, g->codes[code].prefix);
+
+ if (g->cur_y >= g->max_y) return;
+
+ // cur_x/cur_y are byte offsets, so the pixel offset is their sum
+ idx = g->cur_x + g->cur_y;
+ p = &g->out[idx];
+ g->history[idx / 4] = 1; // mark this pixel as written this frame
+
+ c = &g->color_table[g->codes[code].suffix * 4];
+ if (c[3] > 128) { // don't render transparent pixels;
+ p[0] = c[2];
+ p[1] = c[1];
+ p[2] = c[0];
+ p[3] = c[3];
+ }
+ g->cur_x += 4;
+
+ if (g->cur_x >= g->max_x) {
+ g->cur_x = g->start_x;
+ g->cur_y += g->step;
+
+ // finished an interlace pass: halve the step and restart lower
+ while (g->cur_y >= g->max_y && g->parse > 0) {
+ g->step = (1 << g->parse) * g->line_size;
+ g->cur_y = g->start_y + (g->step >> 1);
+ --g->parse;
+ }
+ }
+}
+
+// Decode one frame's LZW-compressed raster data into g->out.
+// Implements GIF's variable-width LZW with clear/end codes and a 12-bit
+// (8192-entry) dictionary. Returns g->out on success, NULL on corrupt data.
+static stbi_uc *stbi__process_gif_raster(stbi__context *s, stbi__gif *g)
+{
+ stbi_uc lzw_cs;
+ stbi__int32 len, init_code;
+ stbi__uint32 first;
+ stbi__int32 codesize, codemask, avail, oldcode, bits, valid_bits, clear;
+ stbi__gif_lzw *p;
+
+ lzw_cs = stbi__get8(s); // minimum LZW code size
+ if (lzw_cs > 12) return NULL;
+ clear = 1 << lzw_cs;
+ first = 1; // set until we've seen a clear code (or decoded a root code)
+ codesize = lzw_cs + 1;
+ codemask = (1 << codesize) - 1;
+ bits = 0;
+ valid_bits = 0;
+ // initialize the dictionary with the single-byte root codes
+ for (init_code = 0; init_code < clear; init_code++) {
+ g->codes[init_code].prefix = -1;
+ g->codes[init_code].first = (stbi_uc) init_code;
+ g->codes[init_code].suffix = (stbi_uc) init_code;
+ }
+
+ // support no starting clear code
+ avail = clear+2;
+ oldcode = -1;
+
+ len = 0;
+ for(;;) {
+ if (valid_bits < codesize) {
+ // refill the bit buffer from the next sub-block
+ if (len == 0) {
+ len = stbi__get8(s); // start new block
+ if (len == 0)
+ return g->out;
+ }
+ --len;
+ bits |= (stbi__int32) stbi__get8(s) << valid_bits;
+ valid_bits += 8;
+ } else {
+ stbi__int32 code = bits & codemask;
+ bits >>= codesize;
+ valid_bits -= codesize;
+ // @OPTIMIZE: is there some way we can accelerate the non-clear path?
+ if (code == clear) { // clear code
+ codesize = lzw_cs + 1;
+ codemask = (1 << codesize) - 1;
+ avail = clear + 2;
+ oldcode = -1;
+ first = 0;
+ } else if (code == clear + 1) { // end of stream code
+ // drain the remaining sub-blocks
+ stbi__skip(s, len);
+ while ((len = stbi__get8(s)) > 0)
+ stbi__skip(s,len);
+ return g->out;
+ } else if (code <= avail) {
+ if (first) {
+ return stbi__errpuc("no clear code", "Corrupt GIF");
+ }
+
+ if (oldcode >= 0) {
+ p = &g->codes[avail++];
+ if (avail > 8192) {
+ return stbi__errpuc("too many codes", "Corrupt GIF");
+ }
+
+ p->prefix = (stbi__int16) oldcode;
+ p->first = g->codes[oldcode].first;
+ // code == avail is the KwKwK case: suffix repeats the first byte
+ p->suffix = (code == avail) ? p->first : g->codes[code].first;
+ } else if (code == avail)
+ return stbi__errpuc("illegal code in raster", "Corrupt GIF");
+
+ stbi__out_gif_code(g, (stbi__uint16) code);
+
+ // widen the code size when the dictionary outgrows the mask
+ if ((avail & codemask) == 0 && avail <= 0x0FFF) {
+ codesize++;
+ codemask = (1 << codesize) - 1;
+ }
+
+ oldcode = code;
+ } else {
+ return stbi__errpuc("illegal code in raster", "Corrupt GIF");
+ }
+ }
+ }
+}
+
+// this function is designed to support animated gifs, although stb_image doesn't support it
+// two back is the image from two frames ago, used for a very specific disposal format
+static stbi_uc *stbi__gif_load_next(stbi__context *s, stbi__gif *g, int *comp, int req_comp, stbi_uc *two_back)
+{
+ int dispose;
+ int first_frame;
+ int pi;
+ int pcount;
+
+ // on first frame, any non-written pixels get the background colour (non-transparent)
+ first_frame = 0;
+ if (g->out == 0) {
+ if (!stbi__gif_header(s, g, comp,0)) return 0; // stbi__g_failure_reason set by stbi__gif_header
+ g->out = (stbi_uc *) stbi__malloc(4 * g->w * g->h);
+ g->background = (stbi_uc *) stbi__malloc(4 * g->w * g->h);
+ g->history = (stbi_uc *) stbi__malloc(g->w * g->h);
+ if (g->out == 0) return stbi__errpuc("outofmem", "Out of memory");
+
+ // image is treated as "tranparent" at the start - ie, nothing overwrites the current background;
+ // background colour is only used for pixels that are not rendered first frame, after that "background"
+ // color refers to teh color that was there the previous frame.
+ memset( g->out, 0x00, 4 * g->w * g->h );
+ memset( g->background, 0x00, 4 * g->w * g->h ); // state of the background (starts transparent)
+ memset( g->history, 0x00, g->w * g->h ); // pixels that were affected previous frame
+ first_frame = 1;
+ } else {
+ // second frame - how do we dispoase of the previous one?
+ dispose = (g->eflags & 0x1C) >> 2;
+ pcount = g->w * g->h;
+
+ if ((dispose == 3) && (two_back == 0)) {
+ dispose = 2; // if I don't have an image to revert back to, default to the old background
+ }
+
+ if (dispose == 3) { // use previous graphic
+ for (pi = 0; pi < pcount; ++pi) {
+ if (g->history[pi]) {
+ memcpy( &g->out[pi * 4], &two_back[pi * 4], 4 );
+ }
+ }
+ } else if (dispose == 2) {
+ // restore what was changed last frame to background before that frame;
+ for (pi = 0; pi < pcount; ++pi) {
+ if (g->history[pi]) {
+ memcpy( &g->out[pi * 4], &g->background[pi * 4], 4 );
+ }
+ }
+ } else {
+ // This is a non-disposal case eithe way, so just
+ // leave the pixels as is, and they will become the new background
+ // 1: do not dispose
+ // 0: not specified.
+ }
+
+ // background is what out is after the undoing of the previou frame;
+ memcpy( g->background, g->out, 4 * g->w * g->h );
+ }
+
+ // clear my history;
+ memset( g->history, 0x00, g->w * g->h ); // pixels that were affected previous frame
+
+ for (;;) {
+ int tag = stbi__get8(s);
+ switch (tag) {
+ case 0x2C: /* Image Descriptor */
+ {
+ stbi__int32 x, y, w, h;
+ stbi_uc *o;
+
+ x = stbi__get16le(s);
+ y = stbi__get16le(s);
+ w = stbi__get16le(s);
+ h = stbi__get16le(s);
+ if (((x + w) > (g->w)) || ((y + h) > (g->h)))
+ return stbi__errpuc("bad Image Descriptor", "Corrupt GIF");
+
+ g->line_size = g->w * 4;
+ g->start_x = x * 4;
+ g->start_y = y * g->line_size;
+ g->max_x = g->start_x + w * 4;
+ g->max_y = g->start_y + h * g->line_size;
+ g->cur_x = g->start_x;
+ g->cur_y = g->start_y;
+
+ g->lflags = stbi__get8(s);
+
+ if (g->lflags & 0x40) {
+ g->step = 8 * g->line_size; // first interlaced spacing
+ g->parse = 3;
+ } else {
+ g->step = g->line_size;
+ g->parse = 0;
+ }
+
+ if (g->lflags & 0x80) {
+ stbi__gif_parse_colortable(s,g->lpal, 2 << (g->lflags & 7), g->eflags & 0x01 ? g->transparent : -1);
+ g->color_table = (stbi_uc *) g->lpal;
+ } else if (g->flags & 0x80) {
+ g->color_table = (stbi_uc *) g->pal;
+ } else
+ return stbi__errpuc("missing color table", "Corrupt GIF");
+
+ o = stbi__process_gif_raster(s, g);
+ if (o == NULL) return NULL;
+
+ // if this was the first frame,
+ pcount = g->w * g->h;
+ if (first_frame && (g->bgindex > 0)) {
+ // if first frame, any pixel not drawn to gets the background color
+ for (pi = 0; pi < pcount; ++pi) {
+ if (g->history[pi] == 0) {
+ g->pal[g->bgindex][3] = 255; // just in case it was made transparent, undo that; It will be reset next frame if need be;
+ memcpy( &g->out[pi * 4], &g->pal[g->bgindex], 4 );
+ }
+ }
+ }
+
+ return o;
+ }
+
+ case 0x21: // Comment Extension.
+ {
+ int len;
+ int ext = stbi__get8(s);
+ if (ext == 0xF9) { // Graphic Control Extension.
+ len = stbi__get8(s);
+ if (len == 4) {
+ g->eflags = stbi__get8(s);
+ g->delay = 10 * stbi__get16le(s); // delay - 1/100th of a second, saving as 1/1000ths.
+
+ // unset old transparent
+ if (g->transparent >= 0) {
+ g->pal[g->transparent][3] = 255;
+ }
+ if (g->eflags & 0x01) {
+ g->transparent = stbi__get8(s);
+ if (g->transparent >= 0) {
+ g->pal[g->transparent][3] = 0;
+ }
+ } else {
+ // don't need transparent
+ stbi__skip(s, 1);
+ g->transparent = -1;
+ }
+ } else {
+ stbi__skip(s, len);
+ break;
+ }
+ }
+ while ((len = stbi__get8(s)) != 0) {
+ stbi__skip(s, len);
+ }
+ break;
+ }
+
+ case 0x3B: // gif stream termination code
+ return (stbi_uc *) s; // using '1' causes warning on some compilers
+
+ default:
+ return stbi__errpuc("unknown code", "Corrupt GIF");
+ }
+ }
+}
+
+static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int *z, int *comp, int req_comp)
+{
+ if (stbi__gif_test(s)) {
+ int layers = 0;
+ stbi_uc *u = 0;
+ stbi_uc *out = 0;
+ stbi_uc *two_back = 0;
+ stbi__gif g;
+ int stride;
+ memset(&g, 0, sizeof(g));
+ if (delays) {
+ *delays = 0;
+ }
+
+ do {
+ u = stbi__gif_load_next(s, &g, comp, req_comp, two_back);
+ if (u == (stbi_uc *) s) u = 0; // end of animated gif marker
+
+ if (u) {
+ *x = g.w;
+ *y = g.h;
+ ++layers;
+ stride = g.w * g.h * 4;
+
+ if (out) {
+ out = (stbi_uc*) STBI_REALLOC( out, layers * stride );
+ if (delays) {
+ *delays = (int*) STBI_REALLOC( *delays, sizeof(int) * layers );
+ }
+ } else {
+ out = (stbi_uc*)stbi__malloc( layers * stride );
+ if (delays) {
+ *delays = (int*) stbi__malloc( layers * sizeof(int) );
+ }
+ }
+ memcpy( out + ((layers - 1) * stride), u, stride );
+ if (layers >= 2) {
+ two_back = out - 2 * stride;
+ }
+
+ if (delays) {
+ (*delays)[layers - 1U] = g.delay;
+ }
+ }
+ } while (u != 0);
+
+ // free temp buffer;
+ STBI_FREE(g.out);
+ STBI_FREE(g.history);
+ STBI_FREE(g.background);
+
+ // do the final conversion after loading everything;
+ if (req_comp && req_comp != 4)
+ out = stbi__convert_format(out, 4, req_comp, layers * g.w, g.h);
+
+ *z = layers;
+ return out;
+ } else {
+ return stbi__errpuc("not GIF", "Image was not as a gif type.");
+ }
+}
+
+static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
+{
+ stbi_uc *u = 0;
+ stbi__gif g;
+ memset(&g, 0, sizeof(g));
+
+ u = stbi__gif_load_next(s, &g, comp, req_comp, 0);
+ if (u == (stbi_uc *) s) u = 0; // end of animated gif marker
+ if (u) {
+ *x = g.w;
+ *y = g.h;
+
+ // moved conversion to after successful load so that the same
+ // can be done for multiple frames.
+ if (req_comp && req_comp != 4)
+ u = stbi__convert_format(u, 4, req_comp, g.w, g.h);
+ }
+
+ // free buffers needed for multiple frame loading;
+ STBI_FREE(g.history);
+ STBI_FREE(g.background);
+
+ return u;
+}
+
+static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp)
+{
+ return stbi__gif_info_raw(s,x,y,comp);
+}
+#endif
+
+// *************************************************************************************************
+// Radiance RGBE HDR loader
+// originally by Nicolas Schulz
+#ifndef STBI_NO_HDR
+static int stbi__hdr_test_core(stbi__context *s, const char *signature)
+{
+ int i;
+ for (i=0; signature[i]; ++i)
+ if (stbi__get8(s) != signature[i])
+ return 0;
+ stbi__rewind(s);
+ return 1;
+}
+
+static int stbi__hdr_test(stbi__context* s)
+{
+ int r = stbi__hdr_test_core(s, "#?RADIANCE\n");
+ stbi__rewind(s);
+ if(!r) {
+ r = stbi__hdr_test_core(s, "#?RGBE\n");
+ stbi__rewind(s);
+ }
+ return r;
+}
+
+#define STBI__HDR_BUFLEN 1024
+static char *stbi__hdr_gettoken(stbi__context *z, char *buffer)
+{
+ int len=0;
+ char c = '\0';
+
+ c = (char) stbi__get8(z);
+
+ while (!stbi__at_eof(z) && c != '\n') {
+ buffer[len++] = c;
+ if (len == STBI__HDR_BUFLEN-1) {
+ // flush to end of line
+ while (!stbi__at_eof(z) && stbi__get8(z) != '\n')
+ ;
+ break;
+ }
+ c = (char) stbi__get8(z);
+ }
+
+ buffer[len] = 0;
+ return buffer;
+}
+
+static void stbi__hdr_convert(float *output, stbi_uc *input, int req_comp)
+{
+ if ( input[3] != 0 ) {
+ float f1;
+ // Exponent
+ f1 = (float) ldexp(1.0f, input[3] - (int)(128 + 8));
+ if (req_comp <= 2)
+ output[0] = (input[0] + input[1] + input[2]) * f1 / 3;
+ else {
+ output[0] = input[0] * f1;
+ output[1] = input[1] * f1;
+ output[2] = input[2] * f1;
+ }
+ if (req_comp == 2) output[1] = 1;
+ if (req_comp == 4) output[3] = 1;
+ } else {
+ switch (req_comp) {
+ case 4: output[3] = 1; /* fallthrough */
+ case 3: output[0] = output[1] = output[2] = 0;
+ break;
+ case 2: output[1] = 1; /* fallthrough */
+ case 1: output[0] = 0;
+ break;
+ }
+ }
+}
+
+static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
+{
+ char buffer[STBI__HDR_BUFLEN];
+ char *token;
+ int valid = 0;
+ int width, height;
+ stbi_uc *scanline;
+ float *hdr_data;
+ int len;
+ unsigned char count, value;
+ int i, j, k, c1,c2, z;
+ const char *headerToken;
+ STBI_NOTUSED(ri);
+
+ // Check identifier
+ headerToken = stbi__hdr_gettoken(s,buffer);
+ if (strcmp(headerToken, "#?RADIANCE") != 0 && strcmp(headerToken, "#?RGBE") != 0)
+ return stbi__errpf("not HDR", "Corrupt HDR image");
+
+ // Parse header
+ for(;;) {
+ token = stbi__hdr_gettoken(s,buffer);
+ if (token[0] == 0) break;
+ if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1;
+ }
+
+ if (!valid) return stbi__errpf("unsupported format", "Unsupported HDR format");
+
+ // Parse width and height
+ // can't use sscanf() if we're not using stdio!
+ token = stbi__hdr_gettoken(s,buffer);
+ if (strncmp(token, "-Y ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format");
+ token += 3;
+ height = (int) strtol(token, &token, 10);
+ while (*token == ' ') ++token;
+ if (strncmp(token, "+X ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format");
+ token += 3;
+ width = (int) strtol(token, NULL, 10);
+
+ *x = width;
+ *y = height;
+
+ if (comp) *comp = 3;
+ if (req_comp == 0) req_comp = 3;
+
+ if (!stbi__mad4sizes_valid(width, height, req_comp, sizeof(float), 0))
+ return stbi__errpf("too large", "HDR image is too large");
+
+ // Read data
+ hdr_data = (float *) stbi__malloc_mad4(width, height, req_comp, sizeof(float), 0);
+ if (!hdr_data)
+ return stbi__errpf("outofmem", "Out of memory");
+
+ // Load image data
+ // image data is stored as some number of sca
+ if ( width < 8 || width >= 32768) {
+ // Read flat data
+ for (j=0; j < height; ++j) {
+ for (i=0; i < width; ++i) {
+ stbi_uc rgbe[4];
+ main_decode_loop:
+ stbi__getn(s, rgbe, 4);
+ stbi__hdr_convert(hdr_data + j * width * req_comp + i * req_comp, rgbe, req_comp);
+ }
+ }
+ } else {
+ // Read RLE-encoded data
+ scanline = NULL;
+
+ for (j = 0; j < height; ++j) {
+ c1 = stbi__get8(s);
+ c2 = stbi__get8(s);
+ len = stbi__get8(s);
+ if (c1 != 2 || c2 != 2 || (len & 0x80)) {
+ // not run-length encoded, so we have to actually use THIS data as a decoded
+ // pixel (note this can't be a valid pixel--one of RGB must be >= 128)
+ stbi_uc rgbe[4];
+ rgbe[0] = (stbi_uc) c1;
+ rgbe[1] = (stbi_uc) c2;
+ rgbe[2] = (stbi_uc) len;
+ rgbe[3] = (stbi_uc) stbi__get8(s);
+ stbi__hdr_convert(hdr_data, rgbe, req_comp);
+ i = 1;
+ j = 0;
+ STBI_FREE(scanline);
+ goto main_decode_loop; // yes, this makes no sense
+ }
+ len <<= 8;
+ len |= stbi__get8(s);
+ if (len != width) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("invalid decoded scanline length", "corrupt HDR"); }
+ if (scanline == NULL) {
+ scanline = (stbi_uc *) stbi__malloc_mad2(width, 4, 0);
+ if (!scanline) {
+ STBI_FREE(hdr_data);
+ return stbi__errpf("outofmem", "Out of memory");
+ }
+ }
+
+ for (k = 0; k < 4; ++k) {
+ int nleft;
+ i = 0;
+ while ((nleft = width - i) > 0) {
+ count = stbi__get8(s);
+ if (count > 128) {
+ // Run
+ value = stbi__get8(s);
+ count -= 128;
+ if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); }
+ for (z = 0; z < count; ++z)
+ scanline[i++ * 4 + k] = value;
+ } else {
+ // Dump
+ if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); }
+ for (z = 0; z < count; ++z)
+ scanline[i++ * 4 + k] = stbi__get8(s);
+ }
+ }
+ }
+ for (i=0; i < width; ++i)
+ stbi__hdr_convert(hdr_data+(j*width + i)*req_comp, scanline + i*4, req_comp);
+ }
+ if (scanline)
+ STBI_FREE(scanline);
+ }
+
+ return hdr_data;
+}
+
+static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp)
+{
+ char buffer[STBI__HDR_BUFLEN];
+ char *token;
+ int valid = 0;
+ int dummy;
+
+ if (!x) x = &dummy;
+ if (!y) y = &dummy;
+ if (!comp) comp = &dummy;
+
+ if (stbi__hdr_test(s) == 0) {
+ stbi__rewind( s );
+ return 0;
+ }
+
+ for(;;) {
+ token = stbi__hdr_gettoken(s,buffer);
+ if (token[0] == 0) break;
+ if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1;
+ }
+
+ if (!valid) {
+ stbi__rewind( s );
+ return 0;
+ }
+ token = stbi__hdr_gettoken(s,buffer);
+ if (strncmp(token, "-Y ", 3)) {
+ stbi__rewind( s );
+ return 0;
+ }
+ token += 3;
+ *y = (int) strtol(token, &token, 10);
+ while (*token == ' ') ++token;
+ if (strncmp(token, "+X ", 3)) {
+ stbi__rewind( s );
+ return 0;
+ }
+ token += 3;
+ *x = (int) strtol(token, NULL, 10);
+ *comp = 3;
+ return 1;
+}
+#endif // STBI_NO_HDR
+
+#ifndef STBI_NO_BMP
+static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp)
+{
+ void *p;
+ stbi__bmp_data info;
+
+ info.all_a = 255;
+ p = stbi__bmp_parse_header(s, &info);
+ stbi__rewind( s );
+ if (p == NULL)
+ return 0;
+ if (x) *x = s->img_x;
+ if (y) *y = s->img_y;
+ if (comp) *comp = info.ma ? 4 : 3;
+ return 1;
+}
+#endif
+
+#ifndef STBI_NO_PSD
+static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp)
+{
+ int channelCount, dummy, depth;
+ if (!x) x = &dummy;
+ if (!y) y = &dummy;
+ if (!comp) comp = &dummy;
+ if (stbi__get32be(s) != 0x38425053) {
+ stbi__rewind( s );
+ return 0;
+ }
+ if (stbi__get16be(s) != 1) {
+ stbi__rewind( s );
+ return 0;
+ }
+ stbi__skip(s, 6);
+ channelCount = stbi__get16be(s);
+ if (channelCount < 0 || channelCount > 16) {
+ stbi__rewind( s );
+ return 0;
+ }
+ *y = stbi__get32be(s);
+ *x = stbi__get32be(s);
+ depth = stbi__get16be(s);
+ if (depth != 8 && depth != 16) {
+ stbi__rewind( s );
+ return 0;
+ }
+ if (stbi__get16be(s) != 3) {
+ stbi__rewind( s );
+ return 0;
+ }
+ *comp = 4;
+ return 1;
+}
+
+static int stbi__psd_is16(stbi__context *s)
+{
+ int channelCount, depth;
+ if (stbi__get32be(s) != 0x38425053) {
+ stbi__rewind( s );
+ return 0;
+ }
+ if (stbi__get16be(s) != 1) {
+ stbi__rewind( s );
+ return 0;
+ }
+ stbi__skip(s, 6);
+ channelCount = stbi__get16be(s);
+ if (channelCount < 0 || channelCount > 16) {
+ stbi__rewind( s );
+ return 0;
+ }
+ (void) stbi__get32be(s);
+ (void) stbi__get32be(s);
+ depth = stbi__get16be(s);
+ if (depth != 16) {
+ stbi__rewind( s );
+ return 0;
+ }
+ return 1;
+}
+#endif
+
+#ifndef STBI_NO_PIC
+static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp)
+{
+ int act_comp=0,num_packets=0,chained,dummy;
+ stbi__pic_packet packets[10];
+
+ if (!x) x = &dummy;
+ if (!y) y = &dummy;
+ if (!comp) comp = &dummy;
+
+ if (!stbi__pic_is4(s,"\x53\x80\xF6\x34")) {
+ stbi__rewind(s);
+ return 0;
+ }
+
+ stbi__skip(s, 88);
+
+ *x = stbi__get16be(s);
+ *y = stbi__get16be(s);
+ if (stbi__at_eof(s)) {
+ stbi__rewind( s);
+ return 0;
+ }
+ if ( (*x) != 0 && (1 << 28) / (*x) < (*y)) {
+ stbi__rewind( s );
+ return 0;
+ }
+
+ stbi__skip(s, 8);
+
+ do {
+ stbi__pic_packet *packet;
+
+ if (num_packets==sizeof(packets)/sizeof(packets[0]))
+ return 0;
+
+ packet = &packets[num_packets++];
+ chained = stbi__get8(s);
+ packet->size = stbi__get8(s);
+ packet->type = stbi__get8(s);
+ packet->channel = stbi__get8(s);
+ act_comp |= packet->channel;
+
+ if (stbi__at_eof(s)) {
+ stbi__rewind( s );
+ return 0;
+ }
+ if (packet->size != 8) {
+ stbi__rewind( s );
+ return 0;
+ }
+ } while (chained);
+
+ *comp = (act_comp & 0x10 ? 4 : 3);
+
+ return 1;
+}
+#endif
+
+// *************************************************************************************************
+// Portable Gray Map and Portable Pixel Map loader
+// by Ken Miller
+//
+// PGM: http://netpbm.sourceforge.net/doc/pgm.html
+// PPM: http://netpbm.sourceforge.net/doc/ppm.html
+//
+// Known limitations:
+// Does not support comments in the header section
+// Does not support ASCII image data (formats P2 and P3)
+// Does not support 16-bit-per-channel
+
+#ifndef STBI_NO_PNM
+
+static int stbi__pnm_test(stbi__context *s)
+{
+ char p, t;
+ p = (char) stbi__get8(s);
+ t = (char) stbi__get8(s);
+ if (p != 'P' || (t != '5' && t != '6')) {
+ stbi__rewind( s );
+ return 0;
+ }
+ return 1;
+}
+
+static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
+{
+ stbi_uc *out;
+ STBI_NOTUSED(ri);
+
+ if (!stbi__pnm_info(s, (int *)&s->img_x, (int *)&s->img_y, (int *)&s->img_n))
+ return 0;
+
+ *x = s->img_x;
+ *y = s->img_y;
+ if (comp) *comp = s->img_n;
+
+ if (!stbi__mad3sizes_valid(s->img_n, s->img_x, s->img_y, 0))
+ return stbi__errpuc("too large", "PNM too large");
+
+ out = (stbi_uc *) stbi__malloc_mad3(s->img_n, s->img_x, s->img_y, 0);
+ if (!out) return stbi__errpuc("outofmem", "Out of memory");
+ stbi__getn(s, out, s->img_n * s->img_x * s->img_y);
+
+ if (req_comp && req_comp != s->img_n) {
+ out = stbi__convert_format(out, s->img_n, req_comp, s->img_x, s->img_y);
+ if (out == NULL) return out; // stbi__convert_format frees input on failure
+ }
+ return out;
+}
+
+static int stbi__pnm_isspace(char c)
+{
+ return c == ' ' || c == '\t' || c == '\n' || c == '\v' || c == '\f' || c == '\r';
+}
+
+static void stbi__pnm_skip_whitespace(stbi__context *s, char *c)
+{
+ for (;;) {
+ while (!stbi__at_eof(s) && stbi__pnm_isspace(*c))
+ *c = (char) stbi__get8(s);
+
+ if (stbi__at_eof(s) || *c != '#')
+ break;
+
+ while (!stbi__at_eof(s) && *c != '\n' && *c != '\r' )
+ *c = (char) stbi__get8(s);
+ }
+}
+
+static int stbi__pnm_isdigit(char c)
+{
+ return c >= '0' && c <= '9';
+}
+
+static int stbi__pnm_getinteger(stbi__context *s, char *c)
+{
+ int value = 0;
+
+ while (!stbi__at_eof(s) && stbi__pnm_isdigit(*c)) {
+ value = value*10 + (*c - '0');
+ *c = (char) stbi__get8(s);
+ }
+
+ return value;
+}
+
+static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp)
+{
+ int maxv, dummy;
+ char c, p, t;
+
+ if (!x) x = &dummy;
+ if (!y) y = &dummy;
+ if (!comp) comp = &dummy;
+
+ stbi__rewind(s);
+
+ // Get identifier
+ p = (char) stbi__get8(s);
+ t = (char) stbi__get8(s);
+ if (p != 'P' || (t != '5' && t != '6')) {
+ stbi__rewind(s);
+ return 0;
+ }
+
+ *comp = (t == '6') ? 3 : 1; // '5' is 1-component .pgm; '6' is 3-component .ppm
+
+ c = (char) stbi__get8(s);
+ stbi__pnm_skip_whitespace(s, &c);
+
+ *x = stbi__pnm_getinteger(s, &c); // read width
+ stbi__pnm_skip_whitespace(s, &c);
+
+ *y = stbi__pnm_getinteger(s, &c); // read height
+ stbi__pnm_skip_whitespace(s, &c);
+
+ maxv = stbi__pnm_getinteger(s, &c); // read max value
+
+ if (maxv > 255)
+ return stbi__err("max value > 255", "PPM image not 8-bit");
+ else
+ return 1;
+}
+#endif
+
+static int stbi__info_main(stbi__context *s, int *x, int *y, int *comp)
+{
+ #ifndef STBI_NO_JPEG
+ if (stbi__jpeg_info(s, x, y, comp)) return 1;
+ #endif
+
+ #ifndef STBI_NO_PNG
+ if (stbi__png_info(s, x, y, comp)) return 1;
+ #endif
+
+ #ifndef STBI_NO_GIF
+ if (stbi__gif_info(s, x, y, comp)) return 1;
+ #endif
+
+ #ifndef STBI_NO_BMP
+ if (stbi__bmp_info(s, x, y, comp)) return 1;
+ #endif
+
+ #ifndef STBI_NO_PSD
+ if (stbi__psd_info(s, x, y, comp)) return 1;
+ #endif
+
+ #ifndef STBI_NO_PIC
+ if (stbi__pic_info(s, x, y, comp)) return 1;
+ #endif
+
+ #ifndef STBI_NO_PNM
+ if (stbi__pnm_info(s, x, y, comp)) return 1;
+ #endif
+
+ #ifndef STBI_NO_HDR
+ if (stbi__hdr_info(s, x, y, comp)) return 1;
+ #endif
+
+ // test tga last because it's a crappy test!
+ #ifndef STBI_NO_TGA
+ if (stbi__tga_info(s, x, y, comp))
+ return 1;
+ #endif
+ return stbi__err("unknown image type", "Image not of any known type, or corrupt");
+}
+
+static int stbi__is_16_main(stbi__context *s)
+{
+ #ifndef STBI_NO_PNG
+ if (stbi__png_is16(s)) return 1;
+ #endif
+
+ #ifndef STBI_NO_PSD
+ if (stbi__psd_is16(s)) return 1;
+ #endif
+
+ return 0;
+}
+
+#ifndef STBI_NO_STDIO
+STBIDEF int stbi_info(char const *filename, int *x, int *y, int *comp)
+{
+ FILE *f = stbi__fopen(filename, "rb");
+ int result;
+ if (!f) return stbi__err("can't fopen", "Unable to open file");
+ result = stbi_info_from_file(f, x, y, comp);
+ fclose(f);
+ return result;
+}
+
+STBIDEF int stbi_info_from_file(FILE *f, int *x, int *y, int *comp)
+{
+ int r;
+ stbi__context s;
+ long pos = ftell(f);
+ stbi__start_file(&s, f);
+ r = stbi__info_main(&s,x,y,comp);
+ if (pos >= 0) {
+ if (fseek(f,pos,SEEK_SET) == -1) return stbi__err("fseek() error", "File Seek Fail");
+ }
+ return r;
+}
+
+STBIDEF int stbi_is_16_bit(char const *filename)
+{
+ FILE *f = stbi__fopen(filename, "rb");
+ int result;
+ if (!f) return stbi__err("can't fopen", "Unable to open file");
+ result = stbi_is_16_bit_from_file(f);
+ fclose(f);
+ return result;
+}
+
+STBIDEF int stbi_is_16_bit_from_file(FILE *f)
+{
+ int r;
+ stbi__context s;
+ long pos = ftell(f);
+ stbi__start_file(&s, f);
+ r = stbi__is_16_main(&s);
+ if (pos >= 0) {
+ if (fseek(f,pos,SEEK_SET) == -1) return stbi__err("fseek() error", "File Seek Fail");
+ }
+ return r;
+}
+#endif // !STBI_NO_STDIO
+
+STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp)
+{
+ stbi__context s;
+ stbi__start_mem(&s,buffer,len);
+ return stbi__info_main(&s,x,y,comp);
+}
+
+STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *c, void *user, int *x, int *y, int *comp)
+{
+ stbi__context s;
+ stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user);
+ return stbi__info_main(&s,x,y,comp);
+}
+
+STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len)
+{
+ stbi__context s;
+ stbi__start_mem(&s,buffer,len);
+ return stbi__is_16_main(&s);
+}
+
+STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *c, void *user)
+{
+ stbi__context s;
+ stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user);
+ return stbi__is_16_main(&s);
+}
+
+#endif // STB_IMAGE_IMPLEMENTATION
+
+/*
+ revision history:
+ 2.19 (2018-02-11) fix warning
+ 2.18 (2018-01-30) fix warnings
+ 2.17 (2018-01-29) change sbti__shiftsigned to avoid clang -O2 bug
+ 1-bit BMP
+ *_is_16_bit api
+ avoid warnings
+ 2.16 (2017-07-23) all functions have 16-bit variants;
+ STBI_NO_STDIO works again;
+ compilation fixes;
+ fix rounding in unpremultiply;
+ optimize vertical flip;
+ disable raw_len validation;
+ documentation fixes
+ 2.15 (2017-03-18) fix png-1,2,4 bug; now all Imagenet JPGs decode;
+ warning fixes; disable run-time SSE detection on gcc;
+ uniform handling of optional "return" values;
+ thread-safe initialization of zlib tables
+ 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs
+ 2.13 (2016-11-29) add 16-bit API, only supported for PNG right now
+ 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes
+ 2.11 (2016-04-02) allocate large structures on the stack
+ remove white matting for transparent PSD
+ fix reported channel count for PNG & BMP
+ re-enable SSE2 in non-gcc 64-bit
+ support RGB-formatted JPEG
+ read 16-bit PNGs (only as 8-bit)
+ 2.10 (2016-01-22) avoid warning introduced in 2.09 by STBI_REALLOC_SIZED
+ 2.09 (2016-01-16) allow comments in PNM files
+ 16-bit-per-pixel TGA (not bit-per-component)
+ info() for TGA could break due to .hdr handling
+ info() for BMP to shares code instead of sloppy parse
+ can use STBI_REALLOC_SIZED if allocator doesn't support realloc
+ code cleanup
+ 2.08 (2015-09-13) fix to 2.07 cleanup, reading RGB PSD as RGBA
+ 2.07 (2015-09-13) fix compiler warnings
+ partial animated GIF support
+ limited 16-bpc PSD support
+ #ifdef unused functions
+ bug with < 92 byte PIC,PNM,HDR,TGA
+ 2.06 (2015-04-19) fix bug where PSD returns wrong '*comp' value
+ 2.05 (2015-04-19) fix bug in progressive JPEG handling, fix warning
+ 2.04 (2015-04-15) try to re-enable SIMD on MinGW 64-bit
+ 2.03 (2015-04-12) extra corruption checking (mmozeiko)
+ stbi_set_flip_vertically_on_load (nguillemot)
+ fix NEON support; fix mingw support
+ 2.02 (2015-01-19) fix incorrect assert, fix warning
+ 2.01 (2015-01-17) fix various warnings; suppress SIMD on gcc 32-bit without -msse2
+ 2.00b (2014-12-25) fix STBI_MALLOC in progressive JPEG
+ 2.00 (2014-12-25) optimize JPG, including x86 SSE2 & NEON SIMD (ryg)
+ progressive JPEG (stb)
+ PGM/PPM support (Ken Miller)
+ STBI_MALLOC,STBI_REALLOC,STBI_FREE
+ GIF bugfix -- seemingly never worked
+ STBI_NO_*, STBI_ONLY_*
+ 1.48 (2014-12-14) fix incorrectly-named assert()
+ 1.47 (2014-12-14) 1/2/4-bit PNG support, both direct and paletted (Omar Cornut & stb)
+ optimize PNG (ryg)
+ fix bug in interlaced PNG with user-specified channel count (stb)
+ 1.46 (2014-08-26)
+ fix broken tRNS chunk (colorkey-style transparency) in non-paletted PNG
+ 1.45 (2014-08-16)
+ fix MSVC-ARM internal compiler error by wrapping malloc
+ 1.44 (2014-08-07)
+ various warning fixes from Ronny Chevalier
+ 1.43 (2014-07-15)
+ fix MSVC-only compiler problem in code changed in 1.42
+ 1.42 (2014-07-09)
+ don't define _CRT_SECURE_NO_WARNINGS (affects user code)
+ fixes to stbi__cleanup_jpeg path
+ added STBI_ASSERT to avoid requiring assert.h
+ 1.41 (2014-06-25)
+ fix search&replace from 1.36 that messed up comments/error messages
+ 1.40 (2014-06-22)
+ fix gcc struct-initialization warning
+ 1.39 (2014-06-15)
+ fix to TGA optimization when req_comp != number of components in TGA;
+ fix to GIF loading because BMP wasn't rewinding (whoops, no GIFs in my test suite)
+ add support for BMP version 5 (more ignored fields)
+ 1.38 (2014-06-06)
+ suppress MSVC warnings on integer casts truncating values
+ fix accidental rename of 'skip' field of I/O
+ 1.37 (2014-06-04)
+ remove duplicate typedef
+ 1.36 (2014-06-03)
+ convert to header file single-file library
+ if de-iphone isn't set, load iphone images color-swapped instead of returning NULL
+ 1.35 (2014-05-27)
+ various warnings
+ fix broken STBI_SIMD path
+ fix bug where stbi_load_from_file no longer left file pointer in correct place
+ fix broken non-easy path for 32-bit BMP (possibly never used)
+ TGA optimization by Arseny Kapoulkine
+ 1.34 (unknown)
+ use STBI_NOTUSED in stbi__resample_row_generic(), fix one more leak in tga failure case
+ 1.33 (2011-07-14)
+ make stbi_is_hdr work in STBI_NO_HDR (as specified), minor compiler-friendly improvements
+ 1.32 (2011-07-13)
+ support for "info" function for all supported filetypes (SpartanJ)
+ 1.31 (2011-06-20)
+ a few more leak fixes, bug in PNG handling (SpartanJ)
+ 1.30 (2011-06-11)
+ added ability to load files via callbacks to accomidate custom input streams (Ben Wenger)
+ removed deprecated format-specific test/load functions
+ removed support for installable file formats (stbi_loader) -- would have been broken for IO callbacks anyway
+ error cases in bmp and tga give messages and don't leak (Raymond Barbiero, grisha)
+ fix inefficiency in decoding 32-bit BMP (David Woo)
+ 1.29 (2010-08-16)
+ various warning fixes from Aurelien Pocheville
+ 1.28 (2010-08-01)
+ fix bug in GIF palette transparency (SpartanJ)
+ 1.27 (2010-08-01)
+ cast-to-stbi_uc to fix warnings
+ 1.26 (2010-07-24)
+ fix bug in file buffering for PNG reported by SpartanJ
+ 1.25 (2010-07-17)
+ refix trans_data warning (Won Chun)
+ 1.24 (2010-07-12)
+ perf improvements reading from files on platforms with lock-heavy fgetc()
+ minor perf improvements for jpeg
+ deprecated type-specific functions so we'll get feedback if they're needed
+ attempt to fix trans_data warning (Won Chun)
+ 1.23 fixed bug in iPhone support
+ 1.22 (2010-07-10)
+ removed image *writing* support
+ stbi_info support from Jetro Lauha
+ GIF support from Jean-Marc Lienher
+ iPhone PNG-extensions from James Brown
+ warning-fixes from Nicolas Schulz and Janez Zemva (i.stbi__err. Janez (U+017D)emva)
+ 1.21 fix use of 'stbi_uc' in header (reported by jon blow)
+ 1.20 added support for Softimage PIC, by Tom Seddon
+ 1.19 bug in interlaced PNG corruption check (found by ryg)
+ 1.18 (2008-08-02)
+ fix a threading bug (local mutable static)
+ 1.17 support interlaced PNG
+ 1.16 major bugfix - stbi__convert_format converted one too many pixels
+ 1.15 initialize some fields for thread safety
+ 1.14 fix threadsafe conversion bug
+ header-file-only version (#define STBI_HEADER_FILE_ONLY before including)
+ 1.13 threadsafe
+ 1.12 const qualifiers in the API
+ 1.11 Support installable IDCT, colorspace conversion routines
+ 1.10 Fixes for 64-bit (don't use "unsigned long")
+ optimized upsampling by Fabian "ryg" Giesen
+ 1.09 Fix format-conversion for PSD code (bad global variables!)
+ 1.08 Thatcher Ulrich's PSD code integrated by Nicolas Schulz
+ 1.07 attempt to fix C++ warning/errors again
+ 1.06 attempt to fix C++ warning/errors again
+ 1.05 fix TGA loading to return correct *comp and use good luminance calc
+ 1.04 default float alpha is 1, not 255; use 'void *' for stbi_image_free
+ 1.03 bugfixes to STBI_NO_STDIO, STBI_NO_HDR
+ 1.02 support for (subset of) HDR files, float interface for preferred access to them
+ 1.01 fix bug: possible bug in handling right-side up bmps... not sure
+ fix bug: the stbi__bmp_load() and stbi__tga_load() functions didn't work at all
+ 1.00 interface to zlib that skips zlib header
+ 0.99 correct handling of alpha in palette
+ 0.98 TGA loader by lonesock; dynamically add loaders (untested)
+ 0.97 jpeg errors on too large a file; also catch another malloc failure
+ 0.96 fix detection of invalid v value - particleman@mollyrocket forum
+ 0.95 during header scan, seek to markers in case of padding
+ 0.94 STBI_NO_STDIO to disable stdio usage; rename all #defines the same
+ 0.93 handle jpegtran output; verbose errors
+ 0.92 read 4,8,16,24,32-bit BMP files of several formats
+ 0.91 output 24-bit Windows 3.0 BMP files
+ 0.90 fix a few more warnings; bump version number to approach 1.0
+ 0.61 bugfixes due to Marc LeBlanc, Christopher Lloyd
+ 0.60 fix compiling as c++
+ 0.59 fix warnings: merge Dave Moore's -Wall fixes
+ 0.58 fix bug: zlib uncompressed mode len/nlen was wrong endian
+ 0.57 fix bug: jpg last huffman symbol before marker was >9 bits but less than 16 available
+ 0.56 fix bug: zlib uncompressed mode len vs. nlen
+ 0.55 fix bug: restart_interval not initialized to 0
+ 0.54 allow NULL for 'int *comp'
+ 0.53 fix bug in png 3->4; speedup png decoding
+ 0.52 png handles req_comp=3,4 directly; minor cleanup; jpeg comments
+ 0.51 obey req_comp requests, 1-component jpegs return as 1-component,
+ on 'test' only check type, not whether we support this variant
+ 0.50 (2006-11-19)
+ first released version
+*/
+
+
+/*
+------------------------------------------------------------------------------
+This software is available under 2 licenses -- choose whichever you prefer.
+------------------------------------------------------------------------------
+ALTERNATIVE A - MIT License
+Copyright (c) 2017 Sean Barrett
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+------------------------------------------------------------------------------
+ALTERNATIVE B - Public Domain (www.unlicense.org)
+This is free and unencumbered software released into the public domain.
+Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
+software, either in source code form or as a compiled binary, for any purpose,
+commercial or non-commercial, and by any means.
+In jurisdictions that recognize copyright laws, the author or authors of this
+software dedicate any and all copyright interest in the software to the public
+domain. We make this dedication for the benefit of the public at large and to
+the detriment of our heirs and successors. We intend this dedication to be an
+overt act of relinquishment in perpetuity of all present and future rights to
+this software under copyright law.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+------------------------------------------------------------------------------
+*/
diff --git a/vendor/github.com/Benau/go_rlottie/vector_varenaalloc.cpp b/vendor/github.com/Benau/go_rlottie/vector_varenaalloc.cpp
new file mode 100644
index 00000000..55bd3793
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_varenaalloc.cpp
@@ -0,0 +1,166 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "vector_varenaalloc.h"
+#include <algorithm>
+#include <new>
+
+static char* end_chain(char*) { return nullptr; }
+
+static uint32_t first_allocated_block(uint32_t blockSize, uint32_t firstHeapAllocation) {
+ return firstHeapAllocation > 0 ? firstHeapAllocation :
+ blockSize > 0 ? blockSize : 1024;
+}
+
+VArenaAlloc::VArenaAlloc(char* block, size_t size, size_t firstHeapAllocation)
+ : fDtorCursor {block}
+ , fCursor {block}
+ , fEnd {block + ToU32(size)}
+ , fFirstBlock {block}
+ , fFirstSize {ToU32(size)}
+ , fFirstHeapAllocationSize {first_allocated_block(ToU32(size), ToU32(firstHeapAllocation))}
+{
+ if (size < sizeof(Footer)) {
+ fEnd = fCursor = fDtorCursor = nullptr;
+ }
+
+ if (fCursor != nullptr) {
+ this->installFooter(end_chain, 0);
+ }
+}
+
+VArenaAlloc::~VArenaAlloc() {
+ RunDtorsOnBlock(fDtorCursor);
+}
+
+void VArenaAlloc::reset() {
+ this->~VArenaAlloc();
+ new (this) VArenaAlloc{fFirstBlock, fFirstSize, fFirstHeapAllocationSize};
+}
+
+void VArenaAlloc::installFooter(FooterAction* action, uint32_t padding) {
+ assert(padding < 64);
+ int64_t actionInt = (int64_t)(intptr_t)action;
+
+ // The top 14 bits should be either all 0s or all 1s. Check this.
+ assert((actionInt << 6) >> 6 == actionInt);
+ Footer encodedFooter = (actionInt << 6) | padding;
+ memmove(fCursor, &encodedFooter, sizeof(Footer));
+ fCursor += sizeof(Footer);
+ fDtorCursor = fCursor;
+}
+
+void VArenaAlloc::installPtrFooter(FooterAction* action, char* ptr, uint32_t padding) {
+ memmove(fCursor, &ptr, sizeof(char*));
+ fCursor += sizeof(char*);
+ this->installFooter(action, padding);
+}
+
+char* VArenaAlloc::SkipPod(char* footerEnd) {
+ char* objEnd = footerEnd - (sizeof(Footer) + sizeof(int32_t));
+ int32_t skip;
+ memmove(&skip, objEnd, sizeof(int32_t));
+ return objEnd - skip;
+}
+
+void VArenaAlloc::RunDtorsOnBlock(char* footerEnd) {
+ while (footerEnd != nullptr) {
+ Footer footer;
+ memcpy(&footer, footerEnd - sizeof(Footer), sizeof(Footer));
+
+ FooterAction* action = (FooterAction*)(footer >> 6);
+ ptrdiff_t padding = footer & 63;
+
+ footerEnd = action(footerEnd) - padding;
+ }
+}
+
+char* VArenaAlloc::NextBlock(char* footerEnd) {
+ char* objEnd = footerEnd - (sizeof(Footer) + sizeof(char*));
+ char* next;
+ memmove(&next, objEnd, sizeof(char*));
+ RunDtorsOnBlock(next);
+ delete [] objEnd;
+ return nullptr;
+}
+
+void VArenaAlloc::installUint32Footer(FooterAction* action, uint32_t value, uint32_t padding) {
+ memmove(fCursor, &value, sizeof(uint32_t));
+ fCursor += sizeof(uint32_t);
+ this->installFooter(action, padding);
+}
+
+void VArenaAlloc::ensureSpace(uint32_t size, uint32_t alignment) {
+ constexpr uint32_t headerSize = sizeof(Footer) + sizeof(ptrdiff_t);
+ // The chrome c++ library we use does not define std::max_align_t.
+ // This must be conservative to add the right amount of extra memory to handle the alignment
+ // padding.
+ constexpr uint32_t alignof_max_align_t = 8;
+ constexpr uint32_t maxSize = std::numeric_limits<uint32_t>::max();
+ constexpr uint32_t overhead = headerSize + sizeof(Footer);
+ AssertRelease(size <= maxSize - overhead);
+ uint32_t objSizeAndOverhead = size + overhead;
+ if (alignment > alignof_max_align_t) {
+ uint32_t alignmentOverhead = alignment - 1;
+ AssertRelease(objSizeAndOverhead <= maxSize - alignmentOverhead);
+ objSizeAndOverhead += alignmentOverhead;
+ }
+
+ uint32_t minAllocationSize;
+ if (fFirstHeapAllocationSize <= maxSize / fFib0) {
+ minAllocationSize = fFirstHeapAllocationSize * fFib0;
+ fFib0 += fFib1;
+ std::swap(fFib0, fFib1);
+ } else {
+ minAllocationSize = maxSize;
+ }
+ uint32_t allocationSize = std::max(objSizeAndOverhead, minAllocationSize);
+
+ // Round up to a nice size. If > 32K align to 4K boundary else up to max_align_t. The > 32K
+ // heuristic is from the JEMalloc behavior.
+ {
+ uint32_t mask = allocationSize > (1 << 15) ? (1 << 12) - 1 : 16 - 1;
+ AssertRelease(allocationSize <= maxSize - mask);
+ allocationSize = (allocationSize + mask) & ~mask;
+ }
+
+ char* newBlock = new char[allocationSize];
+
+ auto previousDtor = fDtorCursor;
+ fCursor = newBlock;
+ fDtorCursor = newBlock;
+ fEnd = fCursor + allocationSize;
+ this->installPtrFooter(NextBlock, previousDtor, 0);
+}
+
+char* VArenaAlloc::allocObjectWithFooter(uint32_t sizeIncludingFooter, uint32_t alignment) {
+ uintptr_t mask = alignment - 1;
+
+restart:
+ uint32_t skipOverhead = 0;
+ bool needsSkipFooter = fCursor != fDtorCursor;
+ if (needsSkipFooter) {
+ skipOverhead = sizeof(Footer) + sizeof(uint32_t);
+ }
+ char* objStart = (char*)((uintptr_t)(fCursor + skipOverhead + mask) & ~mask);
+ uint32_t totalSize = sizeIncludingFooter + skipOverhead;
+ //std::cout<<"non POD object size = "<<totalSize<<"\n";
+ if ((ptrdiff_t)totalSize > fEnd - objStart) {
+ this->ensureSpace(totalSize, alignment);
+ goto restart;
+ }
+
+ AssertRelease((ptrdiff_t)totalSize <= fEnd - objStart);
+
+ // Install a skip footer if needed, thus terminating a run of POD data. The calling code is
+ // responsible for installing the footer after the object.
+ if (needsSkipFooter) {
+ this->installUint32Footer(SkipPod, ToU32(fCursor - fDtorCursor), 0);
+ }
+
+ return objStart;
+}
diff --git a/vendor/github.com/Benau/go_rlottie/vector_varenaalloc.h b/vendor/github.com/Benau/go_rlottie/vector_varenaalloc.h
new file mode 100644
index 00000000..ed03b53f
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_varenaalloc.h
@@ -0,0 +1,232 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef VARENAALLOC_H
+#define VARENAALLOC_H
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <limits>
+#include <new>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+// SkArenaAlloc allocates object and destroys the allocated objects when destroyed. It's designed
+// to minimize the number of underlying block allocations. SkArenaAlloc allocates first out of an
+// (optional) user-provided block of memory, and when that's exhausted it allocates on the heap,
+// starting with an allocation of firstHeapAllocation bytes. If your data (plus a small overhead)
+// fits in the user-provided block, SkArenaAlloc never uses the heap, and if it fits in
+// firstHeapAllocation bytes, it'll use the heap only once. If 0 is specified for
+// firstHeapAllocation, then blockSize is used unless that too is 0, then 1024 is used.
+//
+// Examples:
+//
+// char block[mostCasesSize];
+// SkArenaAlloc arena(block, mostCasesSize);
+//
+// If mostCasesSize is too large for the stack, you can use the following pattern.
+//
+// std::unique_ptr<char[]> block{new char[mostCasesSize]};
+// SkArenaAlloc arena(block.get(), mostCasesSize, almostAllCasesSize);
+//
+// If the program only sometimes allocates memory, use the following pattern.
+//
+// SkArenaAlloc arena(nullptr, 0, almostAllCasesSize);
+//
+// The storage does not necessarily need to be on the stack. Embedding the storage in a class also
+// works.
+//
+// class Foo {
+// char storage[mostCasesSize];
+// SkArenaAlloc arena (storage, mostCasesSize);
+// };
+//
+// In addition, the system is optimized to handle POD data including arrays of PODs (where
+// POD is really data with no destructors). For POD data it has zero overhead per item, and a
+// typical per block overhead of 8 bytes. For non-POD objects there is a per item overhead of 4
+// bytes. For arrays of non-POD objects there is a per array overhead of typically 8 bytes. There
+// is an addition overhead when switching from POD data to non-POD data of typically 8 bytes.
+//
+// If additional blocks are needed they are increased exponentially. This strategy bounds the
+// recursion of the RunDtorsOnBlock to be limited to O(log size-of-memory). Block size grow using
+// the Fibonacci sequence which means that for 2^32 memory there are 48 allocations, and for 2^48
+// there are 71 allocations.
+class VArenaAlloc {
+public:
+ VArenaAlloc(char* block, size_t blockSize, size_t firstHeapAllocation);
+
+ explicit VArenaAlloc(size_t firstHeapAllocation)
+ : VArenaAlloc(nullptr, 0, firstHeapAllocation)
+ {}
+
+ ~VArenaAlloc();
+
+ template <typename T, typename... Args>
+ T* make(Args&&... args) {
+ uint32_t size = ToU32(sizeof(T));
+ uint32_t alignment = ToU32(alignof(T));
+ char* objStart;
+ if (std::is_trivially_destructible<T>::value) {
+ objStart = this->allocObject(size, alignment);
+ fCursor = objStart + size;
+ } else {
+ objStart = this->allocObjectWithFooter(size + sizeof(Footer), alignment);
+ // Can never be UB because max value is alignof(T).
+ uint32_t padding = ToU32(objStart - fCursor);
+
+ // Advance to end of object to install footer.
+ fCursor = objStart + size;
+ FooterAction* releaser = [](char* objEnd) {
+ char* objStart = objEnd - (sizeof(T) + sizeof(Footer));
+ ((T*)objStart)->~T();
+ return objStart;
+ };
+ this->installFooter(releaser, padding);
+ }
+
+ // This must be last to make objects with nested use of this allocator work.
+ return new(objStart) T(std::forward<Args>(args)...);
+ }
+
+ template <typename T>
+ T* makeArrayDefault(size_t count) {
+ uint32_t safeCount = ToU32(count);
+ T* array = (T*)this->commonArrayAlloc<T>(safeCount);
+
+ // If T is primitive then no initialization takes place.
+ for (size_t i = 0; i < safeCount; i++) {
+ new (&array[i]) T;
+ }
+ return array;
+ }
+
+ template <typename T>
+ T* makeArray(size_t count) {
+ uint32_t safeCount = ToU32(count);
+ T* array = (T*)this->commonArrayAlloc<T>(safeCount);
+
+ // If T is primitive then the memory is initialized. For example, an array of chars will
+ // be zeroed.
+ for (size_t i = 0; i < safeCount; i++) {
+ new (&array[i]) T();
+ }
+ return array;
+ }
+
+ // Only use makeBytesAlignedTo if none of the typed variants are impractical to use.
+ void* makeBytesAlignedTo(size_t size, size_t align) {
+ auto objStart = this->allocObject(ToU32(size), ToU32(align));
+ fCursor = objStart + size;
+ return objStart;
+ }
+
+ // Destroy all allocated objects, free any heap allocations.
+ void reset();
+
+private:
+ static void AssertRelease(bool cond) { if (!cond) { ::abort(); } }
+ static uint32_t ToU32(size_t v) {
+ return (uint32_t)v;
+ }
+
+ using Footer = int64_t;
+ using FooterAction = char* (char*);
+
+ static char* SkipPod(char* footerEnd);
+ static void RunDtorsOnBlock(char* footerEnd);
+ static char* NextBlock(char* footerEnd);
+
+ void installFooter(FooterAction* releaser, uint32_t padding);
+ void installUint32Footer(FooterAction* action, uint32_t value, uint32_t padding);
+ void installPtrFooter(FooterAction* action, char* ptr, uint32_t padding);
+
+ void ensureSpace(uint32_t size, uint32_t alignment);
+
+ char* allocObject(uint32_t size, uint32_t alignment) {
+ uintptr_t mask = alignment - 1;
+ uintptr_t alignedOffset = (~reinterpret_cast<uintptr_t>(fCursor) + 1) & mask;
+ uintptr_t totalSize = size + alignedOffset;
+ AssertRelease(totalSize >= size);
+
+ if (totalSize > static_cast<uintptr_t>(fEnd - fCursor)) {
+ this->ensureSpace(size, alignment);
+ alignedOffset = (~reinterpret_cast<uintptr_t>(fCursor) + 1) & mask;
+ }
+ return fCursor + alignedOffset;
+ }
+
+ char* allocObjectWithFooter(uint32_t sizeIncludingFooter, uint32_t alignment);
+
+ template <typename T>
+ char* commonArrayAlloc(uint32_t count) {
+ char* objStart;
+ AssertRelease(count <= std::numeric_limits<uint32_t>::max() / sizeof(T));
+ uint32_t arraySize = ToU32(count * sizeof(T));
+ uint32_t alignment = ToU32(alignof(T));
+
+ if (std::is_trivially_destructible<T>::value) {
+ objStart = this->allocObject(arraySize, alignment);
+ fCursor = objStart + arraySize;
+ } else {
+ constexpr uint32_t overhead = sizeof(Footer) + sizeof(uint32_t);
+ AssertRelease(arraySize <= std::numeric_limits<uint32_t>::max() - overhead);
+ uint32_t totalSize = arraySize + overhead;
+ objStart = this->allocObjectWithFooter(totalSize, alignment);
+
+ // Can never be UB because max value is alignof(T).
+ uint32_t padding = ToU32(objStart - fCursor);
+
+ // Advance to end of array to install footer.?
+ fCursor = objStart + arraySize;
+ this->installUint32Footer(
+ [](char* footerEnd) {
+ char* objEnd = footerEnd - (sizeof(Footer) + sizeof(uint32_t));
+ uint32_t count;
+ memmove(&count, objEnd, sizeof(uint32_t));
+ char* objStart = objEnd - count * sizeof(T);
+ T* array = (T*) objStart;
+ for (uint32_t i = 0; i < count; i++) {
+ array[i].~T();
+ }
+ return objStart;
+ },
+ ToU32(count),
+ padding);
+ }
+
+ return objStart;
+ }
+
+ char* fDtorCursor;
+ char* fCursor;
+ char* fEnd;
+ char* const fFirstBlock;
+ const uint32_t fFirstSize;
+ const uint32_t fFirstHeapAllocationSize;
+
+ // Use the Fibonacci sequence as the growth factor for block size. The size of the block
+ // allocated is fFib0 * fFirstHeapAllocationSize. Using 2 ^ n * fFirstHeapAllocationSize
+ // had too much slop for Android.
+ uint32_t fFib0 {1}, fFib1 {1};
+};
+
+// Helper for defining allocators with inline/reserved storage.
+// For argument declarations, stick to the base type (SkArenaAlloc).
+template <size_t InlineStorageSize>
+class VSTArenaAlloc : public VArenaAlloc {
+public:
+ explicit VSTArenaAlloc(size_t firstHeapAllocation = InlineStorageSize)
+ : VArenaAlloc(fInlineStorage, InlineStorageSize, firstHeapAllocation) {}
+
+private:
+ char fInlineStorage[InlineStorageSize];
+};
+
+#endif // VARENAALLOC_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vbezier.cpp b/vendor/github.com/Benau/go_rlottie/vector_vbezier.cpp
new file mode 100644
index 00000000..3aa324e7
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vbezier.cpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "vector_vbezier.h"
+#include <cmath>
+#include "vector_vline.h"
+
+V_BEGIN_NAMESPACE
+
+VBezier VBezier::fromPoints(const VPointF &p1, const VPointF &p2,
+ const VPointF &p3, const VPointF &p4)
+{
+ VBezier b;
+ b.x1 = p1.x();
+ b.y1 = p1.y();
+ b.x2 = p2.x();
+ b.y2 = p2.y();
+ b.x3 = p3.x();
+ b.y3 = p3.y();
+ b.x4 = p4.x();
+ b.y4 = p4.y();
+ return b;
+}
+
+float VBezier::length() const
+{
+ const auto len = VLine::length(x1, y1, x2, y2) +
+ VLine::length(x2, y2, x3, y3) +
+ VLine::length(x3, y3, x4, y4);
+
+ const auto chord = VLine::length(x1, y1, x4, y4);
+
+ if ((len - chord) > 0.01) {
+ VBezier left, right;
+ split(&left, &right);
+ return left.length() + right.length();
+ }
+
+ return len;
+}
+
+VBezier VBezier::onInterval(float t0, float t1) const
+{
+ if (t0 == 0 && t1 == 1) return *this;
+
+ VBezier bezier = *this;
+
+ VBezier result;
+ bezier.parameterSplitLeft(t0, &result);
+ float trueT = (t1 - t0) / (1 - t0);
+ bezier.parameterSplitLeft(trueT, &result);
+
+ return result;
+}
+
+float VBezier::tAtLength(float l, float totalLength) const
+{
+ float t = 1.0;
+ const float error = 0.01f;
+ if (l > totalLength || vCompare(l, totalLength)) return t;
+
+ t *= 0.5;
+
+ float lastBigger = 1.0;
+ for (int num = 0; num < 100500; num++) {
+ VBezier right = *this;
+ VBezier left;
+ right.parameterSplitLeft(t, &left);
+ float lLen = left.length();
+ if (fabs(lLen - l) < error) return t;
+
+ if (lLen < l) {
+ t += (lastBigger - t) * 0.5f;
+ } else {
+ lastBigger = t;
+ t -= t * 0.5f;
+ }
+ }
+ vWarning << "no convergence";
+ return t;
+}
+
+void VBezier::splitAtLength(float len, VBezier *left, VBezier *right)
+{
+ float t;
+
+ *right = *this;
+ t = right->tAtLength(len);
+ right->parameterSplitLeft(t, left);
+}
+
+VPointF VBezier::derivative(float t) const
+{
+ // p'(t) = 3 * (-(1-2t+t^2) * p0 + (1 - 4 * t + 3 * t^2) * p1 + (2 * t - 3 *
+ // t^2) * p2 + t^2 * p3)
+
+ float m_t = 1.0f - t;
+
+ float d = t * t;
+ float a = -m_t * m_t;
+ float b = 1 - 4 * t + 3 * d;
+ float c = 2 * t - 3 * d;
+
+ return 3 * VPointF(a * x1 + b * x2 + c * x3 + d * x4,
+ a * y1 + b * y2 + c * y3 + d * y4);
+}
+
+float VBezier::angleAt(float t) const
+{
+ if (t < 0 || t > 1) {
+ return 0;
+ }
+ return VLine({}, derivative(t)).angle();
+}
+
+V_END_NAMESPACE
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vbezier.h b/vendor/github.com/Benau/go_rlottie/vector_vbezier.h
new file mode 100644
index 00000000..1210d27a
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vbezier.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef VBEZIER_H
+#define VBEZIER_H
+
+#include "vector_vpoint.h"
+
+V_BEGIN_NAMESPACE
+
+class VBezier {
+public:
+ VBezier() = default;
+ VPointF pointAt(float t) const;
+ float angleAt(float t) const;
+ VBezier onInterval(float t0, float t1) const;
+ float length() const;
+ static void coefficients(float t, float &a, float &b, float &c, float &d);
+ static VBezier fromPoints(const VPointF &start, const VPointF &cp1,
+ const VPointF &cp2, const VPointF &end);
+ inline void parameterSplitLeft(float t, VBezier *left);
+ inline void split(VBezier *firstHalf, VBezier *secondHalf) const;
+ float tAtLength(float len) const { return tAtLength(len , length());}
+ float tAtLength(float len, float totalLength) const;
+ void splitAtLength(float len, VBezier *left, VBezier *right);
+ VPointF pt1() const { return {x1, y1}; }
+ VPointF pt2() const { return {x2, y2}; }
+ VPointF pt3() const { return {x3, y3}; }
+ VPointF pt4() const { return {x4, y4}; }
+
+private:
+ VPointF derivative(float t) const;
+ float x1, y1, x2, y2, x3, y3, x4, y4;
+};
+
+inline void VBezier::coefficients(float t, float &a, float &b, float &c,
+ float &d)
+{
+ float m_t = 1.0f - t;
+ b = m_t * m_t;
+ c = t * t;
+ d = c * t;
+ a = b * m_t;
+ b *= 3.0f * t;
+ c *= 3.0f * m_t;
+}
+
+inline VPointF VBezier::pointAt(float t) const
+{
+ // numerically more stable:
+ float x, y;
+
+ float m_t = 1.0f - t;
+ {
+ float a = x1 * m_t + x2 * t;
+ float b = x2 * m_t + x3 * t;
+ float c = x3 * m_t + x4 * t;
+ a = a * m_t + b * t;
+ b = b * m_t + c * t;
+ x = a * m_t + b * t;
+ }
+ {
+ float a = y1 * m_t + y2 * t;
+ float b = y2 * m_t + y3 * t;
+ float c = y3 * m_t + y4 * t;
+ a = a * m_t + b * t;
+ b = b * m_t + c * t;
+ y = a * m_t + b * t;
+ }
+ return {x, y};
+}
+
+inline void VBezier::parameterSplitLeft(float t, VBezier *left)
+{
+ left->x1 = x1;
+ left->y1 = y1;
+
+ left->x2 = x1 + t * (x2 - x1);
+ left->y2 = y1 + t * (y2 - y1);
+
+ left->x3 = x2 + t * (x3 - x2); // temporary holding spot
+ left->y3 = y2 + t * (y3 - y2); // temporary holding spot
+
+ x3 = x3 + t * (x4 - x3);
+ y3 = y3 + t * (y4 - y3);
+
+ x2 = left->x3 + t * (x3 - left->x3);
+ y2 = left->y3 + t * (y3 - left->y3);
+
+ left->x3 = left->x2 + t * (left->x3 - left->x2);
+ left->y3 = left->y2 + t * (left->y3 - left->y2);
+
+ left->x4 = x1 = left->x3 + t * (x2 - left->x3);
+ left->y4 = y1 = left->y3 + t * (y2 - left->y3);
+}
+
+inline void VBezier::split(VBezier *firstHalf, VBezier *secondHalf) const
+{
+ float c = (x2 + x3) * 0.5f;
+ firstHalf->x2 = (x1 + x2) * 0.5f;
+ secondHalf->x3 = (x3 + x4) * 0.5f;
+ firstHalf->x1 = x1;
+ secondHalf->x4 = x4;
+ firstHalf->x3 = (firstHalf->x2 + c) * 0.5f;
+ secondHalf->x2 = (secondHalf->x3 + c) * 0.5f;
+ firstHalf->x4 = secondHalf->x1 = (firstHalf->x3 + secondHalf->x2) * 0.5f;
+
+ c = (y2 + y3) / 2;
+ firstHalf->y2 = (y1 + y2) * 0.5f;
+ secondHalf->y3 = (y3 + y4) * 0.5f;
+ firstHalf->y1 = y1;
+ secondHalf->y4 = y4;
+ firstHalf->y3 = (firstHalf->y2 + c) * 0.5f;
+ secondHalf->y2 = (secondHalf->y3 + c) * 0.5f;
+ firstHalf->y4 = secondHalf->y1 = (firstHalf->y3 + secondHalf->y2) * 0.5f;
+}
+
+V_END_NAMESPACE
+
+#endif // VBEZIER_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vbitmap.cpp b/vendor/github.com/Benau/go_rlottie/vector_vbitmap.cpp
new file mode 100644
index 00000000..cadae578
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vbitmap.cpp
@@ -0,0 +1,219 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "vector_vbitmap.h"
+#include <string>
+#include <memory>
+#include "vector_vdrawhelper.h"
+#include "vector_vglobal.h"
+
+V_BEGIN_NAMESPACE
+
+void VBitmap::Impl::reset(size_t width, size_t height, VBitmap::Format format)
+{
+ mRoData = nullptr;
+ mWidth = uint(width);
+ mHeight = uint(height);
+ mFormat = format;
+
+ mDepth = depth(format);
+ mStride = ((mWidth * mDepth + 31) >> 5)
+ << 2; // bytes per scanline (must be multiple of 4)
+ mOwnData = std::make_unique<uchar[]>(mStride * mHeight);
+}
+
+void VBitmap::Impl::reset(uchar *data, size_t width, size_t height, size_t bytesPerLine,
+ VBitmap::Format format)
+{
+ mRoData = data;
+ mWidth = uint(width);
+ mHeight = uint(height);
+ mStride = uint(bytesPerLine);
+ mFormat = format;
+ mDepth = depth(format);
+ mOwnData = nullptr;
+}
+
+uchar VBitmap::Impl::depth(VBitmap::Format format)
+{
+ uchar depth = 1;
+ switch (format) {
+ case VBitmap::Format::Alpha8:
+ depth = 8;
+ break;
+ case VBitmap::Format::ARGB32:
+ case VBitmap::Format::ARGB32_Premultiplied:
+ depth = 32;
+ break;
+ default:
+ break;
+ }
+ return depth;
+}
+
+void VBitmap::Impl::fill(uint /*pixel*/)
+{
+ //@TODO
+}
+
+void VBitmap::Impl::updateLuma()
+{
+ if (mFormat != VBitmap::Format::ARGB32_Premultiplied) return;
+ auto dataPtr = data();
+ for (uint col = 0; col < mHeight; col++) {
+ uint *pixel = (uint *)(dataPtr + mStride * col);
+ for (uint row = 0; row < mWidth; row++) {
+ int alpha = vAlpha(*pixel);
+ if (alpha == 0) {
+ pixel++;
+ continue;
+ }
+
+ int red = vRed(*pixel);
+ int green = vGreen(*pixel);
+ int blue = vBlue(*pixel);
+
+ if (alpha != 255) {
+ // un multiply
+ red = (red * 255) / alpha;
+ green = (green * 255) / alpha;
+ blue = (blue * 255) / alpha;
+ }
+ int luminosity = int(0.299f * red + 0.587f * green + 0.114f * blue);
+ *pixel = luminosity << 24;
+ pixel++;
+ }
+ }
+}
+
+VBitmap::VBitmap(size_t width, size_t height, VBitmap::Format format)
+{
+ if (width <= 0 || height <= 0 || format == Format::Invalid) return;
+
+ mImpl = rc_ptr<Impl>(width, height, format);
+}
+
+VBitmap::VBitmap(uchar *data, size_t width, size_t height, size_t bytesPerLine,
+ VBitmap::Format format)
+{
+ if (!data || width <= 0 || height <= 0 || bytesPerLine <= 0 ||
+ format == Format::Invalid)
+ return;
+
+ mImpl = rc_ptr<Impl>(data, width, height, bytesPerLine, format);
+}
+
+void VBitmap::reset(uchar *data, size_t w, size_t h, size_t bytesPerLine,
+ VBitmap::Format format)
+{
+ if (mImpl) {
+ mImpl->reset(data, w, h, bytesPerLine, format);
+ } else {
+ mImpl = rc_ptr<Impl>(data, w, h, bytesPerLine, format);
+ }
+}
+
+void VBitmap::reset(size_t w, size_t h, VBitmap::Format format)
+{
+ if (mImpl) {
+ if (w == mImpl->width() && h == mImpl->height() &&
+ format == mImpl->format()) {
+ return;
+ }
+ mImpl->reset(w, h, format);
+ } else {
+ mImpl = rc_ptr<Impl>(w, h, format);
+ }
+}
+
+size_t VBitmap::stride() const
+{
+ return mImpl ? mImpl->stride() : 0;
+}
+
+size_t VBitmap::width() const
+{
+ return mImpl ? mImpl->width() : 0;
+}
+
+size_t VBitmap::height() const
+{
+ return mImpl ? mImpl->height() : 0;
+}
+
+size_t VBitmap::depth() const
+{
+ return mImpl ? mImpl->mDepth : 0;
+}
+
+uchar *VBitmap::data()
+{
+ return mImpl ? mImpl->data() : nullptr;
+}
+
+uchar *VBitmap::data() const
+{
+ return mImpl ? mImpl->data() : nullptr;
+}
+
+VRect VBitmap::rect() const
+{
+ return mImpl ? mImpl->rect() : VRect();
+}
+
+VSize VBitmap::size() const
+{
+ return mImpl ? mImpl->size() : VSize();
+}
+
+bool VBitmap::valid() const
+{
+ return mImpl;
+}
+
+VBitmap::Format VBitmap::format() const
+{
+ return mImpl ? mImpl->format() : VBitmap::Format::Invalid;
+}
+
+void VBitmap::fill(uint pixel)
+{
+ if (mImpl) mImpl->fill(pixel);
+}
+
+/*
+ * This is special function which converts
+ * RGB value to Luminosity and stores it in
+ * the Alpha component of the pixel.
+ * After this conversion the bitmap data is no more
+ * in RGB space. but the Alpha component contains the
+ * Luminosity value of the pixel in HSL color space.
+ * NOTE: this api has its own special usecase
+ * make sure you know what you are doing before using
+ * this api.
+ */
+void VBitmap::updateLuma()
+{
+ if (mImpl) mImpl->updateLuma();
+}
+
+V_END_NAMESPACE
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vbitmap.h b/vendor/github.com/Benau/go_rlottie/vector_vbitmap.h
new file mode 100644
index 00000000..0236e34d
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vbitmap.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef VBITMAP_H
+#define VBITMAP_H
+
+#include "vector_vrect.h"
+#include "vector_vsharedptr.h"
+
+V_BEGIN_NAMESPACE
+
+class VBitmap {
+public:
+ enum class Format: uchar {
+ Invalid,
+ Alpha8,
+ ARGB32,
+ ARGB32_Premultiplied
+ };
+
+ VBitmap() = default;
+ VBitmap(size_t w, size_t h, VBitmap::Format format);
+ VBitmap(uchar *data, size_t w, size_t h, size_t bytesPerLine, VBitmap::Format format);
+ void reset(uchar *data, size_t w, size_t h, size_t stride, VBitmap::Format format);
+ void reset(size_t w, size_t h, VBitmap::Format format=Format::ARGB32_Premultiplied);
+ size_t stride() const;
+ size_t width() const;
+ size_t height() const;
+ size_t depth() const;
+ VBitmap::Format format() const;
+ bool valid() const;
+ uchar * data();
+ uchar * data() const;
+ VRect rect() const;
+ VSize size() const;
+ void fill(uint pixel);
+ void updateLuma();
+private:
+ struct Impl {
+ std::unique_ptr<uchar[]> mOwnData{nullptr};
+ uchar * mRoData{nullptr};
+ uint mWidth{0};
+ uint mHeight{0};
+ uint mStride{0};
+ uchar mDepth{0};
+ VBitmap::Format mFormat{VBitmap::Format::Invalid};
+
+ explicit Impl(size_t width, size_t height, VBitmap::Format format)
+ {
+ reset(width, height, format);
+ }
+ explicit Impl(uchar *data, size_t w, size_t h, size_t bytesPerLine, VBitmap::Format format)
+ {
+ reset(data, w, h, bytesPerLine, format);
+ }
+ VRect rect() const { return VRect(0, 0, mWidth, mHeight);}
+ VSize size() const { return VSize(mWidth, mHeight); }
+ size_t stride() const { return mStride; }
+ size_t width() const { return mWidth; }
+ size_t height() const { return mHeight; }
+ uchar * data() { return mRoData ? mRoData : mOwnData.get(); }
+ VBitmap::Format format() const { return mFormat; }
+ void reset(uchar *, size_t, size_t, size_t, VBitmap::Format);
+ void reset(size_t, size_t, VBitmap::Format);
+ static uchar depth(VBitmap::Format format);
+ void fill(uint);
+ void updateLuma();
+ };
+
+ rc_ptr<Impl> mImpl;
+};
+
+V_END_NAMESPACE
+
+#endif // VBITMAP_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vbrush.cpp b/vendor/github.com/Benau/go_rlottie/vector_vbrush.cpp
new file mode 100644
index 00000000..6f1e5394
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vbrush.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "vector_vbrush.h"
+
+V_BEGIN_NAMESPACE
+
+VGradient::VGradient(VGradient::Type type)
+ : mType(type)
+{
+ if (mType == Type::Linear)
+ linear.x1 = linear.y1 = linear.x2 = linear.y2 = 0.0f;
+ else
+ radial.cx = radial.cy = radial.fx =
+ radial.fy = radial.cradius = radial.fradius = 0.0f;
+}
+
+void VGradient::setStops(const VGradientStops &stops)
+{
+ mStops = stops;
+}
+
+VBrush::VBrush(const VColor &color) : mType(VBrush::Type::Solid), mColor(color)
+{
+}
+
+VBrush::VBrush(uchar r, uchar g, uchar b, uchar a)
+ : mType(VBrush::Type::Solid), mColor(r, g, b, a)
+
+{
+}
+
+VBrush::VBrush(const VGradient *gradient)
+{
+ if (!gradient) return;
+
+ mGradient = gradient;
+
+ if (gradient->mType == VGradient::Type::Linear) {
+ mType = VBrush::Type::LinearGradient;
+ } else if (gradient->mType == VGradient::Type::Radial) {
+ mType = VBrush::Type::RadialGradient;
+ }
+}
+
+VBrush::VBrush(const VTexture *texture):mType(VBrush::Type::Texture), mTexture(texture)
+{
+}
+
+V_END_NAMESPACE
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vbrush.h b/vendor/github.com/Benau/go_rlottie/vector_vbrush.h
new file mode 100644
index 00000000..7cede06a
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vbrush.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef VBRUSH_H
+#define VBRUSH_H
+
+#include <vector>
+#include "vector_vglobal.h"
+#include "vector_vmatrix.h"
+#include "vector_vpoint.h"
+#include "vector_vbitmap.h"
+
+V_BEGIN_NAMESPACE
+
+using VGradientStop = std::pair<float, VColor>;
+using VGradientStops = std::vector<VGradientStop>;
+class VGradient {
+public:
+ enum class Mode { Absolute, Relative };
+ enum class Spread { Pad, Repeat, Reflect };
+ enum class Type { Linear, Radial };
+ explicit VGradient(VGradient::Type type);
+ void setStops(const VGradientStops &stops);
+ void setAlpha(float alpha) {mAlpha = alpha;}
+ float alpha() const {return mAlpha;}
+
+public:
+ static constexpr int colorTableSize = 1024;
+ VGradient::Type mType{Type::Linear};
+ VGradient::Spread mSpread{Spread::Pad};
+ VGradient::Mode mMode{Mode::Absolute};
+ VGradientStops mStops;
+ float mAlpha{1.0};
+ struct Linear{
+ float x1{0}, y1{0}, x2{0}, y2{0};
+ };
+ struct Radial{
+ float cx{0}, cy{0}, fx{0}, fy{0}, cradius{0}, fradius{0};
+ };
+ union {
+ Linear linear;
+ Radial radial;
+ };
+ VMatrix mMatrix;
+};
+
+struct VTexture {
+ VBitmap mBitmap;
+ VMatrix mMatrix;
+ int mAlpha{255};
+};
+
+class VBrush {
+public:
+ enum class Type { NoBrush, Solid, LinearGradient, RadialGradient, Texture };
+ VBrush():mType(Type::NoBrush),mColor(){};
+ explicit VBrush(const VColor &color);
+ explicit VBrush(const VGradient *gradient);
+ explicit VBrush(uchar r, uchar g, uchar b, uchar a);
+ explicit VBrush(const VTexture *texture);
+ inline VBrush::Type type() const { return mType; }
+public:
+ VBrush::Type mType{Type::NoBrush};
+ union {
+ VColor mColor{};
+ const VGradient *mGradient;
+ const VTexture *mTexture;
+ };
+};
+
+V_END_NAMESPACE
+
+#endif // VBRUSH_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vcowptr.h b/vendor/github.com/Benau/go_rlottie/vector_vcowptr.h
new file mode 100644
index 00000000..af2c7f24
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vcowptr.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef VCOWPTR_H
+#define VCOWPTR_H
+
+#include <cassert>
+#include <atomic>
+
+template <typename T>
+class vcow_ptr {
+ struct model {
+ std::atomic<std::size_t> mRef{1};
+
+ model() = default;
+
+ template <class... Args>
+ explicit model(Args&&... args) : mValue(std::forward<Args>(args)...){}
+ explicit model(const T& other) : mValue(other){}
+
+ T mValue;
+ };
+ model* mModel;
+
+public:
+ using element_type = T;
+
+ vcow_ptr()
+ {
+ static model default_s;
+ mModel = &default_s;
+ ++mModel->mRef;
+ }
+
+ ~vcow_ptr()
+ {
+ if (mModel && (--mModel->mRef == 0)) delete mModel;
+ }
+
+ template <class... Args>
+ explicit vcow_ptr(Args&&... args) : mModel(new model(std::forward<Args>(args)...))
+ {
+ }
+
+ vcow_ptr(const vcow_ptr& x) noexcept : mModel(x.mModel)
+ {
+ assert(mModel);
+ ++mModel->mRef;
+ }
+ vcow_ptr(vcow_ptr&& x) noexcept : mModel(x.mModel)
+ {
+ assert(mModel);
+ x.mModel = nullptr;
+ }
+
+ auto operator=(const vcow_ptr& x) noexcept -> vcow_ptr&
+ {
+ *this = vcow_ptr(x);
+ return *this;
+ }
+
+ auto operator=(vcow_ptr&& x) noexcept -> vcow_ptr&
+ {
+ auto tmp = std::move(x);
+ swap(*this, tmp);
+ return *this;
+ }
+
+ auto operator*() const noexcept -> const element_type& { return read(); }
+
+ auto operator-> () const noexcept -> const element_type* { return &read(); }
+
+ std::size_t refCount() const noexcept
+ {
+ assert(mModel);
+
+ return mModel->mRef;
+ }
+
+ bool unique() const noexcept
+ {
+ assert(mModel);
+
+ return mModel->mRef == 1;
+ }
+
+ auto write() -> element_type&
+ {
+ if (!unique()) *this = vcow_ptr(read());
+
+ return mModel->mValue;
+ }
+
+ auto read() const noexcept -> const element_type&
+ {
+ assert(mModel);
+
+ return mModel->mValue;
+ }
+
+ friend inline void swap(vcow_ptr& x, vcow_ptr& y) noexcept
+ {
+ std::swap(x.mModel, y.mModel);
+ }
+};
+
+#endif // VCOWPTR_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vdasher.cpp b/vendor/github.com/Benau/go_rlottie/vector_vdasher.cpp
new file mode 100644
index 00000000..b2bc9d1d
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vdasher.cpp
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "vector_vbezier.h"
+
+#include <cmath>
+
+#include "vector_vdasher.h"
+#include "vector_vline.h"
+
+V_BEGIN_NAMESPACE
+
+static constexpr float tolerance = 0.1f;
+VDasher::VDasher(const float *dashArray, size_t size)
+{
+ mDashArray = reinterpret_cast<const VDasher::Dash *>(dashArray);
+ mArraySize = size / 2;
+ if (size % 2) mDashOffset = dashArray[size - 1];
+ mIndex = 0;
+ mCurrentLength = 0;
+ mDiscard = false;
+ //if the dash array contains ZERO length
+ // segments or ZERO lengths gaps we could
+ // optimize those usecase.
+ for (size_t i = 0; i < mArraySize; i++) {
+ if (!vCompare(mDashArray[i].length, 0.0f))
+ mNoLength = false;
+ if (!vCompare(mDashArray[i].gap, 0.0f))
+ mNoGap = false;
+ }
+}
+
+void VDasher::moveTo(const VPointF &p)
+{
+ mDiscard = false;
+ mStartNewSegment = true;
+ mCurPt = p;
+ mIndex = 0;
+
+ if (!vCompare(mDashOffset, 0.0f)) {
+ float totalLength = 0.0;
+ for (size_t i = 0; i < mArraySize; i++) {
+ totalLength = mDashArray[i].length + mDashArray[i].gap;
+ }
+ float normalizeLen = std::fmod(mDashOffset, totalLength);
+ if (normalizeLen < 0.0f) {
+ normalizeLen = totalLength + normalizeLen;
+ }
+ // now the length is less than total length and +ve
+ // findout the current dash index , dashlength and gap.
+ for (size_t i = 0; i < mArraySize; i++) {
+ if (normalizeLen < mDashArray[i].length) {
+ mIndex = i;
+ mCurrentLength = mDashArray[i].length - normalizeLen;
+ mDiscard = false;
+ break;
+ }
+ normalizeLen -= mDashArray[i].length;
+ if (normalizeLen < mDashArray[i].gap) {
+ mIndex = i;
+ mCurrentLength = mDashArray[i].gap - normalizeLen;
+ mDiscard = true;
+ break;
+ }
+ normalizeLen -= mDashArray[i].gap;
+ }
+ } else {
+ mCurrentLength = mDashArray[mIndex].length;
+ }
+ if (vIsZero(mCurrentLength)) updateActiveSegment();
+}
+
+void VDasher::addLine(const VPointF &p)
+{
+ if (mDiscard) return;
+
+ if (mStartNewSegment) {
+ mResult->moveTo(mCurPt);
+ mStartNewSegment = false;
+ }
+ mResult->lineTo(p);
+}
+
+void VDasher::updateActiveSegment()
+{
+ mStartNewSegment = true;
+
+ if (mDiscard) {
+ mDiscard = false;
+ mIndex = (mIndex + 1) % mArraySize;
+ mCurrentLength = mDashArray[mIndex].length;
+ } else {
+ mDiscard = true;
+ mCurrentLength = mDashArray[mIndex].gap;
+ }
+ if (vIsZero(mCurrentLength)) updateActiveSegment();
+}
+
+void VDasher::lineTo(const VPointF &p)
+{
+ VLine left, right;
+ VLine line(mCurPt, p);
+ float length = line.length();
+
+ if (length <= mCurrentLength) {
+ mCurrentLength -= length;
+ addLine(p);
+ } else {
+ while (length > mCurrentLength) {
+ length -= mCurrentLength;
+ line.splitAtLength(mCurrentLength, left, right);
+
+ addLine(left.p2());
+ updateActiveSegment();
+
+ line = right;
+ mCurPt = line.p1();
+ }
+ // handle remainder
+ if (length > tolerance) {
+ mCurrentLength -= length;
+ addLine(line.p2());
+ }
+ }
+
+ if (mCurrentLength < tolerance) updateActiveSegment();
+
+ mCurPt = p;
+}
+
+void VDasher::addCubic(const VPointF &cp1, const VPointF &cp2, const VPointF &e)
+{
+ if (mDiscard) return;
+
+ if (mStartNewSegment) {
+ mResult->moveTo(mCurPt);
+ mStartNewSegment = false;
+ }
+ mResult->cubicTo(cp1, cp2, e);
+}
+
+void VDasher::cubicTo(const VPointF &cp1, const VPointF &cp2, const VPointF &e)
+{
+ VBezier left, right;
+ VBezier b = VBezier::fromPoints(mCurPt, cp1, cp2, e);
+ float bezLen = b.length();
+
+ if (bezLen <= mCurrentLength) {
+ mCurrentLength -= bezLen;
+ addCubic(cp1, cp2, e);
+ } else {
+ while (bezLen > mCurrentLength) {
+ bezLen -= mCurrentLength;
+ b.splitAtLength(mCurrentLength, &left, &right);
+
+ addCubic(left.pt2(), left.pt3(), left.pt4());
+ updateActiveSegment();
+
+ b = right;
+ mCurPt = b.pt1();
+ }
+ // handle remainder
+ if (bezLen > tolerance) {
+ mCurrentLength -= bezLen;
+ addCubic(b.pt2(), b.pt3(), b.pt4());
+ }
+ }
+
+ if (mCurrentLength < tolerance) updateActiveSegment();
+
+ mCurPt = e;
+}
+
+void VDasher::dashHelper(const VPath &path, VPath &result)
+{
+ mResult = &result;
+ mResult->reserve(path.points().size(), path.elements().size());
+ mIndex = 0;
+ const std::vector<VPath::Element> &elms = path.elements();
+ const std::vector<VPointF> & pts = path.points();
+ const VPointF * ptPtr = pts.data();
+
+ for (auto &i : elms) {
+ switch (i) {
+ case VPath::Element::MoveTo: {
+ moveTo(*ptPtr++);
+ break;
+ }
+ case VPath::Element::LineTo: {
+ lineTo(*ptPtr++);
+ break;
+ }
+ case VPath::Element::CubicTo: {
+ cubicTo(*ptPtr, *(ptPtr + 1), *(ptPtr + 2));
+ ptPtr += 3;
+ break;
+ }
+ case VPath::Element::Close: {
+ // The point is already joined to start point in VPath
+ // no need to do anything here.
+ break;
+ }
+ }
+ }
+ mResult = nullptr;
+}
+
+void VDasher::dashed(const VPath &path, VPath &result)
+{
+ if (mNoLength && mNoGap) return result.reset();
+
+ if (path.empty() || mNoLength) return result.reset();
+
+ if (mNoGap) return result.clone(path);
+
+ result.reset();
+
+ dashHelper(path, result);
+}
+
+VPath VDasher::dashed(const VPath &path)
+{
+ if (mNoLength && mNoGap) return path;
+
+ if (path.empty() || mNoLength) return VPath();
+
+ if (mNoGap) return path;
+
+ VPath result;
+
+ dashHelper(path, result);
+
+ return result;
+}
+
+V_END_NAMESPACE
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vdasher.h b/vendor/github.com/Benau/go_rlottie/vector_vdasher.h
new file mode 100644
index 00000000..aa4bcb5e
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vdasher.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef VDASHER_H
+#define VDASHER_H
+#include "vector_vpath.h"
+
+V_BEGIN_NAMESPACE
+
+class VDasher {
+public:
+ VDasher(const float *dashArray, size_t size);
+ VPath dashed(const VPath &path);
+ void dashed(const VPath &path, VPath &result);
+
+private:
+ void moveTo(const VPointF &p);
+ void lineTo(const VPointF &p);
+ void cubicTo(const VPointF &cp1, const VPointF &cp2, const VPointF &e);
+ void close();
+ void addLine(const VPointF &p);
+ void addCubic(const VPointF &cp1, const VPointF &cp2, const VPointF &e);
+ void updateActiveSegment();
+
+private:
+ void dashHelper(const VPath &path, VPath &result);
+ struct Dash {
+ float length;
+ float gap;
+ };
+ const VDasher::Dash *mDashArray;
+ size_t mArraySize{0};
+ VPointF mCurPt;
+ size_t mIndex{0}; /* index to the dash Array */
+ float mCurrentLength;
+ float mDashOffset{0};
+ VPath *mResult{nullptr};
+ bool mDiscard{false};
+ bool mStartNewSegment{true};
+ bool mNoLength{true};
+ bool mNoGap{true};
+};
+
+V_END_NAMESPACE
+
+#endif // VDASHER_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vdebug.cpp b/vendor/github.com/Benau/go_rlottie/vector_vdebug.cpp
new file mode 100644
index 00000000..dc11f233
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vdebug.cpp
@@ -0,0 +1,758 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "vector_vdebug.h"
+
+#ifdef LOTTIE_LOGGING_SUPPORT
+
+#include <atomic>
+#include <chrono>
+#include <cstring>
+#include <ctime>
+#include <fstream>
+#include <memory>
+#include <queue>
+#include <sstream>
+#include <thread>
+#include <tuple>
+
+namespace {
+
+/* Returns microseconds since epoch */
+uint64_t timestamp_now()
+{
+ return std::chrono::duration_cast<std::chrono::microseconds>(
+ std::chrono::high_resolution_clock::now().time_since_epoch())
+ .count();
+}
+
+/* I want [2016-10-13 00:01:23.528514] */
+void format_timestamp(std::ostream& os, uint64_t timestamp)
+{
+ // The next 3 lines do not work on MSVC!
+ // auto duration = std::chrono::microseconds(timestamp);
+ // std::chrono::high_resolution_clock::time_point time_point(duration);
+ // std::time_t time_t =
+ // std::chrono::high_resolution_clock::to_time_t(time_point);
+ std::time_t time_t = timestamp / 1000000;
+ auto gmtime = std::gmtime(&time_t);
+ char buffer[32];
+ strftime(buffer, 32, "%Y-%m-%d %T.", gmtime);
+ char microseconds[7];
+ snprintf(microseconds, 7, "%06llu",
+ (long long unsigned int)timestamp % 1000000);
+ os << '[' << buffer << microseconds << ']';
+}
+
+std::thread::id this_thread_id()
+{
+ static thread_local const std::thread::id id = std::this_thread::get_id();
+ return id;
+}
+
+template <typename T, typename Tuple>
+struct TupleIndex;
+
+template <typename T, typename... Types>
+struct TupleIndex<T, std::tuple<T, Types...> > {
+ static constexpr const std::size_t value = 0;
+};
+
+template <typename T, typename U, typename... Types>
+struct TupleIndex<T, std::tuple<U, Types...> > {
+ static constexpr const std::size_t value =
+ 1 + TupleIndex<T, std::tuple<Types...> >::value;
+};
+
+} // anonymous namespace
+
+typedef std::tuple<char, uint32_t, uint64_t, int32_t, int64_t, double,
+ VDebug::string_literal_t, char*>
+ SupportedTypes;
+
+char const* to_string(LogLevel loglevel)
+{
+ switch (loglevel) {
+ case LogLevel::OFF:
+ return "OFF";
+ case LogLevel::INFO:
+ return "INFO";
+ case LogLevel::WARN:
+ return "WARN";
+ case LogLevel::CRIT:
+ return "CRIT";
+ }
+ return "XXXX";
+}
+
+template <typename Arg>
+void VDebug::encode(Arg arg)
+{
+ *reinterpret_cast<Arg*>(buffer()) = arg;
+ m_bytes_used += sizeof(Arg);
+}
+
+template <typename Arg>
+void VDebug::encode(Arg arg, uint8_t type_id)
+{
+ resize_buffer_if_needed(sizeof(Arg) + sizeof(uint8_t));
+ encode<uint8_t>(type_id);
+ encode<Arg>(arg);
+}
+
+VDebug::VDebug(LogLevel level, char const* file, char const* function,
+ uint32_t line)
+ : m_bytes_used(0), m_buffer_size(sizeof(m_stack_buffer))
+{
+ encode<uint64_t>(timestamp_now());
+ encode<std::thread::id>(this_thread_id());
+ encode<string_literal_t>(string_literal_t(file));
+ encode<string_literal_t>(string_literal_t(function));
+ encode<uint32_t>(line);
+ encode<LogLevel>(level);
+ if (level == LogLevel::INFO) {
+ m_logAll = false;
+ } else {
+ m_logAll = true;
+ }
+}
+
+VDebug::~VDebug() = default;
+
+void VDebug::stringify(std::ostream& os)
+{
+ char* b = !m_heap_buffer ? m_stack_buffer : m_heap_buffer.get();
+ char const* const end = b + m_bytes_used;
+ uint64_t timestamp = *reinterpret_cast<uint64_t*>(b);
+ b += sizeof(uint64_t);
+ std::thread::id threadid = *reinterpret_cast<std::thread::id*>(b);
+ b += sizeof(std::thread::id);
+ string_literal_t file = *reinterpret_cast<string_literal_t*>(b);
+ b += sizeof(string_literal_t);
+ string_literal_t function = *reinterpret_cast<string_literal_t*>(b);
+ b += sizeof(string_literal_t);
+ uint32_t line = *reinterpret_cast<uint32_t*>(b);
+ b += sizeof(uint32_t);
+ LogLevel loglevel = *reinterpret_cast<LogLevel*>(b);
+ b += sizeof(LogLevel);
+ if (m_logAll) {
+ format_timestamp(os, timestamp);
+
+ os << '[' << to_string(loglevel) << ']' << '[' << threadid << ']' << '['
+ << file.m_s << ':' << function.m_s << ':' << line << "] ";
+ }
+
+ stringify(os, b, end);
+ os << std::endl;
+
+ if (loglevel >= LogLevel::CRIT) os.flush();
+}
+
+template <typename Arg>
+char* decode(std::ostream& os, char* b, Arg* /*dummy*/)
+{
+ Arg arg = *reinterpret_cast<Arg*>(b);
+ os << arg;
+ return b + sizeof(Arg);
+}
+
+template <>
+char* decode(std::ostream& os, char* b, VDebug::string_literal_t* /*dummy*/)
+{
+ VDebug::string_literal_t s =
+ *reinterpret_cast<VDebug::string_literal_t*>(b);
+ os << s.m_s;
+ return b + sizeof(VDebug::string_literal_t);
+}
+
+template <>
+char* decode(std::ostream& os, char* b, char** /*dummy*/)
+{
+ while (*b != '\0') {
+ os << *b;
+ ++b;
+ }
+ return ++b;
+}
+
+void VDebug::stringify(std::ostream& os, char* start, char const* const end)
+{
+ if (start == end) return;
+
+ int type_id = static_cast<int>(*start);
+ start++;
+
+ switch (type_id) {
+ case 0:
+ stringify(
+ os,
+ decode(os, start,
+ static_cast<std::tuple_element<0, SupportedTypes>::type*>(
+ nullptr)),
+ end);
+ return;
+ case 1:
+ stringify(
+ os,
+ decode(os, start,
+ static_cast<std::tuple_element<1, SupportedTypes>::type*>(
+ nullptr)),
+ end);
+ return;
+ case 2:
+ stringify(
+ os,
+ decode(os, start,
+ static_cast<std::tuple_element<2, SupportedTypes>::type*>(
+ nullptr)),
+ end);
+ return;
+ case 3:
+ stringify(
+ os,
+ decode(os, start,
+ static_cast<std::tuple_element<3, SupportedTypes>::type*>(
+ nullptr)),
+ end);
+ return;
+ case 4:
+ stringify(
+ os,
+ decode(os, start,
+ static_cast<std::tuple_element<4, SupportedTypes>::type*>(
+ nullptr)),
+ end);
+ return;
+ case 5:
+ stringify(
+ os,
+ decode(os, start,
+ static_cast<std::tuple_element<5, SupportedTypes>::type*>(
+ nullptr)),
+ end);
+ return;
+ case 6:
+ stringify(
+ os,
+ decode(os, start,
+ static_cast<std::tuple_element<6, SupportedTypes>::type*>(
+ nullptr)),
+ end);
+ return;
+ case 7:
+ stringify(
+ os,
+ decode(os, start,
+ static_cast<std::tuple_element<7, SupportedTypes>::type*>(
+ nullptr)),
+ end);
+ return;
+ }
+}
+
+char* VDebug::buffer()
+{
+ return !m_heap_buffer ? &m_stack_buffer[m_bytes_used]
+ : &(m_heap_buffer.get())[m_bytes_used];
+}
+
+void VDebug::resize_buffer_if_needed(size_t additional_bytes)
+{
+ size_t const required_size = m_bytes_used + additional_bytes;
+
+ if (required_size <= m_buffer_size) return;
+
+ if (!m_heap_buffer) {
+ m_buffer_size = std::max(static_cast<size_t>(512), required_size);
+ m_heap_buffer = std::make_unique<char[]>(m_buffer_size);
+ memcpy(m_heap_buffer.get(), m_stack_buffer, m_bytes_used);
+ return;
+ } else {
+ m_buffer_size =
+ std::max(static_cast<size_t>(2 * m_buffer_size), required_size);
+ std::unique_ptr<char[]> new_heap_buffer(new char[m_buffer_size]);
+ memcpy(new_heap_buffer.get(), m_heap_buffer.get(), m_bytes_used);
+ m_heap_buffer.swap(new_heap_buffer);
+ }
+}
+
+void VDebug::encode(char const* arg)
+{
+ if (arg != nullptr) encode_c_string(arg, strlen(arg));
+}
+
+void VDebug::encode(char* arg)
+{
+ if (arg != nullptr) encode_c_string(arg, strlen(arg));
+}
+
+void VDebug::encode_c_string(char const* arg, size_t length)
+{
+ if (length == 0) return;
+
+ resize_buffer_if_needed(1 + length + 1);
+ char* b = buffer();
+ auto type_id = TupleIndex<char*, SupportedTypes>::value;
+ *reinterpret_cast<uint8_t*>(b++) = static_cast<uint8_t>(type_id);
+ memcpy(b, arg, length + 1);
+ m_bytes_used += 1 + length + 1;
+}
+
+void VDebug::encode(string_literal_t arg)
+{
+ encode<string_literal_t>(
+ arg, TupleIndex<string_literal_t, SupportedTypes>::value);
+}
+
+VDebug& VDebug::operator<<(std::string const& arg)
+{
+ encode_c_string(arg.c_str(), arg.length());
+ return *this;
+}
+
+VDebug& VDebug::operator<<(int32_t arg)
+{
+ encode<int32_t>(arg, TupleIndex<int32_t, SupportedTypes>::value);
+ return *this;
+}
+
+VDebug& VDebug::operator<<(uint32_t arg)
+{
+ encode<uint32_t>(arg, TupleIndex<uint32_t, SupportedTypes>::value);
+ return *this;
+}
+
+// VDebug& VDebug::operator<<(int64_t arg)
+// {
+// encode < int64_t >(arg, TupleIndex < int64_t, SupportedTypes >::value);
+// return *this;
+// }
+
+// VDebug& VDebug::operator<<(uint64_t arg)
+// {
+// encode < uint64_t >(arg, TupleIndex < uint64_t, SupportedTypes >::value);
+// return *this;
+// }
+VDebug& VDebug::operator<<(unsigned long arg)
+{
+ encode<uint64_t>(arg, TupleIndex<uint64_t, SupportedTypes>::value);
+ return *this;
+}
+
+VDebug& VDebug::operator<<(long arg)
+{
+ encode<int64_t>(arg, TupleIndex<int64_t, SupportedTypes>::value);
+ return *this;
+}
+
+VDebug& VDebug::operator<<(double arg)
+{
+ encode<double>(arg, TupleIndex<double, SupportedTypes>::value);
+ return *this;
+}
+
+VDebug& VDebug::operator<<(char arg)
+{
+ encode<char>(arg, TupleIndex<char, SupportedTypes>::value);
+ return *this;
+}
+
+struct BufferBase {
+ virtual ~BufferBase() = default;
+ virtual void push(VDebug&& logline) = 0;
+ virtual bool try_pop(VDebug& logline) = 0;
+};
+
+struct SpinLock {
+ SpinLock(std::atomic_flag& flag) : m_flag(flag)
+ {
+ while (m_flag.test_and_set(std::memory_order_acquire))
+ ;
+ }
+
+ ~SpinLock() { m_flag.clear(std::memory_order_release); }
+
+private:
+ std::atomic_flag& m_flag;
+};
+
+/* Multi Producer Single Consumer Ring Buffer */
+class RingBuffer : public BufferBase {
+public:
+ struct alignas(64) Item {
+ Item()
+ : flag(), written(0), logline(LogLevel::INFO, nullptr, nullptr, 0)
+ {
+ }
+
+ std::atomic_flag flag;
+ char written;
+ char padding[256 - sizeof(std::atomic_flag) - sizeof(char) -
+ sizeof(VDebug)];
+ VDebug logline;
+ };
+
+ RingBuffer(size_t const size)
+ : m_size(size),
+ m_ring(static_cast<Item*>(std::malloc(size * sizeof(Item)))),
+ m_write_index(0),
+ m_read_index(0)
+ {
+ for (size_t i = 0; i < m_size; ++i) {
+ new (&m_ring[i]) Item();
+ }
+ static_assert(sizeof(Item) == 256, "Unexpected size != 256");
+ }
+
+ ~RingBuffer() override
+ {
+ for (size_t i = 0; i < m_size; ++i) {
+ m_ring[i].~Item();
+ }
+ std::free(m_ring);
+ }
+
+ void push(VDebug&& logline) override
+ {
+ unsigned int write_index =
+ m_write_index.fetch_add(1, std::memory_order_relaxed) % m_size;
+ Item& item = m_ring[write_index];
+ SpinLock spinlock(item.flag);
+ item.logline = std::move(logline);
+ item.written = 1;
+ }
+
+ bool try_pop(VDebug& logline) override
+ {
+ Item& item = m_ring[m_read_index % m_size];
+ SpinLock spinlock(item.flag);
+ if (item.written == 1) {
+ logline = std::move(item.logline);
+ item.written = 0;
+ ++m_read_index;
+ return true;
+ }
+ return false;
+ }
+
+ RingBuffer(RingBuffer const&) = delete;
+ RingBuffer& operator=(RingBuffer const&) = delete;
+
+private:
+ size_t const m_size;
+ Item* m_ring;
+ std::atomic<unsigned int> m_write_index;
+
+public:
+ char pad[64];
+
+private:
+ unsigned int m_read_index;
+};
+
+class Buffer {
+public:
+ struct Item {
+ Item(VDebug&& logline) : logline(std::move(logline)) {}
+ char padding[256 - sizeof(VDebug)];
+ VDebug logline;
+ };
+
+ static constexpr const size_t size =
+ 32768; // 8MB. Helps reduce memory fragmentation
+
+ Buffer() : m_buffer(static_cast<Item*>(std::malloc(size * sizeof(Item))))
+ {
+ for (size_t i = 0; i <= size; ++i) {
+ m_write_state[i].store(0, std::memory_order_relaxed);
+ }
+ static_assert(sizeof(Item) == 256, "Unexpected size != 256");
+ }
+
+ ~Buffer()
+ {
+ unsigned int write_count = m_write_state[size].load();
+ for (size_t i = 0; i < write_count; ++i) {
+ m_buffer[i].~Item();
+ }
+ std::free(m_buffer);
+ }
+
+ // Returns true if we need to switch to next buffer
+ bool push(VDebug&& logline, unsigned int const write_index)
+ {
+ new (&m_buffer[write_index]) Item(std::move(logline));
+ m_write_state[write_index].store(1, std::memory_order_release);
+ return m_write_state[size].fetch_add(1, std::memory_order_acquire) +
+ 1 ==
+ size;
+ }
+
+ bool try_pop(VDebug& logline, unsigned int const read_index)
+ {
+ if (m_write_state[read_index].load(std::memory_order_acquire)) {
+ Item& item = m_buffer[read_index];
+ logline = std::move(item.logline);
+ return true;
+ }
+ return false;
+ }
+
+ Buffer(Buffer const&) = delete;
+ Buffer& operator=(Buffer const&) = delete;
+
+private:
+ Item* m_buffer;
+ std::atomic<unsigned int> m_write_state[size + 1];
+};
+
+// Guaranteed (lossless) log sink: an unbounded queue of fixed-size Buffers.
+// Multiple producers claim write slots via an atomic index; the producer that
+// fills a Buffer allocates and installs the next one. A single consumer
+// thread drains buffers front-to-back via try_pop().
+class QueueBuffer : public BufferBase {
+public:
+    QueueBuffer(QueueBuffer const&) = delete;
+    QueueBuffer& operator=(QueueBuffer const&) = delete;
+
+    QueueBuffer()
+        : m_current_read_buffer{nullptr},
+          m_write_index(0),
+          m_flag(),
+          m_read_index(0)
+    {
+        setup_next_write_buffer();
+    }
+
+    // Claim a slot in the current buffer. If the claimed index overflows the
+    // buffer, spin until the filling writer installs a fresh buffer (which
+    // resets m_write_index), then retry recursively.
+    void push(VDebug&& logline) override
+    {
+        unsigned int write_index =
+            m_write_index.fetch_add(1, std::memory_order_relaxed);
+        if (write_index < Buffer::size) {
+            if (m_current_write_buffer.load(std::memory_order_acquire)
+                    ->push(std::move(logline), write_index)) {
+                setup_next_write_buffer();
+            }
+        } else {
+            // Busy-wait: another writer is switching buffers right now.
+            while (m_write_index.load(std::memory_order_acquire) >=
+                   Buffer::size)
+                ;
+            push(std::move(logline));
+        }
+    }
+
+    // Consumer-side: pop the next committed line, retiring a fully drained
+    // buffer from the queue (under the spin lock) before moving on.
+    bool try_pop(VDebug& logline) override
+    {
+        if (m_current_read_buffer == nullptr)
+            m_current_read_buffer = get_next_read_buffer();
+
+        Buffer* read_buffer = m_current_read_buffer;
+
+        if (read_buffer == nullptr) return false;
+
+        if (read_buffer->try_pop(logline, m_read_index)) {
+            m_read_index++;
+            if (m_read_index == Buffer::size) {
+                m_read_index = 0;
+                m_current_read_buffer = nullptr;
+                SpinLock spinlock(m_flag);
+                m_buffers.pop();
+            }
+            return true;
+        }
+
+        return false;
+    }
+
+private:
+    // Allocate a fresh Buffer, publish it to writers, enqueue it for the
+    // reader, and reset the write index last so stalled writers may proceed.
+    void setup_next_write_buffer()
+    {
+        std::unique_ptr<Buffer> next_write_buffer(new Buffer());
+        m_current_write_buffer.store(next_write_buffer.get(),
+                                     std::memory_order_release);
+        SpinLock spinlock(m_flag);
+        m_buffers.push(std::move(next_write_buffer));
+        m_write_index.store(0, std::memory_order_relaxed);
+    }
+
+    // Front of the buffer queue, or nullptr when nothing is pending.
+    Buffer* get_next_read_buffer()
+    {
+        SpinLock spinlock(m_flag);
+        return m_buffers.empty() ? nullptr : m_buffers.front().get();
+    }
+
+private:
+    std::queue<std::unique_ptr<Buffer> > m_buffers;   // guarded by m_flag
+    std::atomic<Buffer*> m_current_write_buffer;      // writers' target
+    Buffer* m_current_read_buffer;                    // consumer-only
+    std::atomic<unsigned int> m_write_index;          // next free slot
+    std::atomic_flag m_flag;                          // spin-lock for m_buffers
+    unsigned int m_read_index;                        // consumer-only
+};
+
+// Writes stringified log lines to <directory><name>.<N>.txt, rolling to a
+// new numbered file whenever the current one exceeds the configured size.
+class FileWriter {
+public:
+    FileWriter(std::string const& log_directory,
+               std::string const& log_file_name, uint32_t log_file_roll_size_mb)
+        : m_log_file_roll_size_bytes(log_file_roll_size_mb * 1024 * 1024),
+          m_name(log_directory + log_file_name)
+    {
+        roll_file();   // open the first file eagerly
+    }
+
+    // Serialize one log line and roll the file if the size cap is crossed.
+    void write(VDebug& logline)
+    {
+        auto pos = m_os->tellp();
+        logline.stringify(*m_os);
+        m_bytes_written += m_os->tellp() - pos;
+        if (m_bytes_written > m_log_file_roll_size_bytes) {
+            roll_file();
+        }
+    }
+
+private:
+    // Close the current stream (if any) and open "<name>.<++N>.txt",
+    // truncating any pre-existing file of that name.
+    void roll_file()
+    {
+        if (m_os) {
+            m_os->flush();
+            m_os->close();
+        }
+
+        m_bytes_written = 0;
+        m_os = std::make_unique<std::ofstream>();
+        // TODO Optimize this part. Does it even matter ?
+        std::string log_file_name = m_name;
+        log_file_name.append(".");
+        log_file_name.append(std::to_string(++m_file_number));
+        log_file_name.append(".txt");
+        m_os->open(log_file_name, std::ofstream::out | std::ofstream::trunc);
+    }
+
+private:
+    uint32_t m_file_number = 0;                    // suffix of current file
+    std::streamoff m_bytes_written = 0;            // bytes in current file
+    uint32_t const m_log_file_roll_size_bytes;     // roll threshold
+    std::string const m_name;                      // directory + base name
+    std::unique_ptr<std::ofstream> m_os;           // current output stream
+};
+
+// Owns the producer-facing buffer (ring or queue) plus a background thread
+// that drains it into a FileWriter. Destruction flips the state to SHUTDOWN
+// and joins the thread, which flushes all remaining entries first.
+class NanoLogger {
+public:
+    // Non-guaranteed variant: ring buffer may drop lines under pressure.
+    // Entry count = mb * 1024 * 1024 / 256 bytes-per-line == mb * 1024 * 4.
+    NanoLogger(NonGuaranteedLogger ngl, std::string const& log_directory,
+               std::string const& log_file_name, uint32_t log_file_roll_size_mb)
+        : m_state(State::INIT),
+          m_buffer_base(
+              new RingBuffer(std::max(1u, ngl.ring_buffer_size_mb) * 1024 * 4)),
+          m_file_writer(log_directory, log_file_name,
+                        std::max(1u, log_file_roll_size_mb)),
+          m_thread(&NanoLogger::pop, this)
+    {
+        m_state.store(State::READY, std::memory_order_release);
+    }
+
+    // Guaranteed variant: unbounded QueueBuffer, no line is ever dropped.
+    NanoLogger(GuaranteedLogger /*gl*/, std::string const& log_directory,
+               std::string const& log_file_name, uint32_t log_file_roll_size_mb)
+        : m_state(State::INIT),
+          m_buffer_base(new QueueBuffer()),
+          m_file_writer(log_directory, log_file_name,
+                        std::max(1u, log_file_roll_size_mb)),
+          m_thread(&NanoLogger::pop, this)
+    {
+        m_state.store(State::READY, std::memory_order_release);
+    }
+
+    ~NanoLogger()
+    {
+        m_state.store(State::SHUTDOWN);
+        m_thread.join();   // pop() drains everything before returning
+    }
+
+    // Producer entry point; called from any thread.
+    void add(VDebug&& logline) { m_buffer_base->push(std::move(logline)); }
+
+    // Consumer loop run on m_thread.
+    void pop()
+    {
+        // Wait for constructor to complete and pull all stores done there to
+        // this thread / core.
+        while (m_state.load(std::memory_order_acquire) == State::INIT)
+            std::this_thread::sleep_for(std::chrono::microseconds(50));
+
+        VDebug logline(LogLevel::INFO, nullptr, nullptr, 0);
+
+        while (m_state.load() == State::READY) {
+            if (m_buffer_base->try_pop(logline))
+                m_file_writer.write(logline);
+            else
+                std::this_thread::sleep_for(std::chrono::microseconds(50));
+        }
+
+        // Pop and log all remaining entries
+        while (m_buffer_base->try_pop(logline)) {
+            m_file_writer.write(logline);
+        }
+    }
+
+private:
+    enum class State { INIT, READY, SHUTDOWN };
+
+    std::atomic<State> m_state;
+    std::unique_ptr<BufferBase> m_buffer_base;   // RingBuffer or QueueBuffer
+    FileWriter m_file_writer;
+    std::thread m_thread;                        // runs pop()
+};
+
+// Global logger instance. `nanologger` owns it; `atomic_nanologger` is the
+// pointer producers read so that add() can race safely with initialize().
+std::unique_ptr<NanoLogger> nanologger;
+std::atomic<NanoLogger*> atomic_nanologger;
+
+// Sink operator used by the VDEBUG_LOG macro: hands the finished line to the
+// global logger. Must not be called before one of the initialize() overloads.
+bool VDebugServer::operator==(VDebug& logline)
+{
+    atomic_nanologger.load(std::memory_order_acquire)->add(std::move(logline));
+    return true;
+}
+
+// Install a non-guaranteed (ring buffer, may drop lines) global logger.
+void initialize(NonGuaranteedLogger ngl, std::string const& log_directory,
+                std::string const& log_file_name,
+                uint32_t log_file_roll_size_mb)
+{
+    nanologger = std::make_unique<NanoLogger>(ngl, log_directory, log_file_name,
+                                              log_file_roll_size_mb);
+    atomic_nanologger.store(nanologger.get(), std::memory_order_seq_cst);
+}
+
+// Install a guaranteed (lossless queue) global logger.
+void initialize(GuaranteedLogger gl, std::string const& log_directory,
+                std::string const& log_file_name,
+                uint32_t log_file_roll_size_mb)
+{
+    nanologger = std::make_unique<NanoLogger>(gl, log_directory, log_file_name,
+                                              log_file_roll_size_mb);
+    atomic_nanologger.store(nanologger.get(), std::memory_order_seq_cst);
+}
+
+// Current minimum severity; lines below it are filtered out in is_logged().
+std::atomic<unsigned int> loglevel = {0};
+
+void set_log_level(LogLevel level)
+{
+    loglevel.store(static_cast<unsigned int>(level), std::memory_order_release);
+}
+
+// True when `level` is at least the configured threshold (INFO < WARN < CRIT).
+bool is_logged(LogLevel level)
+{
+    return static_cast<unsigned int>(level) >=
+           loglevel.load(std::memory_order_relaxed);
+}
+
+#endif // LOTTIE_LOGGING_SUPPORT
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vdebug.h b/vendor/github.com/Benau/go_rlottie/vector_vdebug.h
new file mode 100644
index 00000000..5b6bef5b
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vdebug.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef VDEBUG_H
+#define VDEBUG_H
+
+#include "config.h"
+
+#ifdef LOTTIE_LOGGING_SUPPORT
+
+#include <cstdint>
+#include <iosfwd>
+#include <memory>
+#include <string>
+#include <type_traits>
+
+enum class LogLevel : uint8_t { INFO, WARN, CRIT, OFF };
+
+// One log line. Arguments streamed via operator<< are encoded into a small
+// inline buffer (spilling to a heap buffer when needed) and decoded later by
+// stringify() on the logger thread. The stack buffer is sized so that
+// sizeof(VDebug) stays exactly 256 bytes (see the static_assert at the
+// buffer construction site).
+class VDebug {
+public:
+    VDebug();
+    VDebug& debug() { return *this; }
+    VDebug(LogLevel level, char const* file, char const* function,
+           uint32_t line);
+    ~VDebug();
+
+    // Move-only by design: lines are handed off into the logging buffers.
+    VDebug(VDebug&&) = default;
+    VDebug& operator=(VDebug&&) = default;
+
+    // Decode the captured arguments and write the formatted line to `os`.
+    void stringify(std::ostream& os);
+
+    VDebug& operator<<(char arg);
+    VDebug& operator<<(int32_t arg);
+    VDebug& operator<<(uint32_t arg);
+    // VDebug& operator<<(int64_t arg);
+    // VDebug& operator<<(uint64_t arg);
+
+    VDebug& operator<<(long arg);
+    VDebug& operator<<(unsigned long arg);
+    VDebug& operator<<(double arg);
+    VDebug& operator<<(std::string const& arg);
+
+    // String literals are stored by pointer only (no copy) — safe because
+    // a literal outlives the program's logging.
+    template <size_t N>
+    VDebug& operator<<(const char (&arg)[N])
+    {
+        encode(string_literal_t(arg));
+        return *this;
+    }
+
+    // Runtime C strings (char const* / char*) must be copied; SFINAE picks
+    // these overloads only for exactly those pointer types.
+    template <typename Arg>
+    typename std::enable_if<std::is_same<Arg, char const*>::value,
+                            VDebug&>::type
+    operator<<(Arg const& arg)
+    {
+        encode(arg);
+        return *this;
+    }
+
+    template <typename Arg>
+    typename std::enable_if<std::is_same<Arg, char*>::value, VDebug&>::type
+    operator<<(Arg const& arg)
+    {
+        encode(arg);
+        return *this;
+    }
+
+    // Tag type distinguishing literals from runtime strings in the encoding.
+    struct string_literal_t {
+        explicit string_literal_t(char const* s) : m_s(s) {}
+        char const* m_s;
+    };
+
+private:
+    char* buffer();
+
+    template <typename Arg>
+    void encode(Arg arg);
+
+    template <typename Arg>
+    void encode(Arg arg, uint8_t type_id);
+
+    void encode(char* arg);
+    void encode(char const* arg);
+    void encode(string_literal_t arg);
+    void encode_c_string(char const* arg, size_t length);
+    void resize_buffer_if_needed(size_t additional_bytes);
+    void stringify(std::ostream& os, char* start, char const* const end);
+
+private:
+    size_t m_bytes_used{0};
+    size_t m_buffer_size{0};
+    std::unique_ptr<char[]> m_heap_buffer;   // used once the stack buffer overflows
+    bool m_logAll;
+    char m_stack_buffer[256 - sizeof(bool) - 2 * sizeof(size_t) -
+                        sizeof(decltype(m_heap_buffer)) - 8 /* Reserved */];
+};
+
+// Terminal of the VDEBUG_LOG macro chain: operator== consumes the finished
+// VDebug line and forwards it to the global logger (defined in the .cpp).
+struct VDebugServer {
+    /*
+     * Ideally this should have been operator+=
+     * Could not get that to compile, so here we are...
+     */
+    bool operator==(VDebug&);
+};
+
+void set_log_level(LogLevel level);
+
+bool is_logged(LogLevel level);
+
+/*
+ * Non guaranteed logging. Uses a ring buffer to hold log lines.
+ * When the ring gets full, the previous log line in the slot will be dropped.
+ * Does not block producer even if the ring buffer is full.
+ * ring_buffer_size_mb - LogLines are pushed into a mpsc ring buffer whose size
+ * is determined by this parameter. Since each LogLine is 256 bytes,
+ * ring_buffer_size = ring_buffer_size_mb * 1024 * 1024 / 256
+ */
+// Tag/config type selecting the lossy ring-buffer logger (see comment above).
+struct NonGuaranteedLogger {
+    NonGuaranteedLogger(uint32_t ring_buffer_size_mb_)
+        : ring_buffer_size_mb(ring_buffer_size_mb_)
+    {
+    }
+    uint32_t ring_buffer_size_mb;   // each log line occupies 256 bytes
+};
+
+/*
+ * Provides a guarantee log lines will not be dropped.
+ * Empty tag type selecting the lossless queue-backed logger overloads.
+ */
+struct GuaranteedLogger {
+};
+
+/*
+ * Ensure initialize() is called prior to any log statements.
+ * log_directory - where to create the logs. For example - "/tmp/"
+ * log_file_name - root of the file name. For example - "nanolog"
+ * This will create log files of the form -
+ * /tmp/nanolog.1.txt
+ * /tmp/nanolog.2.txt
+ * etc.
+ * log_file_roll_size_mb - mega bytes after which we roll to next log file.
+ */
+void initialize(GuaranteedLogger gl, std::string const& log_directory,
+ std::string const& log_file_name,
+ uint32_t log_file_roll_size_mb);
+void initialize(NonGuaranteedLogger ngl, std::string const& log_directory,
+ std::string const& log_file_name,
+ uint32_t log_file_roll_size_mb);
+
+// Logging macros: the is_logged() short-circuit skips all argument
+// formatting when the level is filtered; otherwise a temporary VDebug is
+// built and handed to VDebugServer via operator==.
+#define VDEBUG_LOG(LEVEL) \
+    VDebugServer() == VDebug(LEVEL, __FILE__, __func__, __LINE__).debug()
+#define vDebug is_logged(LogLevel::INFO) && VDEBUG_LOG(LogLevel::INFO)
+#define vWarning is_logged(LogLevel::WARN) && VDEBUG_LOG(LogLevel::WARN)
+#define vCritical is_logged(LogLevel::CRIT) && VDEBUG_LOG(LogLevel::CRIT)
+
+#else
+
+// Logging disabled: a stub whose operator<< swallows every argument so
+// vDebug/vWarning/vCritical compile away to (nearly) nothing.
+struct VDebug
+{
+    template<typename Args>
+    VDebug& operator<<(const Args &){return *this;}
+};
+
+#define vDebug VDebug()
+#define vWarning VDebug()
+#define vCritical VDebug()
+
+#endif //LOTTIE_LOGGING_SUPPORT
+
+#endif // VDEBUG_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vdrawable.cpp b/vendor/github.com/Benau/go_rlottie/vector_vdrawable.cpp
new file mode 100644
index 00000000..a7a477f8
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vdrawable.cpp
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "vector_vdrawable.h"
+#include "vector_vdasher.h"
+#include "vector_vraster.h"
+
+// Construct with a drawing type; stroke types allocate their StrokeInfo here.
+VDrawable::VDrawable(VDrawable::Type type)
+{
+    setType(type);
+}
+
+// mStrokeInfo is a raw pointer to either StrokeInfo or StrokeWithDashInfo;
+// StrokeInfo has no virtual destructor, so the dashed case must be deleted
+// through its concrete type.
+VDrawable::~VDrawable() noexcept
+{
+    if (mStrokeInfo) {
+        if (mType == Type::StrokeWithDash) {
+            delete static_cast<StrokeWithDashInfo *>(mStrokeInfo);
+        } else {
+            delete mStrokeInfo;
+        }
+    }
+}
+
+// Set the drawable kind and allocate matching stroke state. NOTE(review):
+// does not free a previously allocated mStrokeInfo — callers appear to set
+// the type only once after construction; confirm before reuse.
+void VDrawable::setType(VDrawable::Type type)
+{
+    mType = type;
+    if (mType == VDrawable::Type::Stroke) {
+        mStrokeInfo = new StrokeInfo();
+    } else if (mType == VDrawable::Type::StrokeWithDash) {
+        mStrokeInfo = new StrokeWithDashInfo();
+    }
+}
+
+// Replace mPath with its dashed version when this drawable is a dashed
+// stroke and a dash pattern has been supplied.
+void VDrawable::applyDashOp()
+{
+    if (mStrokeInfo && (mType == Type::StrokeWithDash)) {
+        auto obj = static_cast<StrokeWithDashInfo *>(mStrokeInfo);
+        if (!obj->mDash.empty()) {
+            VDasher dasher(obj->mDash.data(), obj->mDash.size());
+            mPath.clone(dasher.dashed(mPath));
+        }
+    }
+}
+
+// Rasterize the (possibly dashed) path into the rasterizer when the path is
+// dirty, clipped to `clip`; the path is consumed (moved) and reset.
+void VDrawable::preprocess(const VRect &clip)
+{
+    if (mFlag & (DirtyState::Path)) {
+        if (mType == Type::Fill) {
+            mRasterizer.rasterize(std::move(mPath), mFillRule, clip);
+        } else {
+            applyDashOp();
+            mRasterizer.rasterize(std::move(mPath), mStrokeInfo->cap, mStrokeInfo->join,
+                                  mStrokeInfo->width, mStrokeInfo->miterLimit, clip);
+        }
+        mPath = {};
+        mFlag &= ~DirtyFlag(DirtyState::Path);
+    }
+}
+
+// Result of the last preprocess(); delegates to the rasterizer.
+VRle VDrawable::rle()
+{
+    return mRasterizer.rle();
+}
+
+// Update stroke parameters; marks the path dirty only when something
+// actually changed (float fields compared with vCompare tolerance).
+void VDrawable::setStrokeInfo(CapStyle cap, JoinStyle join, float miterLimit,
+                              float strokeWidth)
+{
+    assert(mStrokeInfo);
+    if ((mStrokeInfo->cap == cap) && (mStrokeInfo->join == join) &&
+        vCompare(mStrokeInfo->miterLimit, miterLimit) &&
+        vCompare(mStrokeInfo->width, strokeWidth))
+        return;
+
+    mStrokeInfo->cap = cap;
+    mStrokeInfo->join = join;
+    mStrokeInfo->miterLimit = miterLimit;
+    mStrokeInfo->width = strokeWidth;
+    mFlag |= DirtyState::Path;
+}
+
+// Replace the dash pattern; only valid for StrokeWithDash drawables.
+// Element-wise tolerant comparison avoids dirtying the path for a no-op.
+void VDrawable::setDashInfo(std::vector<float> &dashInfo)
+{
+    assert(mStrokeInfo);
+    assert(mType == VDrawable::Type::StrokeWithDash);
+
+    auto obj = static_cast<StrokeWithDashInfo *>(mStrokeInfo);
+    bool hasChanged = false;
+
+    if (obj->mDash.size() == dashInfo.size()) {
+        for (uint i = 0; i < dashInfo.size(); ++i) {
+            if (!vCompare(obj->mDash[i], dashInfo[i])) {
+                hasChanged = true;
+                break;
+            }
+        }
+    } else {
+        hasChanged = true;
+    }
+
+    if (!hasChanged) return;
+
+    obj->mDash = dashInfo;
+
+    mFlag |= DirtyState::Path;
+}
+
+// Copy in a new geometry and mark it for re-rasterization.
+void VDrawable::setPath(const VPath &path)
+{
+    mPath = path;
+    mFlag |= DirtyState::Path;
+}
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vdrawable.h b/vendor/github.com/Benau/go_rlottie/vector_vdrawable.h
new file mode 100644
index 00000000..77f889da
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vdrawable.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef VDRAWABLE_H
+#define VDRAWABLE_H
+#include <future>
+#include <cstring>
+#include "vector_vbrush.h"
+#include "vector_vpath.h"
+#include "vector_vrle.h"
+#include "vector_vraster.h"
+
+// A renderable shape: a path plus either a fill or a (possibly dashed)
+// stroke, with a brush and a cached rasterization. Dirty flags track which
+// parts need recomputation on the next preprocess().
+class VDrawable {
+public:
+    enum class DirtyState : unsigned char {
+        None = 1<<0,
+        Path = 1<<1,
+        Stroke = 1<<2,
+        Brush = 1<<3,
+        All = (Path | Stroke | Brush)
+    };
+
+    enum class Type : unsigned char{
+        Fill,
+        Stroke,
+        StrokeWithDash
+    };
+
+    explicit VDrawable(VDrawable::Type type = Type::Fill);
+    void setType(VDrawable::Type type);
+    ~VDrawable() noexcept;
+
+    typedef vFlag<DirtyState> DirtyFlag;
+    void setPath(const VPath &path);
+    void setFillRule(FillRule rule) { mFillRule = rule; }
+    void setBrush(const VBrush &brush) { mBrush = brush; }
+    void setStrokeInfo(CapStyle cap, JoinStyle join, float miterLimit,
+                       float strokeWidth);
+    void setDashInfo(std::vector<float> &dashInfo);
+    // Rasterize the path (clipped) if dirty; result available via rle().
+    void preprocess(const VRect &clip);
+    void applyDashOp();
+    VRle rle();
+    // `name` is stored by pointer only — caller must keep it alive.
+    void setName(const char *name)
+    {
+        mName = name;
+    }
+    const char* name() const { return mName; }
+
+public:
+    struct StrokeInfo {
+        float width{0.0};
+        float miterLimit{10};
+        CapStyle cap{CapStyle::Flat};
+        JoinStyle join{JoinStyle::Bevel};
+    };
+
+    // Stroke with an associated dash pattern (alternating on/off lengths).
+    struct StrokeWithDashInfo : public StrokeInfo{
+        std::vector<float> mDash;
+    };
+
+public:
+    VPath mPath;
+    VBrush mBrush;
+    VRasterizer mRasterizer;
+    StrokeInfo *mStrokeInfo{nullptr};   // owned; concrete type depends on mType
+
+    DirtyFlag mFlag{DirtyState::All};
+    FillRule mFillRule{FillRule::Winding};
+    VDrawable::Type mType{Type::Fill};
+
+    const char *mName{nullptr};         // not owned
+};
+
+#endif // VDRAWABLE_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vdrawhelper.cpp b/vendor/github.com/Benau/go_rlottie/vector_vdrawhelper.cpp
new file mode 100644
index 00000000..e2601362
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vdrawhelper.cpp
@@ -0,0 +1,767 @@
+#include "config.h"
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "vector_vdrawhelper.h"
+#include <algorithm>
+#include <climits>
+#include <cstring>
+#include <mutex>
+#include <unordered_map>
+#include <array>
+
+static RenderFuncTable RenderTable;
+
+// Store the clip rectangle as inclusive pixel bounds, limited to the
+// texture's own dimensions (right/bottom become last valid indices).
+void VTextureData::setClip(const VRect &clip)
+{
+    left = clip.left();
+    top = clip.top();
+    right = std::min(clip.right(), int(width())) - 1;
+    bottom = std::min(clip.bottom(), int(height())) - 1;
+}
+
+// Process-wide cache of premultiplied gradient color tables, keyed by a
+// cheap hash of (up to) the first three stop colors scaled by alpha. Hash
+// collisions are resolved by comparing the full stop list. Thread-safe via
+// an internal mutex; access through instance().
+class VGradientCache {
+public:
+    struct CacheInfo : public VColorTable {
+        inline CacheInfo(VGradientStops s) : stops(std::move(s)) {}
+        VGradientStops stops;   // retained for exact-match verification
+    };
+    using VCacheData = std::shared_ptr<const CacheInfo>;
+    using VCacheKey = int64_t;
+    using VGradientColorTableHash =
+        std::unordered_multimap<VCacheKey, VCacheData>;
+
+    bool generateGradientColorTable(const VGradientStops &stops, float alpha,
+                                    uint32_t *colorTable, int size);
+    // Return the cached table for `gradient`, building and inserting one on
+    // a miss (or on a hash collision with different stops).
+    VCacheData getBuffer(const VGradient &gradient)
+    {
+        VCacheKey hash_val = 0;
+        VCacheData info;
+        const VGradientStops &stops = gradient.mStops;
+        for (uint i = 0; i < stops.size() && i <= 2; i++)
+            hash_val +=
+                VCacheKey(stops[i].second.premulARGB() * gradient.alpha());
+
+        {
+            std::lock_guard<std::mutex> guard(mMutex);
+
+            size_t count = mCache.count(hash_val);
+            if (!count) {
+                // key is not present in the hash
+                info = addCacheElement(hash_val, gradient);
+            } else if (count == 1) {
+                auto search = mCache.find(hash_val);
+                if (search->second->stops == stops) {
+                    info = search->second;
+                } else {
+                    // didn't find an exact match
+                    info = addCacheElement(hash_val, gradient);
+                }
+            } else {
+                // we have a multiple data with same key
+                auto range = mCache.equal_range(hash_val);
+                for (auto it = range.first; it != range.second; ++it) {
+                    if (it->second->stops == stops) {
+                        info = it->second;
+                        break;
+                    }
+                }
+                if (!info) {
+                    // didn't find an exact match
+                    info = addCacheElement(hash_val, gradient);
+                }
+            }
+        }
+        return info;
+    }
+
+    static VGradientCache &instance()
+    {
+        static VGradientCache CACHE;
+        return CACHE;
+    }
+
+protected:
+    uint maxCacheSize() const { return 60; }
+    // Build a new color table entry; when the cache is full, evict 10% of
+    // the entries (arbitrary ones — unordered iteration order) first.
+    // Caller must hold mMutex.
+    VCacheData addCacheElement(VCacheKey hash_val, const VGradient &gradient)
+    {
+        if (mCache.size() == maxCacheSize()) {
+            uint count = maxCacheSize() / 10;
+            while (count--) {
+                mCache.erase(mCache.begin());
+            }
+        }
+        auto cache_entry = std::make_shared<CacheInfo>(gradient.mStops);
+        cache_entry->alpha = generateGradientColorTable(
+            gradient.mStops, gradient.alpha(), cache_entry->buffer32,
+            VGradient::colorTableSize);
+        mCache.insert(std::make_pair(hash_val, cache_entry));
+        return cache_entry;
+    }
+
+private:
+    VGradientCache() = default;
+
+    VGradientColorTableHash mCache;   // guarded by mMutex
+    std::mutex mMutex;
+};
+
+// Expand gradient stops into a `size`-entry table of premultiplied ARGB
+// colors, linearly interpolating between adjacent stops. Returns true when
+// any stop (or the overall opacity) introduces transparency, so callers
+// know the table needs alpha blending.
+bool VGradientCache::generateGradientColorTable(const VGradientStops &stops,
+                                                float opacity,
+                                                uint32_t *colorTable, int size)
+{
+    int dist, idist, pos = 0;
+    size_t i;
+    bool alpha = false;
+    size_t stopCount = stops.size();
+    const VGradientStop *curr, *next, *start;
+    uint32_t curColor, nextColor;
+    float delta, t, incr, fpos;
+
+    if (!vCompare(opacity, 1.0f)) alpha = true;
+
+    start = stops.data();
+    curr = start;
+    if (!curr->second.isOpaque()) alpha = true;
+    curColor = curr->second.premulARGB(opacity);
+    incr = 1.0f / (float)size;
+    fpos = 1.5f * incr;   // sample at cell centers, starting past cell 0
+
+    colorTable[pos++] = curColor;
+
+    // Flat-fill the region before the first stop with the first color.
+    while (fpos <= curr->first) {
+        colorTable[pos] = colorTable[pos - 1];
+        pos++;
+        fpos += incr;
+    }
+
+    // Interpolate each [curr, next] stop interval with 8-bit weights.
+    for (i = 0; i < stopCount - 1; ++i) {
+        curr = (start + i);
+        next = (start + i + 1);
+        delta = 1 / (next->first - curr->first);
+        if (!next->second.isOpaque()) alpha = true;
+        nextColor = next->second.premulARGB(opacity);
+        while (fpos < next->first && pos < size) {
+            t = (fpos - curr->first) * delta;
+            dist = (int)(255 * t);
+            idist = 255 - dist;
+            colorTable[pos] =
+                interpolate_pixel(curColor, idist, nextColor, dist);
+            ++pos;
+            fpos += incr;
+        }
+        curColor = nextColor;
+    }
+
+    // Flat-fill everything after the last stop.
+    for (; pos < size; ++pos) colorTable[pos] = curColor;
+
+    // Make sure the last color stop is represented at the end of the table
+    colorTable[size - 1] = curColor;
+    return alpha;
+}
+
+// Zero the whole target surface (height * stride bytes).
+void VRasterBuffer::clear()
+{
+    memset(mBuffer, 0, mHeight * mBytesPerLine);
+}
+
+// Adopt `image` as the render target; the buffer is borrowed, not owned.
+// Pixel layout is assumed 32-bit (4 bytes per pixel).
+VBitmap::Format VRasterBuffer::prepare(const VBitmap *image)
+{
+    mBuffer = image->data();
+    mWidth = image->width();
+    mHeight = image->height();
+    mBytesPerPixel = 4;
+    mBytesPerLine = image->stride();
+
+    mFormat = image->format();
+    return mFormat;
+}
+
+// Reset span state against a new raster buffer: full-surface draw region,
+// no source type and no blend functions selected yet.
+void VSpanData::init(VRasterBuffer *image)
+{
+    mRasterBuffer = image;
+    setDrawRegion(VRect(0, 0, int(image->width()), int(image->height())));
+    mType = VSpanData::Type::None;
+    mBlendFunc = nullptr;
+    mUnclippedBlendFunc = nullptr;
+}
+
+/*
+ * Gradient Draw routines
+ *
+ */
+
+#define FIXPT_BITS 8
+#define FIXPT_SIZE (1 << FIXPT_BITS)
+// Precompute the linear-gradient projection: normalize the (dx,dy) axis by
+// its squared length and derive the offset so that t = dx*x + dy*y + off
+// maps points onto [0,1] along the gradient. Degenerate (zero-length)
+// gradients keep l == 0 and are special-cased by the fetcher.
+static inline void getLinearGradientValues(LinearGradientValues *v,
+                                           const VSpanData *    data)
+{
+    const VGradientData *grad = &data->mGradient;
+    v->dx = grad->linear.x2 - grad->linear.x1;
+    v->dy = grad->linear.y2 - grad->linear.y1;
+    v->l = v->dx * v->dx + v->dy * v->dy;
+    v->off = 0;
+    if (v->l != 0) {
+        v->dx /= v->l;
+        v->dy /= v->l;
+        v->off = -v->dx * grad->linear.x1 - v->dy * grad->linear.y1;
+    }
+}
+
+// Precompute radial-gradient quadratic coefficients (focal point to center
+// delta, radius delta, and 'a' of the per-pixel quadratic). `extended` marks
+// the slow path needed for focal radii or non-positive 'a'.
+static inline void getRadialGradientValues(RadialGradientValues *v,
+                                           const VSpanData *     data)
+{
+    const VGradientData &gradient = data->mGradient;
+    v->dx = gradient.radial.cx - gradient.radial.fx;
+    v->dy = gradient.radial.cy - gradient.radial.fy;
+
+    v->dr = gradient.radial.cradius - gradient.radial.fradius;
+    v->sqrfr = gradient.radial.fradius * gradient.radial.fradius;
+
+    v->a = v->dr * v->dr - v->dx * v->dx - v->dy * v->dy;
+    v->inv2a = 1 / (2 * v->a);
+
+    v->extended = !vIsZero(gradient.radial.fradius) || v->a <= 0;
+}
+
+// Map a (possibly out-of-range) table index into [0, colorTableSize) per
+// the gradient's spread mode: Repeat wraps, Reflect ping-pongs, and the
+// default (Pad) clamps to the edges.
+static inline int gradientClamp(const VGradientData *grad, int ipos)
+{
+    int limit;
+
+    if (grad->mSpread == VGradient::Spread::Repeat) {
+        ipos = ipos % VGradient::colorTableSize;
+        ipos = ipos < 0 ? VGradient::colorTableSize + ipos : ipos;
+    } else if (grad->mSpread == VGradient::Spread::Reflect) {
+        limit = VGradient::colorTableSize * 2;
+        ipos = ipos % limit;
+        ipos = ipos < 0 ? limit + ipos : ipos;
+        ipos = ipos >= VGradient::colorTableSize ? limit - 1 - ipos : ipos;
+    } else {
+        if (ipos < 0)
+            ipos = 0;
+        else if (ipos >= VGradient::colorTableSize)
+            ipos = VGradient::colorTableSize - 1;
+    }
+    return ipos;
+}
+
+// Table lookup from a fixed-point (FIXPT_BITS fractional bits) position,
+// rounding to the nearest entry.
+static uint32_t gradientPixelFixed(const VGradientData *grad, int fixed_pos)
+{
+    int ipos = (fixed_pos + (FIXPT_SIZE / 2)) >> FIXPT_BITS;
+
+    return grad->mColorTable[gradientClamp(grad, ipos)];
+}
+
+// Table lookup from a float position in [0,1], rounding to nearest entry.
+static inline uint32_t gradientPixel(const VGradientData *grad, float pos)
+{
+    int ipos = (int)(pos * (VGradient::colorTableSize - 1) + (float)(0.5));
+
+    return grad->mColorTable[gradientClamp(grad, ipos)];
+}
+
+// Fill `buffer` with `length` linear-gradient pixels for the span starting
+// at device position (x, y). Transforms the span through the inverse user
+// matrix (m11..m33/dx/dy), then walks the gradient parameter t. Affine
+// transforms use an incremental fixed-point fast path when t stays within
+// fixed-point range; projective transforms divide per pixel.
+void fetch_linear_gradient(uint32_t *buffer, const Operator *op,
+                           const VSpanData *data, int y, int x, int length)
+{
+    float                t, inc;
+    const VGradientData *gradient = &data->mGradient;
+
+    bool  affine = true;
+    float rx = 0, ry = 0;
+    if (op->linear.l == 0) {
+        // Degenerate gradient: constant color at t == 0.
+        t = inc = 0;
+    } else {
+        // Sample at the pixel center (+0.5).
+        rx = data->m21 * (y + float(0.5)) + data->m11 * (x + float(0.5)) +
+             data->dx;
+        ry = data->m22 * (y + float(0.5)) + data->m12 * (x + float(0.5)) +
+             data->dy;
+        t = op->linear.dx * rx + op->linear.dy * ry + op->linear.off;
+        inc = op->linear.dx * data->m11 + op->linear.dy * data->m12;
+        affine = !data->m13 && !data->m23;
+
+        if (affine) {
+            t *= (VGradient::colorTableSize - 1);
+            inc *= (VGradient::colorTableSize - 1);
+        }
+    }
+
+    const uint32_t *end = buffer + length;
+    if (affine) {
+        if (inc > float(-1e-5) && inc < float(1e-5)) {
+            // Gradient is effectively constant across the span.
+            memfill32(buffer, gradientPixelFixed(gradient, int(t * FIXPT_SIZE)),
+                      length);
+        } else {
+            if (t + inc * length < float(INT_MAX >> (FIXPT_BITS + 1)) &&
+                t + inc * length > float(INT_MIN >> (FIXPT_BITS + 1))) {
+                // we can use fixed point math
+                int t_fixed = int(t * FIXPT_SIZE);
+                int inc_fixed = int(inc * FIXPT_SIZE);
+                while (buffer < end) {
+                    *buffer = gradientPixelFixed(gradient, t_fixed);
+                    t_fixed += inc_fixed;
+                    ++buffer;
+                }
+            } else {
+                // we have to fall back to float math
+                while (buffer < end) {
+                    *buffer =
+                        gradientPixel(gradient, t / VGradient::colorTableSize);
+                    t += inc;
+                    ++buffer;
+                }
+            }
+        }
+    } else {  // fall back to float math here as well
+        float rw = data->m23 * (y + float(0.5)) + data->m13 * (x + float(0.5)) +
+                   data->m33;
+        while (buffer < end) {
+            float xt = rx / rw;
+            float yt = ry / rw;
+            t = (op->linear.dx * xt + op->linear.dy * yt) + op->linear.off;
+
+            *buffer = gradientPixel(gradient, t);
+            rx += data->m11;
+            ry += data->m12;
+            rw += data->m13;
+            if (!rw) {
+                rw += data->m13;   // skip over w == 0 to avoid div-by-zero
+            }
+            ++buffer;
+        }
+    }
+}
+
+// Discriminant of the radial-gradient quadratic a*s^2 + b*s + c.
+static inline float radialDeterminant(float a, float b, float c)
+{
+    return (b * b) - (4 * a * c);
+}
+
+// Inner loop of the affine radial fetch: walks the discriminant and linear
+// term with second-order forward differences. The `extended` variant guards
+// against negative discriminants and positions behind the focal circle
+// (those pixels become transparent); the simple variant assumes every pixel
+// has a valid solution.
+static void fetch(uint32_t *buffer, uint32_t *end, const Operator *op,
+                  const VSpanData *data, float det, float delta_det,
+                  float delta_delta_det, float b, float delta_b)
+{
+    if (op->radial.extended) {
+        while (buffer < end) {
+            uint32_t result = 0;
+            if (det >= 0) {
+                float w = std::sqrt(det) - b;
+                if (data->mGradient.radial.fradius + op->radial.dr * w >= 0)
+                    result = gradientPixel(&data->mGradient, w);
+            }
+
+            *buffer = result;
+
+            det += delta_det;
+            delta_det += delta_delta_det;
+            b += delta_b;
+
+            ++buffer;
+        }
+    } else {
+        while (buffer < end) {
+            *buffer++ = gradientPixel(&data->mGradient, std::sqrt(det) - b);
+
+            det += delta_det;
+            delta_det += delta_delta_det;
+            b += delta_b;
+        }
+    }
+}
+
+// Fill `buffer` with `length` radial-gradient pixels for the span starting
+// at device (x, y). The gradient parameter s solves a per-pixel quadratic;
+// for affine transforms the discriminant and linear term are advanced with
+// forward differences (see fetch()), while projective transforms solve the
+// quadratic per pixel.
+void fetch_radial_gradient(uint32_t *buffer, const Operator *op,
+                           const VSpanData *data, int y, int x, int length)
+{
+    // avoid division by zero
+    if (vIsZero(op->radial.a)) {
+        memfill32(buffer, 0, length);
+        return;
+    }
+
+    // Map the span start to user space, sampling at pixel centers.
+    float rx =
+        data->m21 * (y + float(0.5)) + data->dx + data->m11 * (x + float(0.5));
+    float ry =
+        data->m22 * (y + float(0.5)) + data->dy + data->m12 * (x + float(0.5));
+    bool affine = !data->m13 && !data->m23;
+
+    uint32_t *end = buffer + length;
+    if (affine) {
+        // Work relative to the focal point.
+        rx -= data->mGradient.radial.fx;
+        ry -= data->mGradient.radial.fy;
+
+        float inv_a = 1 / float(2 * op->radial.a);
+
+        const float delta_rx = data->m11;
+        const float delta_ry = data->m12;
+
+        float b = 2 * (op->radial.dr * data->mGradient.radial.fradius +
+                       rx * op->radial.dx + ry * op->radial.dy);
+        float delta_b =
+            2 * (delta_rx * op->radial.dx + delta_ry * op->radial.dy);
+        const float b_delta_b = 2 * b * delta_b;
+        const float delta_b_delta_b = 2 * delta_b * delta_b;
+
+        const float bb = b * b;
+        const float delta_bb = delta_b * delta_b;
+
+        b *= inv_a;
+        delta_b *= inv_a;
+
+        const float rxrxryry = rx * rx + ry * ry;
+        const float delta_rxrxryry = delta_rx * delta_rx + delta_ry * delta_ry;
+        const float rx_plus_ry = 2 * (rx * delta_rx + ry * delta_ry);
+        const float delta_rx_plus_ry = 2 * delta_rxrxryry;
+
+        inv_a *= inv_a;
+
+        // Discriminant and its first/second forward differences along x.
+        float det =
+            (bb - 4 * op->radial.a * (op->radial.sqrfr - rxrxryry)) * inv_a;
+        float delta_det = (b_delta_b + delta_bb +
+                           4 * op->radial.a * (rx_plus_ry + delta_rxrxryry)) *
+                          inv_a;
+        const float delta_delta_det =
+            (delta_b_delta_b + 4 * op->radial.a * delta_rx_plus_ry) * inv_a;
+
+        fetch(buffer, end, op, data, det, delta_det, delta_delta_det, b,
+              delta_b);
+    } else {
+        // Projective: divide by w and solve the quadratic per pixel.
+        float rw = data->m23 * (y + float(0.5)) + data->m33 +
+                   data->m13 * (x + float(0.5));
+
+        while (buffer < end) {
+            if (rw == 0) {
+                *buffer = 0;
+            } else {
+                float invRw = 1 / rw;
+                float gx = rx * invRw - data->mGradient.radial.fx;
+                float gy = ry * invRw - data->mGradient.radial.fy;
+                float b = 2 * (op->radial.dr * data->mGradient.radial.fradius +
+                               gx * op->radial.dx + gy * op->radial.dy);
+                float det = radialDeterminant(
+                    op->radial.a, b, op->radial.sqrfr - (gx * gx + gy * gy));
+
+                uint32_t result = 0;
+                if (det >= 0) {
+                    float detSqrt = std::sqrt(det);
+
+                    float s0 = (-b - detSqrt) * op->radial.inv2a;
+                    float s1 = (-b + detSqrt) * op->radial.inv2a;
+
+                    // Take the larger root (outermost intersection).
+                    float s = vMax(s0, s1);
+
+                    if (data->mGradient.radial.fradius + op->radial.dr * s >= 0)
+                        result = gradientPixel(&data->mGradient, s);
+                }
+
+                *buffer = result;
+            }
+
+            rx += data->m11;
+            ry += data->m12;
+            rw += data->m13;
+
+            ++buffer;
+        }
+    }
+}
+
+// Assemble the blend Operator for a span: pick the source fetcher by span
+// type, precompute gradient values, and select the blend functions from the
+// global RenderTable. A fully opaque solid under SrcOver is promoted to a
+// plain Src copy.
+static inline Operator getOperator(const VSpanData *data)
+{
+    Operator op;
+    bool     solidSource = false;
+
+    switch (data->mType) {
+    case VSpanData::Type::Solid:
+        solidSource = (vAlpha(data->mSolid) == 255);
+        op.srcFetch = nullptr;
+        break;
+    case VSpanData::Type::LinearGradient:
+        solidSource = false;
+        getLinearGradientValues(&op.linear, data);
+        op.srcFetch = &fetch_linear_gradient;
+        break;
+    case VSpanData::Type::RadialGradient:
+        solidSource = false;
+        getRadialGradientValues(&op.radial, data);
+        op.srcFetch = &fetch_radial_gradient;
+        break;
+    default:
+        op.srcFetch = nullptr;
+        break;
+    }
+
+    op.mode = data->mBlendMode;
+    if (op.mode == BlendMode::SrcOver && solidSource) op.mode = BlendMode::Src;
+
+    op.funcSolid = RenderTable.color(op.mode);
+    op.func = RenderTable.src(op.mode);
+
+    return op;
+}
+
+static void blend_color(size_t size, const VRle::Span *array, void *userData)
+{
+ VSpanData *data = (VSpanData *)(userData);
+ Operator op = getOperator(data);
+ const uint color = data->mSolid;
+
+ for (size_t i = 0 ; i < size; ++i) {
+ const auto &span = array[i];
+ op.funcSolid(data->buffer(span.x, span.y), span.len, color, span.coverage);
+ }
+}
+
+// Signature of Process Object
+// void Pocess(uint* scratchBuffer, size_t x, size_t y, uchar cov)
+template <class Process>
+static inline void process_in_chunk(const VRle::Span *array, size_t size,
+ Process process)
+{
+ std::array<uint, 2048> buf;
+ for (size_t i = 0; i < size; i++) {
+ const auto &span = array[i];
+ size_t len = span.len;
+ auto x = span.x;
+ while (len) {
+ auto l = std::min(len, buf.size());
+ process(buf.data(), x, span.y, l, span.coverage);
+ x += l;
+ len -= l;
+ }
+ }
+}
+
+static void blend_gradient(size_t size, const VRle::Span *array,
+ void *userData)
+{
+ VSpanData *data = (VSpanData *)(userData);
+ Operator op = getOperator(data);
+
+ if (!op.srcFetch) return;
+
+ process_in_chunk(
+ array, size,
+ [&](uint *scratch, size_t x, size_t y, size_t len, uchar cov) {
+ op.srcFetch(scratch, &op, data, (int)y, (int)x, (int)len);
+ op.func(data->buffer((int)x, (int)y), (int)len, scratch, cov);
+ });
+}
+
+template <class T>
+constexpr const T &clamp(const T &v, const T &lo, const T &hi)
+{
+ return v < lo ? lo : hi < v ? hi : v;
+}
+
+static constexpr inline uchar alpha_mul(uchar a, uchar b)
+{
+ return ((a * b) >> 8);
+}
+
+static void blend_image_xform(size_t size, const VRle::Span *array,
+ void *userData)
+{
+ const auto data = reinterpret_cast<const VSpanData *>(userData);
+ const auto &src = data->texture();
+
+ if (src.format() != VBitmap::Format::ARGB32_Premultiplied &&
+ src.format() != VBitmap::Format::ARGB32) {
+ //@TODO other formats not yet handled.
+ return;
+ }
+
+ Operator op = getOperator(data);
+
+ process_in_chunk(
+ array, size,
+ [&](uint *scratch, size_t x, size_t y, size_t len, uchar cov) {
+ const auto coverage = (cov * src.alpha()) >> 8;
+ const float xfactor = y * data->m21 + data->dx + data->m11;
+ const float yfactor = y * data->m22 + data->dy + data->m12;
+ for (size_t i = 0; i < len; i++) {
+ const float fx = (x + i) * data->m11 + xfactor;
+ const float fy = (x + i) * data->m12 + yfactor;
+ const int px = clamp(int(fx), src.left, src.right);
+ const int py = clamp(int(fy), src.top, src.bottom);
+ scratch[i] = src.pixel(px, py);
+ }
+ op.func(data->buffer((int)x, (int)y), (int)len, scratch, coverage);
+ });
+}
+
+static void blend_image(size_t size, const VRle::Span *array, void *userData)
+{
+ const auto data = reinterpret_cast<const VSpanData *>(userData);
+ const auto &src = data->texture();
+
+ if (src.format() != VBitmap::Format::ARGB32_Premultiplied &&
+ src.format() != VBitmap::Format::ARGB32) {
+ //@TODO other formats not yet handled.
+ return;
+ }
+
+ Operator op = getOperator(data);
+
+ for (size_t i = 0; i < size; i++) {
+ const auto &span = array[i];
+ int x = span.x;
+ int length = span.len;
+ int sx = x + int(data->dx);
+ int sy = span.y + int(data->dy);
+
+ // notyhing to copy.
+ if (sy < 0 || sy >= int(src.height()) || sx >= int(src.width()) ||
+ (sx + length) <= 0)
+ continue;
+
+ // intersecting left edge of image
+ if (sx < 0) {
+ x -= sx;
+ length += sx;
+ sx = 0;
+ }
+ // intersecting right edge of image
+ if (sx + length > int(src.width())) length = (int)src.width() - sx;
+
+ op.func(data->buffer(x, span.y), length, src.pixelRef(sx, sy),
+ alpha_mul(span.coverage, src.alpha()));
+ }
+}
+
+void VSpanData::setup(const VBrush &brush, BlendMode /*mode*/, int /*alpha*/)
+{
+ transformType = VMatrix::MatrixType::None;
+
+ switch (brush.type()) {
+ case VBrush::Type::NoBrush:
+ mType = VSpanData::Type::None;
+ break;
+ case VBrush::Type::Solid:
+ mType = VSpanData::Type::Solid;
+ mSolid = brush.mColor.premulARGB();
+ break;
+ case VBrush::Type::LinearGradient: {
+ mType = VSpanData::Type::LinearGradient;
+ mColorTable = VGradientCache::instance().getBuffer(*brush.mGradient);
+ mGradient.mColorTable = mColorTable->buffer32;
+ mGradient.mColorTableAlpha = mColorTable->alpha;
+ mGradient.linear.x1 = brush.mGradient->linear.x1;
+ mGradient.linear.y1 = brush.mGradient->linear.y1;
+ mGradient.linear.x2 = brush.mGradient->linear.x2;
+ mGradient.linear.y2 = brush.mGradient->linear.y2;
+ mGradient.mSpread = brush.mGradient->mSpread;
+ setupMatrix(brush.mGradient->mMatrix);
+ break;
+ }
+ case VBrush::Type::RadialGradient: {
+ mType = VSpanData::Type::RadialGradient;
+ mColorTable = VGradientCache::instance().getBuffer(*brush.mGradient);
+ mGradient.mColorTable = mColorTable->buffer32;
+ mGradient.mColorTableAlpha = mColorTable->alpha;
+ mGradient.radial.cx = brush.mGradient->radial.cx;
+ mGradient.radial.cy = brush.mGradient->radial.cy;
+ mGradient.radial.fx = brush.mGradient->radial.fx;
+ mGradient.radial.fy = brush.mGradient->radial.fy;
+ mGradient.radial.cradius = brush.mGradient->radial.cradius;
+ mGradient.radial.fradius = brush.mGradient->radial.fradius;
+ mGradient.mSpread = brush.mGradient->mSpread;
+ setupMatrix(brush.mGradient->mMatrix);
+ break;
+ }
+ case VBrush::Type::Texture: {
+ mType = VSpanData::Type::Texture;
+ initTexture(&brush.mTexture->mBitmap, brush.mTexture->mAlpha,
+ brush.mTexture->mBitmap.rect());
+ setupMatrix(brush.mTexture->mMatrix);
+ break;
+ }
+ default:
+ break;
+ }
+ updateSpanFunc();
+}
+
+void VSpanData::setupMatrix(const VMatrix &matrix)
+{
+ VMatrix inv = matrix.inverted();
+ m11 = inv.m11;
+ m12 = inv.m12;
+ m13 = inv.m13;
+ m21 = inv.m21;
+ m22 = inv.m22;
+ m23 = inv.m23;
+ m33 = inv.m33;
+ dx = inv.mtx;
+ dy = inv.mty;
+ transformType = inv.type();
+
+ const bool affine = inv.isAffine();
+ const float f1 = m11 * m11 + m21 * m21;
+ const float f2 = m12 * m12 + m22 * m22;
+ fast_matrix = affine && f1 < 1e4 && f2 < 1e4 && f1 > (1.0 / 65536) &&
+ f2 > (1.0 / 65536) && fabs(dx) < 1e4 && fabs(dy) < 1e4;
+}
+
+void VSpanData::initTexture(const VBitmap *bitmap, int alpha,
+ const VRect &sourceRect)
+{
+ mType = VSpanData::Type::Texture;
+ mTexture.prepare(bitmap);
+ mTexture.setClip(sourceRect);
+ mTexture.setAlpha(alpha);
+ updateSpanFunc();
+}
+
+void VSpanData::updateSpanFunc()
+{
+ switch (mType) {
+ case VSpanData::Type::None:
+ mUnclippedBlendFunc = nullptr;
+ break;
+ case VSpanData::Type::Solid:
+ mUnclippedBlendFunc = &blend_color;
+ break;
+ case VSpanData::Type::LinearGradient:
+ case VSpanData::Type::RadialGradient: {
+ mUnclippedBlendFunc = &blend_gradient;
+ break;
+ }
+ case VSpanData::Type::Texture: {
+ //@TODO update proper image function.
+ if (transformType <= VMatrix::MatrixType::Translate) {
+ mUnclippedBlendFunc = &blend_image;
+ } else {
+ mUnclippedBlendFunc = &blend_image_xform;
+ }
+ break;
+ }
+ }
+}
+
+#if !defined(__SSE2__) && !defined(USE_ARM_NEON)
+void memfill32(uint32_t *dest, uint32_t value, int length)
+{
+ // let compiler do the auto vectorization.
+ for (int i = 0 ; i < length; i++) {
+ *dest++ = value;
+ }
+}
+#endif
+
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vdrawhelper.h b/vendor/github.com/Benau/go_rlottie/vector_vdrawhelper.h
new file mode 100644
index 00000000..590c5950
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vdrawhelper.h
@@ -0,0 +1,270 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef VDRAWHELPER_H
+#define VDRAWHELPER_H
+
+#include <memory>
+#include <array>
+#include "assert.h"
+#include "vector_vbitmap.h"
+#include "vector_vbrush.h"
+#include "vector_vrect.h"
+#include "vector_vrle.h"
+
+V_USE_NAMESPACE
+
+struct VSpanData;
+struct Operator;
+
+struct RenderFunc
+{
+ using Color = void (*)(uint32_t *dest, int length, uint32_t color, uint32_t alpha);
+ using Src = void (*)(uint32_t *dest, int length, const uint32_t *src, uint32_t alpha);
+ enum class Type {
+ Invalid,
+ Color,
+ Src,
+ };
+ RenderFunc() = default;
+ RenderFunc(Type t, Color f):type_(t), color_(f){assert(t == Type::Color);}
+ RenderFunc(Type t, Src f):type_(t), src_(f){ assert(t == Type::Src);}
+
+ Type type_{Type::Invalid};
+ union {
+ Color color_;
+ Src src_;
+ };
+};
+
+class RenderFuncTable
+{
+public:
+ RenderFuncTable();
+ RenderFunc::Color color(BlendMode mode) const
+ {
+ return colorTable[uint32_t(mode)].color_;
+ }
+ RenderFunc::Src src(BlendMode mode) const
+ {
+ return srcTable[uint32_t(mode)].src_;
+ }
+private:
+ void neon();
+ void sse();
+ void updateColor(BlendMode mode, RenderFunc::Color f)
+ {
+ colorTable[uint32_t(mode)] = {RenderFunc::Type::Color, f};
+ }
+ void updateSrc(BlendMode mode, RenderFunc::Src f)
+ {
+ srcTable[uint32_t(mode)] = {RenderFunc::Type::Src, f};
+ }
+private:
+ std::array<RenderFunc, uint32_t(BlendMode::Last)> colorTable;
+ std::array<RenderFunc, uint32_t(BlendMode::Last)> srcTable;
+};
+
+typedef void (*SourceFetchProc)(uint32_t *buffer, const Operator *o,
+ const VSpanData *data, int y, int x,
+ int length);
+typedef void (*ProcessRleSpan)(size_t count, const VRle::Span *spans,
+ void *userData);
+
+extern void memfill32(uint32_t *dest, uint32_t value, int count);
+
+struct LinearGradientValues {
+ float dx;
+ float dy;
+ float l;
+ float off;
+};
+
+struct RadialGradientValues {
+ float dx;
+ float dy;
+ float dr;
+ float sqrfr;
+ float a;
+ float inv2a;
+ bool extended;
+};
+
+struct Operator {
+ BlendMode mode;
+ SourceFetchProc srcFetch;
+ RenderFunc::Color funcSolid;
+ RenderFunc::Src func;
+ union {
+ LinearGradientValues linear;
+ RadialGradientValues radial;
+ };
+};
+
+class VRasterBuffer {
+public:
+ VBitmap::Format prepare(const VBitmap *image);
+ void clear();
+
+ void resetBuffer(int val = 0);
+
+ inline uchar *scanLine(int y)
+ {
+ assert(y >= 0);
+ assert(size_t(y) < mHeight);
+ return mBuffer + y * mBytesPerLine;
+ }
+ uint32_t *pixelRef(int x, int y) const
+ {
+ return (uint32_t *)(mBuffer + y * mBytesPerLine + x * mBytesPerPixel);
+ }
+
+ size_t width() const { return mWidth; }
+ size_t height() const { return mHeight; }
+ size_t bytesPerLine() const { return mBytesPerLine; }
+ size_t bytesPerPixel() const { return mBytesPerPixel; }
+ VBitmap::Format format() const { return mFormat; }
+
+private:
+ VBitmap::Format mFormat{VBitmap::Format::ARGB32_Premultiplied};
+ size_t mWidth{0};
+ size_t mHeight{0};
+ size_t mBytesPerLine{0};
+ size_t mBytesPerPixel{0};
+ mutable uchar * mBuffer{nullptr};
+};
+
+struct VGradientData {
+ VGradient::Spread mSpread;
+ struct Linear {
+ float x1, y1, x2, y2;
+ };
+ struct Radial {
+ float cx, cy, fx, fy, cradius, fradius;
+ };
+ union {
+ Linear linear;
+ Radial radial;
+ };
+ const uint32_t *mColorTable;
+ bool mColorTableAlpha;
+};
+
+struct VTextureData : public VRasterBuffer {
+ uint32_t pixel(int x, int y) const { return *pixelRef(x, y); };
+ uchar alpha() const { return mAlpha; }
+ void setAlpha(uchar alpha) { mAlpha = alpha; }
+ void setClip(const VRect &clip);
+ // clip rect
+ int left;
+ int right;
+ int top;
+ int bottom;
+ bool hasAlpha;
+ uchar mAlpha;
+};
+
+struct VColorTable {
+ uint32_t buffer32[VGradient::colorTableSize];
+ bool alpha{true};
+};
+
+struct VSpanData {
+ enum class Type { None, Solid, LinearGradient, RadialGradient, Texture };
+
+ void updateSpanFunc();
+ void init(VRasterBuffer *image);
+ void setup(const VBrush &brush, BlendMode mode = BlendMode::SrcOver,
+ int alpha = 255);
+ void setupMatrix(const VMatrix &matrix);
+
+ VRect clipRect() const
+ {
+ return VRect(0, 0, mDrawableSize.width(), mDrawableSize.height());
+ }
+
+ void setDrawRegion(const VRect &region)
+ {
+ mOffset = VPoint(region.left(), region.top());
+ mDrawableSize = VSize(region.width(), region.height());
+ }
+
+ uint *buffer(int x, int y) const
+ {
+ return mRasterBuffer->pixelRef(x + mOffset.x(), y + mOffset.y());
+ }
+ void initTexture(const VBitmap *image, int alpha, const VRect &sourceRect);
+ const VTextureData &texture() const { return mTexture; }
+
+ BlendMode mBlendMode{BlendMode::SrcOver};
+ VRasterBuffer * mRasterBuffer;
+ ProcessRleSpan mBlendFunc;
+ ProcessRleSpan mUnclippedBlendFunc;
+ VSpanData::Type mType;
+ std::shared_ptr<const VColorTable> mColorTable{nullptr};
+ VPoint mOffset; // offset to the subsurface
+ VSize mDrawableSize; // suburface size
+ uint32_t mSolid;
+ VGradientData mGradient;
+ VTextureData mTexture;
+
+ float m11, m12, m13, m21, m22, m23, m33, dx, dy; // inverse xform matrix
+ bool fast_matrix{true};
+ VMatrix::MatrixType transformType{VMatrix::MatrixType::None};
+};
+
+#define BYTE_MUL(c, a) \
+ ((((((c) >> 8) & 0x00ff00ff) * (a)) & 0xff00ff00) + \
+ (((((c)&0x00ff00ff) * (a)) >> 8) & 0x00ff00ff))
+
+inline constexpr int vRed(uint32_t c)
+{
+ return ((c >> 16) & 0xff);
+}
+
+inline constexpr int vGreen(uint32_t c)
+{
+ return ((c >> 8) & 0xff);
+}
+
+inline constexpr int vBlue(uint32_t c)
+{
+ return (c & 0xff);
+}
+
+inline constexpr int vAlpha(uint32_t c)
+{
+ return c >> 24;
+}
+
+static inline uint32_t interpolate_pixel(uint x, uint a, uint y, uint b)
+{
+ uint t = (x & 0xff00ff) * a + (y & 0xff00ff) * b;
+ t >>= 8;
+ t &= 0xff00ff;
+ x = ((x >> 8) & 0xff00ff) * a + ((y >> 8) & 0xff00ff) * b;
+ x &= 0xff00ff00;
+ x |= t;
+ return x;
+}
+
+#endif // QDRAWHELPER_P_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vdrawhelper_common.cpp b/vendor/github.com/Benau/go_rlottie/vector_vdrawhelper_common.cpp
new file mode 100644
index 00000000..a4346c96
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vdrawhelper_common.cpp
@@ -0,0 +1,190 @@
+#include "config.h"
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstring>
+#include "vector_vdrawhelper.h"
+
+/*
+result = s
+dest = s * ca + d * cia
+*/
+static void color_Source(uint32_t *dest, int length, uint32_t color,
+ uint32_t alpha)
+{
+ int ialpha, i;
+
+ if (alpha == 255) {
+ memfill32(dest, color, length);
+ } else {
+ ialpha = 255 - alpha;
+ color = BYTE_MUL(color, alpha);
+ for (i = 0; i < length; ++i)
+ dest[i] = color + BYTE_MUL(dest[i], ialpha);
+ }
+}
+
+/*
+ r = s + d * sia
+ dest = r * ca + d * cia
+ = (s + d * sia) * ca + d * cia
+ = s * ca + d * (sia * ca + cia)
+ = s * ca + d * (1 - sa*ca)
+ = s' + d ( 1 - s'a)
+*/
+static void color_SourceOver(uint32_t *dest, int length, uint32_t color,
+ uint32_t alpha)
+{
+ int ialpha, i;
+
+ if (alpha != 255) color = BYTE_MUL(color, alpha);
+ ialpha = 255 - vAlpha(color);
+ for (i = 0; i < length; ++i) dest[i] = color + BYTE_MUL(dest[i], ialpha);
+}
+
+/*
+ result = d * sa
+ dest = d * sa * ca + d * cia
+ = d * (sa * ca + cia)
+*/
+static void color_DestinationIn(uint *dest, int length, uint color,
+ uint alpha)
+{
+ uint a = vAlpha(color);
+ if (alpha != 255) {
+ a = BYTE_MUL(a, alpha) + 255 - alpha;
+ }
+ for (int i = 0; i < length; ++i) {
+ dest[i] = BYTE_MUL(dest[i], a);
+ }
+}
+
+/*
+ result = d * sia
+ dest = d * sia * ca + d * cia
+ = d * (sia * ca + cia)
+*/
+static void color_DestinationOut(uint *dest, int length, uint color,
+ uint alpha)
+{
+ uint a = vAlpha(~color);
+ if (alpha != 255) a = BYTE_MUL(a, alpha) + 255 - alpha;
+ for (int i = 0; i < length; ++i) {
+ dest[i] = BYTE_MUL(dest[i], a);
+ }
+}
+
+static void src_Source(uint32_t *dest, int length, const uint32_t *src,
+ uint32_t alpha)
+{
+ if (alpha == 255) {
+ memcpy(dest, src, size_t(length) * sizeof(uint));
+ } else {
+ uint ialpha = 255 - alpha;
+ for (int i = 0; i < length; ++i) {
+ dest[i] =
+ interpolate_pixel(src[i], alpha, dest[i], ialpha);
+ }
+ }
+}
+
+/* s' = s * ca
+ * d' = s' + d (1 - s'a)
+ */
+static void src_SourceOver(uint32_t *dest, int length, const uint32_t *src,
+ uint32_t alpha)
+{
+ uint s, sia;
+
+ if (alpha == 255) {
+ for (int i = 0; i < length; ++i) {
+ s = src[i];
+ if (s >= 0xff000000)
+ dest[i] = s;
+ else if (s != 0) {
+ sia = vAlpha(~s);
+ dest[i] = s + BYTE_MUL(dest[i], sia);
+ }
+ }
+ } else {
+ /* source' = source * const_alpha
+ * dest = source' + dest ( 1- source'a)
+ */
+ for (int i = 0; i < length; ++i) {
+ s = BYTE_MUL(src[i], alpha);
+ sia = vAlpha(~s);
+ dest[i] = s + BYTE_MUL(dest[i], sia);
+ }
+ }
+}
+
+static void src_DestinationIn(uint *dest, int length, const uint *src,
+ uint alpha)
+{
+ if (alpha == 255) {
+ for (int i = 0; i < length; ++i) {
+ dest[i] = BYTE_MUL(dest[i], vAlpha(src[i]));
+ }
+ } else {
+ uint cia = 255 - alpha;
+ for (int i = 0; i < length; ++i) {
+ uint a = BYTE_MUL(vAlpha(src[i]), alpha) + cia;
+ dest[i] = BYTE_MUL(dest[i], a);
+ }
+ }
+}
+
+static void src_DestinationOut(uint *dest, int length, const uint *src,
+ uint alpha)
+{
+ if (alpha == 255) {
+ for (int i = 0; i < length; ++i) {
+ dest[i] = BYTE_MUL(dest[i], vAlpha(~src[i]));
+ }
+ } else {
+ uint cia = 255 - alpha;
+ for (int i = 0; i < length; ++i) {
+ uint sia = BYTE_MUL(vAlpha(~src[i]), alpha) + cia;
+ dest[i] = BYTE_MUL(dest[i], sia);
+ }
+ }
+}
+
+RenderFuncTable::RenderFuncTable()
+{
+ updateColor(BlendMode::Src, color_Source);
+ updateColor(BlendMode::SrcOver, color_SourceOver);
+ updateColor(BlendMode::DestIn, color_DestinationIn);
+ updateColor(BlendMode::DestOut, color_DestinationOut);
+
+ updateSrc(BlendMode::Src, src_Source);
+ updateSrc(BlendMode::SrcOver, src_SourceOver);
+ updateSrc(BlendMode::DestIn, src_DestinationIn);
+ updateSrc(BlendMode::DestOut, src_DestinationOut);
+
+#if defined(USE_ARM_NEON)
+ neon();
+#endif
+#if defined(__SSE2__)
+ sse();
+#endif
+}
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vdrawhelper_neon.cpp b/vendor/github.com/Benau/go_rlottie/vector_vdrawhelper_neon.cpp
new file mode 100644
index 00000000..24d7f579
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vdrawhelper_neon.cpp
@@ -0,0 +1,34 @@
+#include "config.h"
+#if defined(USE_ARM_NEON)
+
+#include "vector_vdrawhelper.h"
+
+extern "C" void pixman_composite_src_n_8888_asm_neon(int32_t w, int32_t h,
+ uint32_t *dst,
+ int32_t dst_stride,
+ uint32_t src);
+
+extern "C" void pixman_composite_over_n_8888_asm_neon(int32_t w, int32_t h,
+ uint32_t *dst,
+ int32_t dst_stride,
+ uint32_t src);
+
+void memfill32(uint32_t *dest, uint32_t value, int length)
+{
+ pixman_composite_src_n_8888_asm_neon(length, 1, dest, length, value);
+}
+
+static void color_SourceOver(uint32_t *dest, int length,
+ uint32_t color,
+ uint32_t const_alpha)
+{
+ if (const_alpha != 255) color = BYTE_MUL(color, const_alpha);
+
+ pixman_composite_over_n_8888_asm_neon(length, 1, dest, length, color);
+}
+
+void RenderFuncTable::neon()
+{
+ updateColor(BlendMode::Src , color_SourceOver);
+}
+#endif
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vdrawhelper_sse2.cpp b/vendor/github.com/Benau/go_rlottie/vector_vdrawhelper_sse2.cpp
new file mode 100644
index 00000000..51a6c3e0
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vdrawhelper_sse2.cpp
@@ -0,0 +1,261 @@
+#if defined(__SSE2__)
+
+#include <cstring>
+#include <emmintrin.h> /* for SSE2 intrinsics */
+#include <xmmintrin.h> /* for _mm_shuffle_pi16 and _MM_SHUFFLE */
+
+#include "vector_vdrawhelper.h"
+// Each 32bits components of alphaChannel must be in the form 0x00AA00AA
+inline static __m128i v4_byte_mul_sse2(__m128i c, __m128i a)
+{
+ const __m128i ag_mask = _mm_set1_epi32(0xFF00FF00);
+ const __m128i rb_mask = _mm_set1_epi32(0x00FF00FF);
+
+ /* for AG */
+ __m128i v_ag = _mm_and_si128(ag_mask, c);
+ v_ag = _mm_srli_epi32(v_ag, 8);
+ v_ag = _mm_mullo_epi16(a, v_ag);
+ v_ag = _mm_and_si128(ag_mask, v_ag);
+
+ /* for RB */
+ __m128i v_rb = _mm_and_si128(rb_mask, c);
+ v_rb = _mm_mullo_epi16(a, v_rb);
+ v_rb = _mm_srli_epi32(v_rb, 8);
+ v_rb = _mm_and_si128(rb_mask, v_rb);
+
+ /* combine */
+ return _mm_add_epi32(v_ag, v_rb);
+}
+
+static inline __m128i v4_interpolate_color_sse2(__m128i a, __m128i c0,
+ __m128i c1)
+{
+ const __m128i rb_mask = _mm_set1_epi32(0xFF00FF00);
+ const __m128i zero = _mm_setzero_si128();
+
+ __m128i a_l = a;
+ __m128i a_h = a;
+ a_l = _mm_unpacklo_epi16(a_l, a_l);
+ a_h = _mm_unpackhi_epi16(a_h, a_h);
+
+ __m128i a_t = _mm_slli_epi64(a_l, 32);
+ __m128i a_t0 = _mm_slli_epi64(a_h, 32);
+
+ a_l = _mm_add_epi32(a_l, a_t);
+ a_h = _mm_add_epi32(a_h, a_t0);
+
+ __m128i c0_l = c0;
+ __m128i c0_h = c0;
+
+ c0_l = _mm_unpacklo_epi8(c0_l, zero);
+ c0_h = _mm_unpackhi_epi8(c0_h, zero);
+
+ __m128i c1_l = c1;
+ __m128i c1_h = c1;
+
+ c1_l = _mm_unpacklo_epi8(c1_l, zero);
+ c1_h = _mm_unpackhi_epi8(c1_h, zero);
+
+ __m128i cl_sub = _mm_sub_epi16(c0_l, c1_l);
+ __m128i ch_sub = _mm_sub_epi16(c0_h, c1_h);
+
+ cl_sub = _mm_mullo_epi16(cl_sub, a_l);
+ ch_sub = _mm_mullo_epi16(ch_sub, a_h);
+
+ __m128i c1ls = _mm_slli_epi16(c1_l, 8);
+ __m128i c1hs = _mm_slli_epi16(c1_h, 8);
+
+ cl_sub = _mm_add_epi16(cl_sub, c1ls);
+ ch_sub = _mm_add_epi16(ch_sub, c1hs);
+
+ cl_sub = _mm_and_si128(cl_sub, rb_mask);
+ ch_sub = _mm_and_si128(ch_sub, rb_mask);
+
+ cl_sub = _mm_srli_epi64(cl_sub, 8);
+ ch_sub = _mm_srli_epi64(ch_sub, 8);
+
+ cl_sub = _mm_packus_epi16(cl_sub, cl_sub);
+ ch_sub = _mm_packus_epi16(ch_sub, ch_sub);
+
+ return (__m128i)_mm_shuffle_ps((__m128)cl_sub, (__m128)ch_sub, 0x44);
+}
+
+// Load src and dest vector
+#define V4_FETCH_SRC_DEST \
+ __m128i v_src = _mm_loadu_si128((__m128i*)src); \
+ __m128i v_dest = _mm_load_si128((__m128i*)dest);
+
+#define V4_FETCH_SRC __m128i v_src = _mm_loadu_si128((__m128i*)src);
+
+#define V4_STORE_DEST _mm_store_si128((__m128i*)dest, v_src);
+
+#define V4_SRC_DEST_LEN_INC \
+ dest += 4; \
+ src += 4; \
+ length -= 4;
+
+// Multiply src color with const_alpha
+#define V4_ALPHA_MULTIPLY v_src = v4_byte_mul_sse2(v_src, v_alpha);
+
+
+// dest = src + dest * sia
+#define V4_COMP_OP_SRC \
+ v_src = v4_interpolate_color_sse2(v_alpha, v_src, v_dest);
+
+#define LOOP_ALIGNED_U1_A4(DEST, LENGTH, UOP, A4OP) \
+ { \
+ while ((uintptr_t)DEST & 0xF && LENGTH) \
+ UOP \
+ \
+ while (LENGTH) \
+ { \
+ switch (LENGTH) { \
+ case 3: \
+ case 2: \
+ case 1: \
+ UOP break; \
+ default: \
+ A4OP break; \
+ } \
+ } \
+ }
+
+void memfill32(uint32_t* dest, uint32_t value, int length)
+{
+ __m128i vector_data = _mm_set_epi32(value, value, value, value);
+
+ // run till memory alligned to 16byte memory
+ while (length && ((uintptr_t)dest & 0xf)) {
+ *dest++ = value;
+ length--;
+ }
+
+ while (length >= 32) {
+ _mm_store_si128((__m128i*)(dest), vector_data);
+ _mm_store_si128((__m128i*)(dest + 4), vector_data);
+ _mm_store_si128((__m128i*)(dest + 8), vector_data);
+ _mm_store_si128((__m128i*)(dest + 12), vector_data);
+ _mm_store_si128((__m128i*)(dest + 16), vector_data);
+ _mm_store_si128((__m128i*)(dest + 20), vector_data);
+ _mm_store_si128((__m128i*)(dest + 24), vector_data);
+ _mm_store_si128((__m128i*)(dest + 28), vector_data);
+
+ dest += 32;
+ length -= 32;
+ }
+
+ if (length >= 16) {
+ _mm_store_si128((__m128i*)(dest), vector_data);
+ _mm_store_si128((__m128i*)(dest + 4), vector_data);
+ _mm_store_si128((__m128i*)(dest + 8), vector_data);
+ _mm_store_si128((__m128i*)(dest + 12), vector_data);
+
+ dest += 16;
+ length -= 16;
+ }
+
+ if (length >= 8) {
+ _mm_store_si128((__m128i*)(dest), vector_data);
+ _mm_store_si128((__m128i*)(dest + 4), vector_data);
+
+ dest += 8;
+ length -= 8;
+ }
+
+ if (length >= 4) {
+ _mm_store_si128((__m128i*)(dest), vector_data);
+
+ dest += 4;
+ length -= 4;
+ }
+
+ while (length) {
+ *dest++ = value;
+ length--;
+ }
+}
+
+// dest = color + (dest * alpha)
+inline static void copy_helper_sse2(uint32_t* dest, int length,
+ uint32_t color, uint32_t alpha)
+{
+ const __m128i v_color = _mm_set1_epi32(color);
+ const __m128i v_a = _mm_set1_epi16(alpha);
+
+ LOOP_ALIGNED_U1_A4(dest, length,
+ { /* UOP */
+ *dest = color + BYTE_MUL(*dest, alpha);
+ dest++;
+ length--;
+ },
+ { /* A4OP */
+ __m128i v_dest = _mm_load_si128((__m128i*)dest);
+
+ v_dest = v4_byte_mul_sse2(v_dest, v_a);
+ v_dest = _mm_add_epi32(v_dest, v_color);
+
+ _mm_store_si128((__m128i*)dest, v_dest);
+
+ dest += 4;
+ length -= 4;
+ })
+}
+
+static void color_Source(uint32_t* dest, int length, uint32_t color,
+ uint32_t const_alpha)
+{
+ if (const_alpha == 255) {
+ memfill32(dest, color, length);
+ } else {
+ int ialpha;
+
+ ialpha = 255 - const_alpha;
+ color = BYTE_MUL(color, const_alpha);
+ copy_helper_sse2(dest, length, color, ialpha);
+ }
+}
+
+static void color_SourceOver(uint32_t* dest, int length,
+ uint32_t color,
+ uint32_t const_alpha)
+{
+ int ialpha;
+
+ if (const_alpha != 255) color = BYTE_MUL(color, const_alpha);
+ ialpha = 255 - vAlpha(color);
+ copy_helper_sse2(dest, length, color, ialpha);
+}
+
+static void src_Source(uint32_t* dest, int length, const uint32_t* src,
+ uint32_t const_alpha)
+{
+ int ialpha;
+ if (const_alpha == 255) {
+ memcpy(dest, src, length * sizeof(uint32_t));
+ } else {
+ ialpha = 255 - const_alpha;
+ __m128i v_alpha = _mm_set1_epi32(const_alpha);
+
+ LOOP_ALIGNED_U1_A4(dest, length,
+ { /* UOP */
+ *dest = interpolate_pixel(*src, const_alpha,
+ *dest, ialpha);
+ dest++;
+ src++;
+ length--;
+ },
+ {/* A4OP */
+ V4_FETCH_SRC_DEST V4_COMP_OP_SRC V4_STORE_DEST
+ V4_SRC_DEST_LEN_INC})
+ }
+}
+
+void RenderFuncTable::sse()
+{
+ updateColor(BlendMode::Src , color_Source);
+ updateColor(BlendMode::SrcOver , color_SourceOver);
+
+ updateSrc(BlendMode::Src , src_Source);
+}
+
+#endif
diff --git a/vendor/github.com/Benau/go_rlottie/vector_velapsedtimer.cpp b/vendor/github.com/Benau/go_rlottie/vector_velapsedtimer.cpp
new file mode 100644
index 00000000..be065c27
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_velapsedtimer.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "vector_velapsedtimer.h"
+
+void VElapsedTimer::start()
+{
+ clock = std::chrono::high_resolution_clock::now();
+ m_valid = true;
+}
+
+double VElapsedTimer::restart()
+{
+ double elapsedTime = elapsed();
+ start();
+ return elapsedTime;
+}
+
+double VElapsedTimer::elapsed() const
+{
+ if (!isValid()) return 0;
+ return std::chrono::duration<double, std::milli>(
+ std::chrono::high_resolution_clock::now() - clock)
+ .count();
+}
+
+bool VElapsedTimer::hasExpired(double time)
+{
+ double elapsedTime = elapsed();
+ if (elapsedTime > time) return true;
+ return false;
+}
diff --git a/vendor/github.com/Benau/go_rlottie/vector_velapsedtimer.h b/vendor/github.com/Benau/go_rlottie/vector_velapsedtimer.h
new file mode 100644
index 00000000..2cfe9cef
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_velapsedtimer.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef VELAPSEDTIMER_H
+#define VELAPSEDTIMER_H
+
+#include <chrono>
+#include "vector_vglobal.h"
+
+class VElapsedTimer {
+public:
+ double elapsed() const;
+ bool hasExpired(double millsec);
+ void start();
+ double restart();
+ inline bool isValid() const { return m_valid; }
+
+private:
+ std::chrono::high_resolution_clock::time_point clock;
+ bool m_valid{false};
+};
+#endif // VELAPSEDTIMER_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vglobal.h b/vendor/github.com/Benau/go_rlottie/vector_vglobal.h
new file mode 100644
index 00000000..678ee199
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vglobal.h
@@ -0,0 +1,302 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef VGLOBAL_H
+#define VGLOBAL_H
+
+#include <cmath>
+#include <cstdint>
+#include <iostream>
+#include <type_traits>
+#include <utility>
+
+using uint = uint32_t;
+using ushort = uint16_t;
+using uchar = uint8_t;
+
+#if !defined(V_NAMESPACE)
+
+#define V_USE_NAMESPACE
+#define V_BEGIN_NAMESPACE
+#define V_END_NAMESPACE
+
+#else /* user namespace */
+
+#define V_USE_NAMESPACE using namespace ::V_NAMESPACE;
+#define V_BEGIN_NAMESPACE namespace V_NAMESPACE {
+#define V_END_NAMESPACE }
+
+#endif
+
+#ifndef __has_attribute
+# define __has_attribute(x) 0
+#endif /* !__has_attribute */
+
+#if __has_attribute(unused)
+# define V_UNUSED __attribute__((__unused__))
+#else
+# define V_UNUSED
+#endif /* V_UNUSED */
+
+#if __has_attribute(warn_unused_result)
+# define V_REQUIRED_RESULT __attribute__((__warn_unused_result__))
+#else
+# define V_REQUIRED_RESULT
+#endif /* V_REQUIRED_RESULT */
+
+#define V_CONSTEXPR constexpr
+#define V_NOTHROW noexcept
+
+#include "vector_vdebug.h"
+
+#if __GNUC__ >= 7
+#define VECTOR_FALLTHROUGH __attribute__ ((fallthrough));
+#else
+#define VECTOR_FALLTHROUGH
+#endif
+
+#ifdef LOTTIE_THREAD_SUPPORT
+#define vthread_local thread_local
+#else
+#define vthread_local
+#endif
+
+#if defined(_MSC_VER)
+ #define V_ALWAYS_INLINE __forceinline
+#else
+ #define V_ALWAYS_INLINE __attribute__((always_inline))
+#endif
+
+template <typename T>
+V_CONSTEXPR inline const T &vMin(const T &a, const T &b)
+{
+ return (a < b) ? a : b;
+}
+template <typename T>
+V_CONSTEXPR inline const T &vMax(const T &a, const T &b)
+{
+ return (a < b) ? b : a;
+}
+
+static const double EPSILON_DOUBLE = 0.000000000001f;
+static const float EPSILON_FLOAT = 0.000001f;
+
+static inline bool vCompare(float p1, float p2)
+{
+ return (std::abs(p1 - p2) < EPSILON_FLOAT);
+}
+
+static inline bool vIsZero(float f)
+{
+ return (std::abs(f) <= EPSILON_FLOAT);
+}
+
+static inline bool vIsZero(double f)
+{
+ return (std::abs(f) <= EPSILON_DOUBLE);
+}
+
+class vFlagHelper {
+ int i;
+
+public:
+ explicit constexpr inline vFlagHelper(int ai) noexcept : i(ai) {}
+ constexpr inline operator int() const noexcept { return i; }
+
+ explicit constexpr inline vFlagHelper(uint ai) noexcept : i(int(ai)) {}
+ explicit constexpr inline vFlagHelper(short ai) noexcept : i(int(ai)) {}
+ explicit constexpr inline vFlagHelper(ushort ai) noexcept : i(int(uint(ai))) {}
+ constexpr inline operator uint() const noexcept { return uint(i); }
+};
+
+template <typename Enum>
+class vFlag {
+public:
+ static_assert(
+ (sizeof(Enum) <= sizeof(int)),
+ "vFlag only supports int as storage so bigger type will overflow");
+ static_assert((std::is_enum<Enum>::value),
+ "vFlag is only usable on enumeration types.");
+
+ using Int = typename std::conditional<
+ std::is_unsigned<typename std::underlying_type<Enum>::type>::value,
+ unsigned int, signed int>::type;
+
+ using enum_type = Enum;
+ // compiler-generated copy/move ctor/assignment operators are fine!
+
+ vFlag() = default;
+ constexpr vFlag(Enum f) noexcept : i(Int(f)) {}
+ explicit constexpr vFlag(vFlagHelper f) noexcept : i(f) {}
+
+ inline vFlag &operator&=(int mask) noexcept
+ {
+ i &= mask;
+ return *this;
+ }
+ inline vFlag &operator&=(uint mask) noexcept
+ {
+ i &= mask;
+ return *this;
+ }
+ inline vFlag &operator&=(Enum mask) noexcept
+ {
+ i &= Int(mask);
+ return *this;
+ }
+ inline vFlag &operator|=(vFlag f) noexcept
+ {
+ i |= f.i;
+ return *this;
+ }
+ inline vFlag &operator|=(Enum f) noexcept
+ {
+ i |= Int(f);
+ return *this;
+ }
+ inline vFlag &operator^=(vFlag f) noexcept
+ {
+ i ^= f.i;
+ return *this;
+ }
+ inline vFlag &operator^=(Enum f) noexcept
+ {
+ i ^= Int(f);
+ return *this;
+ }
+
+ constexpr inline operator Int() const noexcept { return i; }
+
+ constexpr inline vFlag operator|(vFlag f) const
+ {
+ return vFlag(vFlagHelper(i | f.i));
+ }
+ constexpr inline vFlag operator|(Enum f) const noexcept
+ {
+ return vFlag(vFlagHelper(i | Int(f)));
+ }
+ constexpr inline vFlag operator^(vFlag f) const noexcept
+ {
+ return vFlag(vFlagHelper(i ^ f.i));
+ }
+ constexpr inline vFlag operator^(Enum f) const noexcept
+ {
+ return vFlag(vFlagHelper(i ^ Int(f)));
+ }
+ constexpr inline vFlag operator&(int mask) const noexcept
+ {
+ return vFlag(vFlagHelper(i & mask));
+ }
+ constexpr inline vFlag operator&(uint mask) const noexcept
+ {
+ return vFlag(vFlagHelper(i & mask));
+ }
+ constexpr inline vFlag operator&(Enum f) const noexcept
+ {
+ return vFlag(vFlagHelper(i & Int(f)));
+ }
+ constexpr inline vFlag operator~() const noexcept
+ {
+ return vFlag(vFlagHelper(~i));
+ }
+
+ constexpr inline bool operator!() const noexcept { return !i; }
+
+ constexpr inline bool testFlag(Enum f) const noexcept
+ {
+ return (i & Int(f)) == Int(f) && (Int(f) != 0 || i == Int(f));
+ }
+ inline vFlag &setFlag(Enum f, bool on = true) noexcept
+ {
+ return on ? (*this |= f) : (*this &= ~f);
+ }
+
+ Int i{0};
+};
+
+class VColor {
+public:
+ VColor() = default;
+ explicit VColor(uchar red, uchar green, uchar blue, uchar alpha = 255) noexcept
+ :a(alpha), r(red), g(green), b(blue){}
+ inline uchar red() const noexcept { return r; }
+ inline uchar green() const noexcept { return g; }
+ inline uchar blue() const noexcept { return b; }
+ inline uchar alpha() const noexcept { return a; }
+ inline void setRed(uchar red) noexcept { r = red; }
+ inline void setGreen(uchar green) noexcept { g = green; }
+ inline void setBlue(uchar blue) noexcept { b = blue; }
+ inline void setAlpha(uchar alpha) noexcept { a = alpha; }
+ inline bool isOpaque() const { return a == 255; }
+ inline bool isTransparent() const { return a == 0; }
+ inline bool operator==(const VColor &o) const
+ {
+ return ((a == o.a) && (r == o.r) && (g == o.g) && (b == o.b));
+ }
+ uint premulARGB() const
+ {
+ int pr = (r * a) / 255;
+ int pg = (g * a) / 255;
+ int pb = (b * a) / 255;
+ return uint((a << 24) | (pr << 16) | (pg << 8) | (pb));
+ }
+
+ uint premulARGB(float opacity) const
+ {
+ int alpha = int(a * opacity);
+ int pr = (r * alpha) / 255;
+ int pg = (g * alpha) / 255;
+ int pb = (b * alpha) / 255;
+ return uint((alpha << 24) | (pr << 16) | (pg << 8) | (pb));
+ }
+
+public:
+ uchar a{0};
+ uchar r{0};
+ uchar g{0};
+ uchar b{0};
+};
+
+enum class FillRule: unsigned char { EvenOdd, Winding };
+enum class JoinStyle: unsigned char { Miter, Bevel, Round };
+enum class CapStyle: unsigned char { Flat, Square, Round };
+
+enum class BlendMode {
+ Src,
+ SrcOver,
+ DestIn,
+ DestOut,
+ Last,
+};
+
+#ifndef V_CONSTRUCTOR_FUNCTION
+#define V_CONSTRUCTOR_FUNCTION0(AFUNC) \
+ namespace { \
+ static const struct AFUNC##_ctor_class_ { \
+ inline AFUNC##_ctor_class_() { AFUNC(); } \
+ } AFUNC##_ctor_instance_; \
+ }
+
+#define V_CONSTRUCTOR_FUNCTION(AFUNC) V_CONSTRUCTOR_FUNCTION0(AFUNC)
+#endif
+
+#endif // VGLOBAL_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vimageloader.cpp b/vendor/github.com/Benau/go_rlottie/vector_vimageloader.cpp
new file mode 100644
index 00000000..08f502f8
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vimageloader.cpp
@@ -0,0 +1,220 @@
+#include "vector_vimageloader.h"
+#include "config.h"
+#include "vector_vdebug.h"
+#include <cstring>
+
+#ifdef _WIN32
+# include <windows.h>
+#else
+# include <dlfcn.h>
+#endif // _WIN32
+
+using lottie_image_load_f = unsigned char *(*)(const char *filename, int *x,
+ int *y, int *comp, int req_comp);
+using lottie_image_load_data_f = unsigned char *(*)(const char *data, int len,
+ int *x, int *y, int *comp,
+ int req_comp);
+using lottie_image_free_f = void (*)(unsigned char *);
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern unsigned char *lottie_image_load(char const *filename, int *x, int *y,
+ int *comp, int req_comp);
+extern unsigned char *lottie_image_load_from_data(const char *imageData,
+ int len, int *x, int *y,
+ int *comp, int req_comp);
+extern void lottie_image_free(unsigned char *data);
+
+#ifdef __cplusplus
+}
+#endif
+
+struct VImageLoader::Impl {
+ lottie_image_load_f imageLoad{nullptr};
+ lottie_image_free_f imageFree{nullptr};
+ lottie_image_load_data_f imageFromData{nullptr};
+
+#ifdef LOTTIE_IMAGE_MODULE_SUPPORT
+# ifdef _WIN32
+ HMODULE dl_handle{nullptr};
+ bool moduleLoad()
+ {
+ dl_handle = LoadLibraryA(LOTTIE_IMAGE_MODULE_PLUGIN);
+ return (dl_handle == nullptr);
+ }
+ void moduleFree()
+ {
+ if (dl_handle) FreeLibrary(dl_handle);
+ }
+ void init()
+ {
+ imageLoad = reinterpret_cast<lottie_image_load_f>(
+ GetProcAddress(dl_handle, "lottie_image_load"));
+ imageFree = reinterpret_cast<lottie_image_free_f>(
+ GetProcAddress(dl_handle, "lottie_image_free"));
+ imageFromData = reinterpret_cast<lottie_image_load_data_f>(
+ GetProcAddress(dl_handle, "lottie_image_load_from_data"));
+ }
+# else // _WIN32
+ void *dl_handle{nullptr};
+ void init()
+ {
+ imageLoad = reinterpret_cast<lottie_image_load_f>(
+ dlsym(dl_handle, "lottie_image_load"));
+ imageFree = reinterpret_cast<lottie_image_free_f>(
+ dlsym(dl_handle, "lottie_image_free"));
+ imageFromData = reinterpret_cast<lottie_image_load_data_f>(
+ dlsym(dl_handle, "lottie_image_load_from_data"));
+ }
+
+ void moduleFree()
+ {
+ if (dl_handle) dlclose(dl_handle);
+ }
+ bool moduleLoad()
+ {
+ dl_handle = dlopen(LOTTIE_IMAGE_MODULE_PLUGIN, RTLD_LAZY);
+ return (dl_handle == nullptr);
+ }
+# endif // _WIN32
+#else // LOTTIE_IMAGE_MODULE_SUPPORT
+ void init()
+ {
+ imageLoad = lottie_image_load;
+ imageFree = lottie_image_free;
+ imageFromData = lottie_image_load_from_data;
+ }
+ void moduleFree() {}
+ bool moduleLoad() { return false; }
+#endif // LOTTIE_IMAGE_MODULE_SUPPORT
+
+ Impl()
+ {
+ if (moduleLoad()) {
+ vWarning << "Failed to dlopen librlottie-image-loader library";
+ return;
+ }
+
+ init();
+
+ if (!imageLoad)
+ vWarning << "Failed to find symbol lottie_image_load in "
+ "librlottie-image-loader library";
+
+ if (!imageFree)
+ vWarning << "Failed to find symbol lottie_image_free in "
+ "librlottie-image-loader library";
+
+ if (!imageFromData)
+ vWarning << "Failed to find symbol lottie_image_load_data in "
+ "librlottie-image-loader library";
+ }
+
+ ~Impl() { moduleFree(); }
+
+ VBitmap createBitmap(unsigned char *data, int width, int height,
+ int channel)
+ {
+ // premultiply alpha
+ if (channel == 4)
+ convertToBGRAPremul(data, width, height);
+ else
+ convertToBGRA(data, width, height);
+
+ // create a bitmap of same size.
+ VBitmap result =
+ VBitmap(width, height, VBitmap::Format::ARGB32_Premultiplied);
+
+ // copy the data to bitmap buffer
+ memcpy(result.data(), data, width * height * 4);
+
+ // free the image data
+ imageFree(data);
+
+ return result;
+ }
+
+ VBitmap load(const char *fileName)
+ {
+ if (!imageLoad) return VBitmap();
+
+ int width, height, n;
+ unsigned char *data = imageLoad(fileName, &width, &height, &n, 4);
+
+ if (!data) {
+ return VBitmap();
+ }
+
+ return createBitmap(data, width, height, n);
+ }
+
+ VBitmap load(const char *imageData, size_t len)
+ {
+ if (!imageFromData) return VBitmap();
+
+ int width, height, n;
+ unsigned char *data =
+ imageFromData(imageData, static_cast<int>(len), &width, &height, &n, 4);
+
+ if (!data) {
+ return VBitmap();
+ }
+
+ return createBitmap(data, width, height, n);
+ }
+ /*
+ * convert from RGBA to BGRA and premultiply
+ */
+ void convertToBGRAPremul(unsigned char *bits, int width, int height)
+ {
+ int pixelCount = width * height;
+ unsigned char *pix = bits;
+ for (int i = 0; i < pixelCount; i++) {
+ unsigned char r = pix[0];
+ unsigned char g = pix[1];
+ unsigned char b = pix[2];
+ unsigned char a = pix[3];
+
+ r = (r * a) / 255;
+ g = (g * a) / 255;
+ b = (b * a) / 255;
+
+ pix[0] = b;
+ pix[1] = g;
+ pix[2] = r;
+
+ pix += 4;
+ }
+ }
+ /*
+ * convert from RGBA to BGRA
+ */
+ void convertToBGRA(unsigned char *bits, int width, int height)
+ {
+ int pixelCount = width * height;
+ unsigned char *pix = bits;
+ for (int i = 0; i < pixelCount; i++) {
+ unsigned char r = pix[0];
+ unsigned char b = pix[2];
+ pix[0] = b;
+ pix[2] = r;
+ pix += 4;
+ }
+ }
+};
+
+VImageLoader::VImageLoader() : mImpl(std::make_unique<VImageLoader::Impl>()) {}
+
+VImageLoader::~VImageLoader() {}
+
+VBitmap VImageLoader::load(const char *fileName)
+{
+ return mImpl->load(fileName);
+}
+
+VBitmap VImageLoader::load(const char *data, size_t len)
+{
+ return mImpl->load(data, int(len));
+}
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vimageloader.h b/vendor/github.com/Benau/go_rlottie/vector_vimageloader.h
new file mode 100644
index 00000000..fe9a0be4
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vimageloader.h
@@ -0,0 +1,26 @@
+#ifndef VIMAGELOADER_H
+#define VIMAGELOADER_H
+
+#include <memory>
+
+#include "vector_vbitmap.h"
+
+class VImageLoader
+{
+public:
+ static VImageLoader& instance()
+ {
+ static VImageLoader singleton;
+ return singleton;
+ }
+
+ VBitmap load(const char *fileName);
+ VBitmap load(const char *data, size_t len);
+ ~VImageLoader();
+private:
+ VImageLoader();
+ struct Impl;
+ std::unique_ptr<Impl> mImpl;
+};
+
+#endif // VIMAGELOADER_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vinterpolator.cpp b/vendor/github.com/Benau/go_rlottie/vector_vinterpolator.cpp
new file mode 100644
index 00000000..89e462a1
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vinterpolator.cpp
@@ -0,0 +1,124 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vector_vinterpolator.h"
+#include <cmath>
+
+V_BEGIN_NAMESPACE
+
+#define NEWTON_ITERATIONS 4
+#define NEWTON_MIN_SLOPE 0.02
+#define SUBDIVISION_PRECISION 0.0000001
+#define SUBDIVISION_MAX_ITERATIONS 10
+
+const float VInterpolator::kSampleStepSize =
+ 1.0f / float(VInterpolator::kSplineTableSize - 1);
+
+void VInterpolator::init(float aX1, float aY1, float aX2, float aY2)
+{
+ mX1 = aX1;
+ mY1 = aY1;
+ mX2 = aX2;
+ mY2 = aY2;
+
+ if (mX1 != mY1 || mX2 != mY2) CalcSampleValues();
+}
+
+/*static*/ float VInterpolator::CalcBezier(float aT, float aA1, float aA2)
+{
+ // use Horner's scheme to evaluate the Bezier polynomial
+ return ((A(aA1, aA2) * aT + B(aA1, aA2)) * aT + C(aA1)) * aT;
+}
+
+void VInterpolator::CalcSampleValues()
+{
+ for (int i = 0; i < kSplineTableSize; ++i) {
+ mSampleValues[i] = CalcBezier(float(i) * kSampleStepSize, mX1, mX2);
+ }
+}
+
+float VInterpolator::GetSlope(float aT, float aA1, float aA2)
+{
+ return 3.0f * A(aA1, aA2) * aT * aT + 2.0f * B(aA1, aA2) * aT + C(aA1);
+}
+
+float VInterpolator::value(float aX) const
+{
+ if (mX1 == mY1 && mX2 == mY2) return aX;
+
+ return CalcBezier(GetTForX(aX), mY1, mY2);
+}
+
+float VInterpolator::GetTForX(float aX) const
+{
+ // Find interval where t lies
+ float intervalStart = 0.0;
+ const float* currentSample = &mSampleValues[1];
+ const float* const lastSample = &mSampleValues[kSplineTableSize - 1];
+ for (; currentSample != lastSample && *currentSample <= aX;
+ ++currentSample) {
+ intervalStart += kSampleStepSize;
+ }
+ --currentSample; // t now lies between *currentSample and *currentSample+1
+
+ // Interpolate to provide an initial guess for t
+ float dist =
+ (aX - *currentSample) / (*(currentSample + 1) - *currentSample);
+ float guessForT = intervalStart + dist * kSampleStepSize;
+
+ // Check the slope to see what strategy to use. If the slope is too small
+ // Newton-Raphson iteration won't converge on a root so we use bisection
+ // instead.
+ float initialSlope = GetSlope(guessForT, mX1, mX2);
+ if (initialSlope >= NEWTON_MIN_SLOPE) {
+ return NewtonRaphsonIterate(aX, guessForT);
+ } else if (initialSlope == 0.0) {
+ return guessForT;
+ } else {
+ return BinarySubdivide(aX, intervalStart,
+ intervalStart + kSampleStepSize);
+ }
+}
+
+float VInterpolator::NewtonRaphsonIterate(float aX, float aGuessT) const
+{
+ // Refine guess with Newton-Raphson iteration
+ for (int i = 0; i < NEWTON_ITERATIONS; ++i) {
+ // We're trying to find where f(t) = aX,
+ // so we're actually looking for a root for: CalcBezier(t) - aX
+ float currentX = CalcBezier(aGuessT, mX1, mX2) - aX;
+ float currentSlope = GetSlope(aGuessT, mX1, mX2);
+
+ if (currentSlope == 0.0) return aGuessT;
+
+ aGuessT -= currentX / currentSlope;
+ }
+
+ return aGuessT;
+}
+
+float VInterpolator::BinarySubdivide(float aX, float aA, float aB) const
+{
+ float currentX;
+ float currentT;
+ int i = 0;
+
+ do {
+ currentT = aA + (aB - aA) / 2.0f;
+ currentX = CalcBezier(currentT, mX1, mX2) - aX;
+
+ if (currentX > 0.0) {
+ aB = currentT;
+ } else {
+ aA = currentT;
+ }
+ } while (fabs(currentX) > SUBDIVISION_PRECISION &&
+ ++i < SUBDIVISION_MAX_ITERATIONS);
+
+ return currentT;
+}
+
+V_END_NAMESPACE
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vinterpolator.h b/vendor/github.com/Benau/go_rlottie/vector_vinterpolator.h
new file mode 100644
index 00000000..fb7fa937
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vinterpolator.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef VINTERPOLATOR_H
+#define VINTERPOLATOR_H
+
+#include "vector_vpoint.h"
+
+V_BEGIN_NAMESPACE
+
+class VInterpolator {
+public:
+ VInterpolator()
+ { /* caller must call Init later */
+ }
+
+ VInterpolator(float aX1, float aY1, float aX2, float aY2)
+ {
+ init(aX1, aY1, aX2, aY2);
+ }
+
+ VInterpolator(VPointF pt1, VPointF pt2)
+ {
+ init(pt1.x(), pt1.y(), pt2.x(), pt2.y());
+ }
+
+ void init(float aX1, float aY1, float aX2, float aY2);
+
+ float value(float aX) const;
+
+ void GetSplineDerivativeValues(float aX, float& aDX, float& aDY) const;
+
+private:
+ void CalcSampleValues();
+
+ /**
+ * Returns x(t) given t, x1, and x2, or y(t) given t, y1, and y2.
+ */
+ static float CalcBezier(float aT, float aA1, float aA2);
+
+ /**
+ * Returns dx/dt given t, x1, and x2, or dy/dt given t, y1, and y2.
+ */
+ static float GetSlope(float aT, float aA1, float aA2);
+
+ float GetTForX(float aX) const;
+
+ float NewtonRaphsonIterate(float aX, float aGuessT) const;
+
+ float BinarySubdivide(float aX, float aA, float aB) const;
+
+ static float A(float aA1, float aA2) { return 1.0f - 3.0f * aA2 + 3.0f * aA1; }
+
+ static float B(float aA1, float aA2) { return 3.0f * aA2 - 6.0f * aA1; }
+
+ static float C(float aA1) { return 3.0f * aA1; }
+
+ float mX1;
+ float mY1;
+ float mX2;
+ float mY2;
+ enum { kSplineTableSize = 11 };
+ float mSampleValues[kSplineTableSize];
+ static const float kSampleStepSize;
+};
+
+V_END_NAMESPACE
+
+#endif // VINTERPOLATOR_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vline.h b/vendor/github.com/Benau/go_rlottie/vector_vline.h
new file mode 100644
index 00000000..68edaa59
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vline.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef VLINE_H
+#define VLINE_H
+
+#include "vector_vglobal.h"
+#include "vector_vpoint.h"
+
+V_BEGIN_NAMESPACE
+
+class VLine {
+public:
+ VLine() = default;
+ VLine(float x1, float y1, float x2, float y2)
+ : mX1(x1), mY1(y1), mX2(x2), mY2(y2)
+ {
+ }
+ VLine(const VPointF &p1, const VPointF &p2)
+ : mX1(p1.x()), mY1(p1.y()), mX2(p2.x()), mY2(p2.y())
+ {
+ }
+ float length() const { return length(mX1, mY1, mX2, mY2);}
+ void splitAtLength(float length, VLine &left, VLine &right) const;
+ VPointF p1() const { return {mX1, mY1}; }
+ VPointF p2() const { return {mX2, mY2}; }
+ float angle() const;
+ static float length(float x1, float y1, float x2, float y2);
+
+private:
+ float mX1{0};
+ float mY1{0};
+ float mX2{0};
+ float mY2{0};
+};
+
+inline float VLine::angle() const
+{
+ static constexpr float K_PI = 3.141592f;
+ const float dx = mX2 - mX1;
+ const float dy = mY2 - mY1;
+
+ const float theta = std::atan2(dy, dx) * 180.0f / K_PI;
+ return theta;
+}
+
+// approximate sqrt(x*x + y*y) using alpha max plus beta min algorithm.
+// With alpha = 1, beta = 3/8, giving results with the largest error less
+// than 7% compared to the exact value.
+inline V_ALWAYS_INLINE float VLine::length(float x1, float y1, float x2, float y2)
+{
+ float x = x2 - x1;
+ float y = y2 - y1;
+
+ x = x < 0 ? -x : x;
+ y = y < 0 ? -y : y;
+
+ return (x > y ? x + 0.375f * y : y + 0.375f * x);
+}
+
+inline void VLine::splitAtLength(float lengthAt, VLine &left, VLine &right) const
+{
+ float len = length();
+ float dx = ((mX2 - mX1) / len) * lengthAt;
+ float dy = ((mY2 - mY1) / len) * lengthAt;
+
+ left.mX1 = mX1;
+ left.mY1 = mY1;
+ left.mX2 = left.mX1 + dx;
+ left.mY2 = left.mY1 + dy;
+
+ right.mX1 = left.mX2;
+ right.mY1 = left.mY2;
+ right.mX2 = mX2;
+ right.mY2 = mY2;
+}
+
+#endif //VLINE_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vmatrix.cpp b/vendor/github.com/Benau/go_rlottie/vector_vmatrix.cpp
new file mode 100644
index 00000000..a06ad1b0
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vmatrix.cpp
@@ -0,0 +1,684 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "vector_vmatrix.h"
+#include "vector_vglobal.h"
+#include <cassert>
+#include <cmath>
+
+V_BEGIN_NAMESPACE
+
+/* m11 m21 mtx
+ * m12 m22 mty
+ * m13 m23 m33
+ */
+
+inline float VMatrix::determinant() const
+{
+ return m11 * (m33 * m22 - mty * m23) - m21 * (m33 * m12 - mty * m13) +
+ mtx * (m23 * m12 - m22 * m13);
+}
+
+bool VMatrix::isAffine() const
+{
+ return type() < MatrixType::Project;
+}
+
+bool VMatrix::isIdentity() const
+{
+ return type() == MatrixType::None;
+}
+
+bool VMatrix::isInvertible() const
+{
+ return !vIsZero(determinant());
+}
+
+bool VMatrix::isScaling() const
+{
+ return type() >= MatrixType::Scale;
+}
+bool VMatrix::isRotating() const
+{
+ return type() >= MatrixType::Rotate;
+}
+
+bool VMatrix::isTranslating() const
+{
+ return type() >= MatrixType::Translate;
+}
+
+VMatrix &VMatrix::operator*=(float num)
+{
+ if (num == 1.) return *this;
+
+ m11 *= num;
+ m12 *= num;
+ m13 *= num;
+ m21 *= num;
+ m22 *= num;
+ m23 *= num;
+ mtx *= num;
+ mty *= num;
+ m33 *= num;
+ if (dirty < MatrixType::Scale) dirty = MatrixType::Scale;
+
+ return *this;
+}
+
+VMatrix &VMatrix::operator/=(float div)
+{
+ if (div == 0) return *this;
+
+ div = 1 / div;
+ return operator*=(div);
+}
+
+VMatrix::MatrixType VMatrix::type() const
+{
+ if (dirty == MatrixType::None || dirty < mType) return mType;
+
+ switch (dirty) {
+ case MatrixType::Project:
+ if (!vIsZero(m13) || !vIsZero(m23) || !vIsZero(m33 - 1)) {
+ mType = MatrixType::Project;
+ break;
+ }
+ VECTOR_FALLTHROUGH
+ case MatrixType::Shear:
+ case MatrixType::Rotate:
+ if (!vIsZero(m12) || !vIsZero(m21)) {
+ const float dot = m11 * m12 + m21 * m22;
+ if (vIsZero(dot))
+ mType = MatrixType::Rotate;
+ else
+ mType = MatrixType::Shear;
+ break;
+ }
+ VECTOR_FALLTHROUGH
+ case MatrixType::Scale:
+ if (!vIsZero(m11 - 1) || !vIsZero(m22 - 1)) {
+ mType = MatrixType::Scale;
+ break;
+ }
+ VECTOR_FALLTHROUGH
+ case MatrixType::Translate:
+ if (!vIsZero(mtx) || !vIsZero(mty)) {
+ mType = MatrixType::Translate;
+ break;
+ }
+ VECTOR_FALLTHROUGH
+ case MatrixType::None:
+ mType = MatrixType::None;
+ break;
+ }
+
+ dirty = MatrixType::None;
+ return mType;
+}
+
+VMatrix &VMatrix::translate(float dx, float dy)
+{
+ if (dx == 0 && dy == 0) return *this;
+
+ switch (type()) {
+ case MatrixType::None:
+ mtx = dx;
+ mty = dy;
+ break;
+ case MatrixType::Translate:
+ mtx += dx;
+ mty += dy;
+ break;
+ case MatrixType::Scale:
+ mtx += dx * m11;
+ mty += dy * m22;
+ break;
+ case MatrixType::Project:
+ m33 += dx * m13 + dy * m23;
+ VECTOR_FALLTHROUGH
+ case MatrixType::Shear:
+ case MatrixType::Rotate:
+ mtx += dx * m11 + dy * m21;
+ mty += dy * m22 + dx * m12;
+ break;
+ }
+ if (dirty < MatrixType::Translate) dirty = MatrixType::Translate;
+ return *this;
+}
+
+VMatrix &VMatrix::scale(float sx, float sy)
+{
+ if (sx == 1 && sy == 1) return *this;
+
+ switch (type()) {
+ case MatrixType::None:
+ case MatrixType::Translate:
+ m11 = sx;
+ m22 = sy;
+ break;
+ case MatrixType::Project:
+ m13 *= sx;
+ m23 *= sy;
+ VECTOR_FALLTHROUGH
+ case MatrixType::Rotate:
+ case MatrixType::Shear:
+ m12 *= sx;
+ m21 *= sy;
+ VECTOR_FALLTHROUGH
+ case MatrixType::Scale:
+ m11 *= sx;
+ m22 *= sy;
+ break;
+ }
+ if (dirty < MatrixType::Scale) dirty = MatrixType::Scale;
+ return *this;
+}
+
+VMatrix &VMatrix::shear(float sh, float sv)
+{
+ if (sh == 0 && sv == 0) return *this;
+
+ switch (type()) {
+ case MatrixType::None:
+ case MatrixType::Translate:
+ m12 = sv;
+ m21 = sh;
+ break;
+ case MatrixType::Scale:
+ m12 = sv * m22;
+ m21 = sh * m11;
+ break;
+ case MatrixType::Project: {
+ float tm13 = sv * m23;
+ float tm23 = sh * m13;
+ m13 += tm13;
+ m23 += tm23;
+ VECTOR_FALLTHROUGH
+ }
+ case MatrixType::Rotate:
+ case MatrixType::Shear: {
+ float tm11 = sv * m21;
+ float tm22 = sh * m12;
+ float tm12 = sv * m22;
+ float tm21 = sh * m11;
+ m11 += tm11;
+ m12 += tm12;
+ m21 += tm21;
+ m22 += tm22;
+ break;
+ }
+ }
+ if (dirty < MatrixType::Shear) dirty = MatrixType::Shear;
+ return *this;
+}
+
+static const float deg2rad = float(0.017453292519943295769); // pi/180
+static const float inv_dist_to_plane = 1. / 1024.;
+
+VMatrix &VMatrix::rotate(float a, Axis axis)
+{
+ if (a == 0) return *this;
+
+ float sina = 0;
+ float cosa = 0;
+ if (a == 90. || a == -270.)
+ sina = 1.;
+ else if (a == 270. || a == -90.)
+ sina = -1.;
+ else if (a == 180.)
+ cosa = -1.;
+ else {
+ float b = deg2rad * a; // convert to radians
+ sina = std::sin(b); // fast and convenient
+ cosa = std::cos(b);
+ }
+
+ if (axis == Axis::Z) {
+ switch (type()) {
+ case MatrixType::None:
+ case MatrixType::Translate:
+ m11 = cosa;
+ m12 = sina;
+ m21 = -sina;
+ m22 = cosa;
+ break;
+ case MatrixType::Scale: {
+ float tm11 = cosa * m11;
+ float tm12 = sina * m22;
+ float tm21 = -sina * m11;
+ float tm22 = cosa * m22;
+ m11 = tm11;
+ m12 = tm12;
+ m21 = tm21;
+ m22 = tm22;
+ break;
+ }
+ case MatrixType::Project: {
+ float tm13 = cosa * m13 + sina * m23;
+ float tm23 = -sina * m13 + cosa * m23;
+ m13 = tm13;
+ m23 = tm23;
+ VECTOR_FALLTHROUGH
+ }
+ case MatrixType::Rotate:
+ case MatrixType::Shear: {
+ float tm11 = cosa * m11 + sina * m21;
+ float tm12 = cosa * m12 + sina * m22;
+ float tm21 = -sina * m11 + cosa * m21;
+ float tm22 = -sina * m12 + cosa * m22;
+ m11 = tm11;
+ m12 = tm12;
+ m21 = tm21;
+ m22 = tm22;
+ break;
+ }
+ }
+ if (dirty < MatrixType::Rotate) dirty = MatrixType::Rotate;
+ } else {
+ VMatrix result;
+ if (axis == Axis::Y) {
+ result.m11 = cosa;
+ result.m13 = -sina * inv_dist_to_plane;
+ } else {
+ result.m22 = cosa;
+ result.m23 = -sina * inv_dist_to_plane;
+ }
+ result.mType = MatrixType::Project;
+ *this = result * *this;
+ }
+
+ return *this;
+}
+
+// Matrix product: returns this * m under the row-vector convention used by
+// map() (x' = x*m11 + y*m21 + mtx), i.e. the result applies *this first and
+// then m. Dispatches on the more general of the two cached MatrixTypes so
+// translate/scale-only operands skip the full 3x3 multiply; the result's
+// type/dirty flags are set to that combined type.
+VMatrix VMatrix::operator*(const VMatrix &m) const
+{
+    const MatrixType otherType = m.type();
+    // Multiplying by identity on either side returns the other operand.
+    if (otherType == MatrixType::None) return *this;
+
+    const MatrixType thisType = type();
+    if (thisType == MatrixType::None) return m;
+
+    VMatrix t;
+    MatrixType type = vMax(thisType, otherType);
+    switch (type) {
+    case MatrixType::None:
+        break;
+    case MatrixType::Translate:
+        t.mtx = mtx + m.mtx;
+        // Fixed: was `t.mty += mty + m.mty;` — harmless only because t.mty
+        // is zero-initialized; plain assignment matches the mtx line above.
+        t.mty = mty + m.mty;
+        break;
+    case MatrixType::Scale: {
+        float m11v = m11 * m.m11;
+        float m22v = m22 * m.m22;
+
+        float m31v = mtx * m.m11 + m.mtx;
+        float m32v = mty * m.m22 + m.mty;
+
+        t.m11 = m11v;
+        t.m22 = m22v;
+        t.mtx = m31v;
+        t.mty = m32v;
+        break;
+    }
+    case MatrixType::Rotate:
+    case MatrixType::Shear: {
+        // Affine 2x2 block product plus translation; m13/m23/m33 untouched.
+        float m11v = m11 * m.m11 + m12 * m.m21;
+        float m12v = m11 * m.m12 + m12 * m.m22;
+
+        float m21v = m21 * m.m11 + m22 * m.m21;
+        float m22v = m21 * m.m12 + m22 * m.m22;
+
+        float m31v = mtx * m.m11 + mty * m.m21 + m.mtx;
+        float m32v = mtx * m.m12 + mty * m.m22 + m.mty;
+
+        t.m11 = m11v;
+        t.m12 = m12v;
+        t.m21 = m21v;
+        t.m22 = m22v;
+        t.mtx = m31v;
+        t.mty = m32v;
+        break;
+    }
+    case MatrixType::Project: {
+        // Full 3x3 product; mtx/mty act as the third-row entries m31/m32.
+        float m11v = m11 * m.m11 + m12 * m.m21 + m13 * m.mtx;
+        float m12v = m11 * m.m12 + m12 * m.m22 + m13 * m.mty;
+        float m13v = m11 * m.m13 + m12 * m.m23 + m13 * m.m33;
+
+        float m21v = m21 * m.m11 + m22 * m.m21 + m23 * m.mtx;
+        float m22v = m21 * m.m12 + m22 * m.m22 + m23 * m.mty;
+        float m23v = m21 * m.m13 + m22 * m.m23 + m23 * m.m33;
+
+        float m31v = mtx * m.m11 + mty * m.m21 + m33 * m.mtx;
+        float m32v = mtx * m.m12 + mty * m.m22 + m33 * m.mty;
+        float m33v = mtx * m.m13 + mty * m.m23 + m33 * m.m33;
+
+        t.m11 = m11v;
+        t.m12 = m12v;
+        t.m13 = m13v;
+        t.m21 = m21v;
+        t.m22 = m22v;
+        t.m23 = m23v;
+        t.mtx = m31v;
+        t.mty = m32v;
+        t.m33 = m33v;
+    }
+    }
+
+    t.dirty = type;
+    t.mType = type;
+
+    return t;
+}
+
+// In-place variant of operator*(): concatenates o onto *this, dispatching on
+// the more general of the two cached matrix types so simple forms stay cheap.
+VMatrix &VMatrix::operator*=(const VMatrix &o)
+{
+    const MatrixType otherType = o.type();
+    if (otherType == MatrixType::None) return *this;
+
+    const MatrixType thisType = type();
+    if (thisType == MatrixType::None) return operator=(o);
+
+    MatrixType t = vMax(thisType, otherType);
+    switch (t) {
+    case MatrixType::None:
+        break;
+    case MatrixType::Translate:
+        mtx += o.mtx;
+        mty += o.mty;
+        break;
+    case MatrixType::Scale: {
+        float m11v = m11 * o.m11;
+        float m22v = m22 * o.m22;
+
+        float m31v = mtx * o.m11 + o.mtx;
+        float m32v = mty * o.m22 + o.mty;
+
+        m11 = m11v;
+        m22 = m22v;
+        mtx = m31v;
+        mty = m32v;
+        break;
+    }
+    case MatrixType::Rotate:
+    case MatrixType::Shear: {
+        // Affine 2x2 block product plus translation row.
+        float m11v = m11 * o.m11 + m12 * o.m21;
+        float m12v = m11 * o.m12 + m12 * o.m22;
+
+        float m21v = m21 * o.m11 + m22 * o.m21;
+        float m22v = m21 * o.m12 + m22 * o.m22;
+
+        float m31v = mtx * o.m11 + mty * o.m21 + o.mtx;
+        float m32v = mtx * o.m12 + mty * o.m22 + o.mty;
+
+        m11 = m11v;
+        m12 = m12v;
+        m21 = m21v;
+        m22 = m22v;
+        mtx = m31v;
+        mty = m32v;
+        break;
+    }
+    case MatrixType::Project: {
+        // Full 3x3 product; mtx/mty act as the third-row entries m31/m32.
+        float m11v = m11 * o.m11 + m12 * o.m21 + m13 * o.mtx;
+        float m12v = m11 * o.m12 + m12 * o.m22 + m13 * o.mty;
+        float m13v = m11 * o.m13 + m12 * o.m23 + m13 * o.m33;
+
+        float m21v = m21 * o.m11 + m22 * o.m21 + m23 * o.mtx;
+        float m22v = m21 * o.m12 + m22 * o.m22 + m23 * o.mty;
+        float m23v = m21 * o.m13 + m22 * o.m23 + m23 * o.m33;
+
+        float m31v = mtx * o.m11 + mty * o.m21 + m33 * o.mtx;
+        float m32v = mtx * o.m12 + mty * o.m22 + m33 * o.mty;
+        float m33v = mtx * o.m13 + mty * o.m23 + m33 * o.m33;
+
+        m11 = m11v;
+        m12 = m12v;
+        m13 = m13v;
+        m21 = m21v;
+        m22 = m22v;
+        m23 = m23v;
+        mtx = m31v;
+        mty = m32v;
+        m33 = m33v;
+    }
+    }
+
+    dirty = t;
+    mType = t;
+
+    return *this;
+}
+
+// Returns the adjugate (classical adjoint) of the full 3x3 matrix: the
+// transposed cofactor matrix, with mtx/mty standing in for m31/m32.
+// mType is left None and dirty set to Project so type() reclassifies the
+// result on demand.
+VMatrix VMatrix::adjoint() const
+{
+    float h11, h12, h13, h21, h22, h23, h31, h32, h33;
+    h11 = m22 * m33 - m23 * mty;
+    h21 = m23 * mtx - m21 * m33;
+    h31 = m21 * mty - m22 * mtx;
+    h12 = m13 * mty - m12 * m33;
+    h22 = m11 * m33 - m13 * mtx;
+    h32 = m12 * mtx - m11 * mty;
+    h13 = m12 * m23 - m13 * m22;
+    h23 = m13 * m21 - m11 * m23;
+    h33 = m11 * m22 - m12 * m21;
+
+    VMatrix res;
+    res.m11 = h11;
+    res.m12 = h12;
+    res.m13 = h13;
+    res.m21 = h21;
+    res.m22 = h22;
+    res.m23 = h23;
+    res.mtx = h31;
+    res.mty = h32;
+    res.m33 = h33;
+    res.mType = MatrixType::None;
+    res.dirty = MatrixType::Project;
+
+    return res;
+}
+
+// Returns the inverse matrix: closed forms for Translate/Scale, and the
+// adjugate-over-determinant formula for everything else. If non-null,
+// *invertible reports success; on failure a default-constructed (identity)
+// matrix is returned.
+VMatrix VMatrix::inverted(bool *invertible) const
+{
+    VMatrix invert;
+    bool inv = true;
+
+    switch (type()) {
+    case MatrixType::None:
+        break;
+    case MatrixType::Translate:
+        invert.mtx = -mtx;
+        invert.mty = -mty;
+        break;
+    case MatrixType::Scale:
+        // Invertible only if neither axis scale is (near) zero.
+        inv = !vIsZero(m11);
+        inv &= !vIsZero(m22);
+        if (inv) {
+            invert.m11 = 1.0f / m11;
+            invert.m22 = 1.0f / m22;
+            invert.mtx = -mtx * invert.m11;
+            invert.mty = -mty * invert.m22;
+        }
+        break;
+    default:
+        // general case
+        float det = determinant();
+        inv = !vIsZero(det);
+        if (inv) invert = (adjoint() /= det);
+        // TODO Test above line
+        break;
+    }
+
+    if (invertible) *invertible = inv;
+
+    if (inv) {
+        // inverting doesn't change the type
+        invert.mType = mType;
+        invert.dirty = dirty;
+    }
+
+    return invert;
+}
+
+// Equality is defined as fuzzy (epsilon-based) float comparison, not exact.
+bool VMatrix::operator==(const VMatrix &o) const
+{
+    return fuzzyCompare(o);
+}
+
+bool VMatrix::operator!=(const VMatrix &o) const
+{
+    return !operator==(o);
+}
+
+// NOTE(review): only the six affine entries are compared; the projective
+// terms m13/m23/m33 are ignored. Presumably fine for the affine-only
+// callers, but confirm before relying on == for projective matrices.
+bool VMatrix::fuzzyCompare(const VMatrix &o) const
+{
+    return vCompare(m11, o.m11) && vCompare(m12, o.m12) &&
+           vCompare(m21, o.m21) && vCompare(m22, o.m22) &&
+           vCompare(mtx, o.mtx) && vCompare(mty, o.mty);
+}
+
+#define V_NEAR_CLIP 0.000001f
+#ifdef MAP
+#undef MAP
+#endif
+// Maps the point (x, y) through this matrix into (nx, ny), switching on a
+// local variable `t` (the MatrixType) expected to be in scope at the use
+// site. For projective matrices the homogeneous w is clamped to V_NEAR_CLIP
+// before the perspective divide to avoid blow-up near the clip plane.
+#define MAP(x, y, nx, ny) \
+    do { \
+        float FX_ = x; \
+        float FY_ = y; \
+        switch (t) { \
+        case MatrixType::None: \
+            nx = FX_; \
+            ny = FY_; \
+            break; \
+        case MatrixType::Translate: \
+            nx = FX_ + mtx; \
+            ny = FY_ + mty; \
+            break; \
+        case MatrixType::Scale: \
+            nx = m11 * FX_ + mtx; \
+            ny = m22 * FY_ + mty; \
+            break; \
+        case MatrixType::Rotate: \
+        case MatrixType::Shear: \
+        case MatrixType::Project: \
+            nx = m11 * FX_ + m21 * FY_ + mtx; \
+            ny = m12 * FX_ + m22 * FY_ + mty; \
+            if (t == MatrixType::Project) { \
+                float w = (m13 * FX_ + m23 * FY_ + m33); \
+                if (w < V_NEAR_CLIP) w = V_NEAR_CLIP; \
+                w = 1. / w; \
+                nx *= w; \
+                ny *= w; \
+            } \
+        } \
+    } while (0)
+
+// Maps an integer rect to the bounding rect of its transformed corners.
+// Translate/Scale use exact closed forms; Rotate/Shear map all four corners
+// via MAP and take the min/max. Projective mapping of rects is unsupported
+// and asserts.
+VRect VMatrix::map(const VRect &rect) const
+{
+    VMatrix::MatrixType t = type();
+    if (t <= MatrixType::Translate)
+        return rect.translated(std::lround(mtx), std::lround(mty));
+
+    if (t <= MatrixType::Scale) {
+        int x = std::lround(m11 * rect.x() + mtx);
+        int y = std::lround(m22 * rect.y() + mty);
+        int w = std::lround(m11 * rect.width());
+        int h = std::lround(m22 * rect.height());
+        // Negative scale flips the rect; renormalize to positive extents.
+        if (w < 0) {
+            w = -w;
+            x -= w;
+        }
+        if (h < 0) {
+            h = -h;
+            y -= h;
+        }
+        return {x, y, w, h};
+    } else if (t < MatrixType::Project) {
+        // see mapToPolygon for explanations of the algorithm.
+        float x = 0, y = 0;
+        MAP(rect.left(), rect.top(), x, y);
+        float xmin = x;
+        float ymin = y;
+        float xmax = x;
+        float ymax = y;
+        MAP(rect.right() + 1, rect.top(), x, y);
+        xmin = vMin(xmin, x);
+        ymin = vMin(ymin, y);
+        xmax = vMax(xmax, x);
+        ymax = vMax(ymax, y);
+        MAP(rect.right() + 1, rect.bottom() + 1, x, y);
+        xmin = vMin(xmin, x);
+        ymin = vMin(ymin, y);
+        xmax = vMax(xmax, x);
+        ymax = vMax(ymax, y);
+        MAP(rect.left(), rect.bottom() + 1, x, y);
+        xmin = vMin(xmin, x);
+        ymin = vMin(ymin, y);
+        xmax = vMax(xmax, x);
+        ymax = vMax(ymax, y);
+        return VRect(std::lround(xmin), std::lround(ymin),
+                     std::lround(xmax) - std::lround(xmin),
+                     std::lround(ymax) - std::lround(ymin));
+    } else {
+        // Not supported
+        assert(0);
+        return {};
+    }
+}
+
+// Maps a single point through the matrix (row-vector convention:
+// x' = x*m11 + y*m21 + mtx), including the perspective divide for
+// projective matrices. Note: unlike MAP, w is not clamped here.
+VPointF VMatrix::map(const VPointF &p) const
+{
+    float fx = p.x();
+    float fy = p.y();
+
+    float x = 0, y = 0;
+
+    VMatrix::MatrixType t = type();
+    switch (t) {
+    case MatrixType::None:
+        x = fx;
+        y = fy;
+        break;
+    case MatrixType::Translate:
+        x = fx + mtx;
+        y = fy + mty;
+        break;
+    case MatrixType::Scale:
+        x = m11 * fx + mtx;
+        y = m22 * fy + mty;
+        break;
+    case MatrixType::Rotate:
+    case MatrixType::Shear:
+    case MatrixType::Project:
+        x = m11 * fx + m21 * fy + mtx;
+        y = m12 * fx + m22 * fy + mty;
+        if (t == MatrixType::Project) {
+            float w = 1.0f / (m13 * fx + m23 * fy + m33);
+            x *= w;
+            y *= w;
+        }
+    }
+    return {x, y};
+}
+
+V_END_NAMESPACE
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vmatrix.h b/vendor/github.com/Benau/go_rlottie/vector_vmatrix.h
new file mode 100644
index 00000000..ee3ad2b5
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vmatrix.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef VMATRIX_H
+#define VMATRIX_H
+#include "vector_vglobal.h"
+#include "vector_vpoint.h"
+#include "vector_vrect.h"
+
+V_BEGIN_NAMESPACE
+
+struct VMatrixData;
+// 3x3 transform matrix used by the rasterizer. Points are mapped as row
+// vectors (x' = x*m11 + y*m21 + mtx), with mtx/mty holding the translation
+// (third-row) entries and m33 the projective scale. A cached MatrixType
+// classifies the matrix so the cheap forms (translate/scale) can bypass the
+// full matrix math in map() and operator*().
+class VMatrix {
+public:
+    enum class Axis { X, Y, Z };
+    // Ordered by generality: vMax() of two operand types is the type of
+    // their product, and comparisons like `t <= Scale` select fast paths.
+    enum class MatrixType: unsigned char {
+        None = 0x00,
+        Translate = 0x01,
+        Scale = 0x02,
+        Rotate = 0x04,
+        Shear = 0x08,
+        Project = 0x10
+    };
+    VMatrix() = default;
+    bool isAffine() const;
+    bool isIdentity() const;
+    bool isInvertible() const;
+    bool isScaling() const;
+    bool isRotating() const;
+    bool isTranslating() const;
+    MatrixType type() const;
+    inline float determinant() const;
+
+    // Read-only element accessors (underscored to avoid clashing with the
+    // private data members of the same name).
+    float m_11() const { return m11;}
+    float m_12() const { return m12;}
+    float m_13() const { return m13;}
+
+    float m_21() const { return m21;}
+    float m_22() const { return m22;}
+    float m_23() const { return m23;}
+
+    float m_tx() const { return mtx;}
+    float m_ty() const { return mty;}
+    float m_33() const { return m33;}
+
+    // All mutators concatenate onto *this and return it for chaining.
+    VMatrix &translate(VPointF pos) { return translate(pos.x(), pos.y()); }
+    VMatrix &translate(float dx, float dy);
+    VMatrix &scale(VPointF s) { return scale(s.x(), s.y()); }
+    VMatrix &scale(float sx, float sy);
+    VMatrix &shear(float sh, float sv);
+    VMatrix &rotate(float a, Axis axis = VMatrix::Axis::Z);
+    VMatrix &rotateRadians(float a, Axis axis = VMatrix::Axis::Z);
+
+    VPointF map(const VPointF &p) const;
+    inline VPointF map(float x, float y) const;
+    VRect map(const VRect &r) const;
+
+    V_REQUIRED_RESULT VMatrix inverted(bool *invertible = nullptr) const;
+    V_REQUIRED_RESULT VMatrix adjoint() const;
+
+    VMatrix operator*(const VMatrix &o) const;
+    VMatrix & operator*=(const VMatrix &);
+    VMatrix & operator*=(float mul);
+    VMatrix & operator/=(float div);
+    bool operator==(const VMatrix &) const;
+    bool operator!=(const VMatrix &) const;
+    bool fuzzyCompare(const VMatrix &) const;
+    float scale() const;
+private:
+    friend struct VSpanData;
+    // Identity by default. mType caches the classification; dirty records
+    // the most general group of entries touched since the last type() scan.
+    float m11{1}, m12{0}, m13{0};
+    float m21{0}, m22{1}, m23{0};
+    float mtx{0}, mty{0}, m33{1};
+    mutable MatrixType mType{MatrixType::None};
+    mutable MatrixType dirty{MatrixType::None};
+};
+
+// Estimates the overall scale factor by mapping a length-2 diagonal vector
+// through the matrix and halving the resulting length.
+inline float VMatrix::scale() const
+{
+    constexpr float SQRT_2 = 1.41421f;
+    VPointF p1(0, 0);
+    VPointF p2(SQRT_2, SQRT_2);
+    p1 = map(p1);
+    p2 = map(p2);
+    VPointF final = p2 - p1;
+
+    return std::sqrt(final.x() * final.x() + final.y() * final.y()) / 2.0f;
+}
+
+// Convenience overload forwarding to map(const VPointF &).
+inline VPointF VMatrix::map(float x, float y) const
+{
+    return map(VPointF(x, y));
+}
+
+V_END_NAMESPACE
+
+#endif // VMATRIX_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vpainter.cpp b/vendor/github.com/Benau/go_rlottie/vector_vpainter.cpp
new file mode 100644
index 00000000..bcabc554
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vpainter.cpp
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "vector_vpainter.h"
+#include <algorithm>
+
+
+V_BEGIN_NAMESPACE
+
+
+// Rasterizes an RLE coverage buffer through the current span/blend setup,
+// clipped against the span data's clip rect. The VPoint parameter is
+// accepted but unused (the rle spans carry absolute coordinates).
+void VPainter::drawRle(const VPoint &, const VRle &rle)
+{
+    if (rle.empty()) return;
+    // mSpanData.updateSpanFunc();
+
+    if (!mSpanData.mUnclippedBlendFunc) return;
+
+    // do draw after applying clip.
+    rle.intersect(mSpanData.clipRect(), mSpanData.mUnclippedBlendFunc,
+                  &mSpanData);
+}
+
+// Same as above, but clips against another RLE mask instead of the clip rect.
+void VPainter::drawRle(const VRle &rle, const VRle &clip)
+{
+    if (rle.empty() || clip.empty()) return;
+
+    if (!mSpanData.mUnclippedBlendFunc) return;
+
+    rle.intersect(clip, mSpanData.mUnclippedBlendFunc, &mSpanData);
+}
+
+// Fills rect r (clamped to the drawable surface) by synthesizing
+// full-coverage spans, one per scanline, and feeding them to the blend
+// function in batches of up to 256 rows.
+static void fillRect(const VRect &r, VSpanData *data)
+{
+    auto x1 = std::max(r.x(), 0);
+    auto x2 = std::min(r.x() + r.width(), data->mDrawableSize.width());
+    auto y1 = std::max(r.y(), 0);
+    auto y2 = std::min(r.y() + r.height(), data->mDrawableSize.height());
+
+    if (x2 <= x1 || y2 <= y1) return;
+
+    const int nspans = 256;
+    VRle::Span spans[nspans];
+
+    int y = y1;
+    while (y < y2) {
+        int n = std::min(nspans, y2 - y);
+        int i = 0;
+        while (i < n) {
+            spans[i].x = short(x1);
+            spans[i].len = ushort(x2 - x1);
+            spans[i].y = short(y + i);
+            spans[i].coverage = 255;
+            ++i;
+        }
+
+        data->mUnclippedBlendFunc(n, spans, data);
+        y += n;
+    }
+}
+
+// 1:1 (unscaled) bitmap blit: binds the bitmap as the span texture, offsets
+// it by target-source, and fills the target rect.
+void VPainter::drawBitmapUntransform(const VRect &  target,
+                                     const VBitmap &bitmap,
+                                     const VRect &  source,
+                                     uint8_t        const_alpha)
+{
+    mSpanData.initTexture(&bitmap, const_alpha, source);
+    if (!mSpanData.mUnclippedBlendFunc) return;
+
+    // update translation matrix for source texture.
+    mSpanData.dx = float(target.x() - source.x());
+    mSpanData.dy = float(target.y() - source.y());
+
+    fillRect(target, &mSpanData);
+}
+
+// Convenience constructor: immediately binds the target buffer via begin().
+VPainter::VPainter(VBitmap *buffer)
+{
+    begin(buffer);
+}
+// Binds the target bitmap, initializes span data, and clears the surface.
+// Always reports success.
+bool VPainter::begin(VBitmap *buffer)
+{
+    mBuffer.prepare(buffer);
+    mSpanData.init(&mBuffer);
+    // TODO find a better api to clear the surface
+    mBuffer.clear();
+    return true;
+}
+// No teardown required; kept for begin()/end() API symmetry.
+void VPainter::end() {}
+
+// Restricts rendering to a sub-region of the target surface.
+void VPainter::setDrawRegion(const VRect &region)
+{
+    mSpanData.setDrawRegion(region);
+}
+
+void VPainter::setBrush(const VBrush &brush)
+{
+    mSpanData.setup(brush);
+}
+
+void VPainter::setBlendMode(BlendMode mode)
+{
+    mSpanData.mBlendMode = mode;
+}
+
+VRect VPainter::clipBoundingRect() const
+{
+    return mSpanData.clipRect();
+}
+
+// drawBitmap overloads below all funnel into the (target, source) form.
+void VPainter::drawBitmap(const VPoint &point, const VBitmap &bitmap,
+                          const VRect &source, uint8_t const_alpha)
+{
+    if (!bitmap.valid()) return;
+
+    drawBitmap(VRect(point, bitmap.size()),
+               bitmap, source, const_alpha);
+}
+
+void VPainter::drawBitmap(const VRect &target, const VBitmap &bitmap,
+                          const VRect &source, uint8_t const_alpha)
+{
+    if (!bitmap.valid()) return;
+
+    // clear any existing brush data.
+    setBrush(VBrush());
+
+    if (target.size() == source.size()) {
+        drawBitmapUntransform(target, bitmap, source, const_alpha);
+    } else {
+        // @TODO scaling — scaled blits are silently dropped for now.
+    }
+}
+
+void VPainter::drawBitmap(const VPoint &point, const VBitmap &bitmap,
+                          uint8_t const_alpha)
+{
+    if (!bitmap.valid()) return;
+
+    drawBitmap(VRect(point, bitmap.size()),
+               bitmap, bitmap.rect(),
+               const_alpha);
+}
+
+void VPainter::drawBitmap(const VRect &rect, const VBitmap &bitmap,
+                          uint8_t const_alpha)
+{
+    if (!bitmap.valid()) return;
+
+    drawBitmap(rect, bitmap, bitmap.rect(),
+               const_alpha);
+}
+
+V_END_NAMESPACE
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vpainter.h b/vendor/github.com/Benau/go_rlottie/vector_vpainter.h
new file mode 100644
index 00000000..01cd99a5
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vpainter.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef VPAINTER_H
+#define VPAINTER_H
+
+#include "vector_vbrush.h"
+#include "vector_vpoint.h"
+#include "vector_vrle.h"
+#include "vector_vdrawhelper.h"
+
+V_BEGIN_NAMESPACE
+
+class VBitmap;
+// Minimal software painter: binds a VBitmap target (begin/end), then
+// rasterizes RLE coverage buffers or blits bitmaps through VSpanData's
+// blend functions, honoring brush, blend mode, and a clip region.
+class VPainter {
+public:
+    VPainter() = default;
+    explicit VPainter(VBitmap *buffer);
+    bool begin(VBitmap *buffer);
+    void end();
+    void setDrawRegion(const VRect &region); // sub surface rendering area.
+    void setBrush(const VBrush &brush);
+    void setBlendMode(BlendMode mode);
+    void drawRle(const VPoint &pos, const VRle &rle);
+    void drawRle(const VRle &rle, const VRle &clip);
+    VRect clipBoundingRect() const;
+
+    void drawBitmap(const VPoint &point, const VBitmap &bitmap, const VRect &source, uint8_t const_alpha = 255);
+    void drawBitmap(const VRect &target, const VBitmap &bitmap, const VRect &source, uint8_t const_alpha = 255);
+    void drawBitmap(const VPoint &point, const VBitmap &bitmap, uint8_t const_alpha = 255);
+    void drawBitmap(const VRect &rect, const VBitmap &bitmap, uint8_t const_alpha = 255);
+private:
+    // 1:1 blit helper used when target and source sizes match exactly.
+    void drawBitmapUntransform(const VRect &target, const VBitmap &bitmap,
+                               const VRect &source, uint8_t const_alpha);
+    VRasterBuffer mBuffer;
+    VSpanData     mSpanData;
+};
+
+V_END_NAMESPACE
+
+#endif // VPAINTER_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vpath.cpp b/vendor/github.com/Benau/go_rlottie/vector_vpath.cpp
new file mode 100644
index 00000000..470c6d42
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vpath.cpp
@@ -0,0 +1,709 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "vector_vpath.h"
+#include <cassert>
+#include <iterator>
+#include <vector>
+#include "vector_vbezier.h"
+#include "vector_vdebug.h"
+#include "vector_vline.h"
+#include "vector_vrect.h"
+
+V_BEGIN_NAMESPACE
+
+// Maps every stored point through m; invalidates the cached path length.
+void VPath::VPathData::transform(const VMatrix &m)
+{
+    for (auto &i : m_points) {
+        i = m.map(i);
+    }
+    mLengthDirty = true;
+}
+
+// Returns the total path length, lazily recomputed: lines contribute their
+// Euclidean length, cubics their VBezier arc length; MoveTo/Close add zero.
+float VPath::VPathData::length() const
+{
+    if (!mLengthDirty) return mLength;
+
+    mLengthDirty = false;
+    mLength = 0.0;
+
+    // i tracks the current position in m_points; element kinds consume
+    // 1 (MoveTo/LineTo) or 3 (CubicTo) points each.
+    size_t i = 0;
+    for (auto e : m_elements) {
+        switch (e) {
+        case VPath::Element::MoveTo:
+            i++;
+            break;
+        case VPath::Element::LineTo: {
+            mLength += VLine(m_points[i - 1], m_points[i]).length();
+            i++;
+            break;
+        }
+        case VPath::Element::CubicTo: {
+            mLength += VBezier::fromPoints(m_points[i - 1], m_points[i],
+                                           m_points[i + 1], m_points[i + 2])
+                           .length();
+            i += 3;
+            break;
+        }
+        case VPath::Element::Close:
+            break;
+        }
+    }
+
+    return mLength;
+}
+
+// After a Close, a drawing command with no explicit MoveTo implicitly
+// restarts the path at the origin.
+void VPath::VPathData::checkNewSegment()
+{
+    if (mNewSegment) {
+        moveTo(0, 0);
+        mNewSegment = false;
+    }
+}
+
+// Starts a new sub-path at (x, y); remembered as the Close target.
+void VPath::VPathData::moveTo(float x, float y)
+{
+    mStartPoint = {x, y};
+    mNewSegment = false;
+    m_elements.emplace_back(VPath::Element::MoveTo);
+    m_points.emplace_back(x, y);
+    m_segments++;
+    mLengthDirty = true;
+}
+
+void VPath::VPathData::lineTo(float x, float y)
+{
+    checkNewSegment();
+    m_elements.emplace_back(VPath::Element::LineTo);
+    m_points.emplace_back(x, y);
+    mLengthDirty = true;
+}
+
+// Cubic Bezier to (ex, ey) with control points (cx1, cy1) and (cx2, cy2).
+void VPath::VPathData::cubicTo(float cx1, float cy1, float cx2, float cy2,
+                               float ex, float ey)
+{
+    checkNewSegment();
+    m_elements.emplace_back(VPath::Element::CubicTo);
+    m_points.emplace_back(cx1, cy1);
+    m_points.emplace_back(cx2, cy2);
+    m_points.emplace_back(ex, ey);
+    mLengthDirty = true;
+}
+
+// Closes the current sub-path: adds an explicit line back to the segment's
+// start point (if not already there), then a Close element.
+void VPath::VPathData::close()
+{
+    if (empty()) return;
+
+    const VPointF &lastPt = m_points.back();
+    if (!fuzzyCompare(mStartPoint, lastPt)) {
+        lineTo(mStartPoint.x(), mStartPoint.y());
+    }
+    m_elements.push_back(VPath::Element::Close);
+    mNewSegment = true;
+    mLengthDirty = true;
+}
+
+// Clears all elements and points; the zero length is exact, so the cache
+// is marked clean.
+void VPath::VPathData::reset()
+{
+    if (empty()) return;
+
+    m_elements.clear();
+    m_points.clear();
+    m_segments = 0;
+    mLength = 0;
+    mLengthDirty = false;
+}
+
+size_t VPath::VPathData::segments() const
+{
+    return m_segments;
+}
+
+// Grows the point/element storage by at least the given additional counts.
+void VPath::VPathData::reserve(size_t pts, size_t elms)
+{
+    if (m_points.capacity() < m_points.size() + pts)
+        m_points.reserve(m_points.size() + pts);
+    if (m_elements.capacity() < m_elements.size() + elms)
+        m_elements.reserve(m_elements.size() + elms);
+}
+
+// PATH_KAPPA: control-point distance for approximating a quarter circle
+// with one cubic Bezier.
+static VPointF curvesForArc(const VRectF &, float, float, VPointF *, size_t *);
+static constexpr float PATH_KAPPA = 0.5522847498f;
+static constexpr float K_PI = 3.141592f;
+
+// Appends an elliptical arc (inscribed in rect, angles in degrees) as up to
+// five cubic segments produced by curvesForArc. forceMoveTo starts a new
+// sub-path at the arc start instead of connecting with a line.
+void VPath::VPathData::arcTo(const VRectF &rect, float startAngle,
+                             float sweepLength, bool forceMoveTo)
+{
+    size_t  point_count = 0;
+    VPointF pts[15];
+    VPointF curve_start =
+        curvesForArc(rect, startAngle, sweepLength, pts, &point_count);
+
+    reserve(point_count + 1, point_count / 3 + 1);
+    if (empty() || forceMoveTo) {
+        moveTo(curve_start.x(), curve_start.y());
+    } else {
+        lineTo(curve_start.x(), curve_start.y());
+    }
+    for (size_t i = 0; i < point_count; i += 3) {
+        cubicTo(pts[i].x(), pts[i].y(), pts[i + 1].x(), pts[i + 1].y(),
+                pts[i + 2].x(), pts[i + 2].y());
+    }
+}
+
+// Circle as an oval inscribed in its bounding square.
+void VPath::VPathData::addCircle(float cx, float cy, float radius,
+                                 VPath::Direction dir)
+{
+    addOval(VRectF(cx - radius, cy - radius, 2 * radius, 2 * radius), dir);
+}
+
+// Appends a closed ellipse inscribed in rect as four kappa-weighted cubic
+// Beziers, starting at 12 o'clock, wound per dir.
+void VPath::VPathData::addOval(const VRectF &rect, VPath::Direction dir)
+{
+    if (rect.empty()) return;
+
+    float x = rect.x();
+    float y = rect.y();
+
+    float w = rect.width();
+    float w2 = rect.width() / 2;
+    float w2k = w2 * PATH_KAPPA;
+
+    float h = rect.height();
+    float h2 = rect.height() / 2;
+    float h2k = h2 * PATH_KAPPA;
+
+    reserve(13, 6);  // 1Move + 4Cubic + 1Close
+    if (dir == VPath::Direction::CW) {
+        // moveto 12 o'clock.
+        moveTo(x + w2, y);
+        // 12 -> 3 o'clock
+        cubicTo(x + w2 + w2k, y, x + w, y + h2 - h2k, x + w, y + h2);
+        // 3 -> 6 o'clock
+        cubicTo(x + w, y + h2 + h2k, x + w2 + w2k, y + h, x + w2, y + h);
+        // 6 -> 9 o'clock
+        cubicTo(x + w2 - w2k, y + h, x, y + h2 + h2k, x, y + h2);
+        // 9 -> 12 o'clock
+        cubicTo(x, y + h2 - h2k, x + w2 - w2k, y, x + w2, y);
+    } else {
+        // moveto 12 o'clock.
+        moveTo(x + w2, y);
+        // 12 -> 9 o'clock
+        cubicTo(x + w2 - w2k, y, x, y + h2 - h2k, x, y + h2);
+        // 9 -> 6 o'clock
+        cubicTo(x, y + h2 + h2k, x + w2 - w2k, y + h, x + w2, y + h);
+        // 6 -> 3 o'clock
+        cubicTo(x + w2 + w2k, y + h, x + w, y + h2 + h2k, x + w, y + h2);
+        // 3 -> 12 o'clock
+        cubicTo(x + w, y + h2 - h2k, x + w2 + w2k, y, x + w2, y);
+    }
+    close();
+}
+
+// Appends a closed axis-aligned rectangle starting at the top-right corner.
+// NOTE(review): degenerate rects are skipped only when BOTH w and h are
+// zero — a zero-width/nonzero-height rect is still emitted; confirm intent.
+void VPath::VPathData::addRect(const VRectF &rect, VPath::Direction dir)
+{
+    float x = rect.x();
+    float y = rect.y();
+    float w = rect.width();
+    float h = rect.height();
+
+    if (vCompare(w, 0.f) && vCompare(h, 0.f)) return;
+
+    reserve(5, 6);  // 1Move + 4Line + 1Close
+    if (dir == VPath::Direction::CW) {
+        moveTo(x + w, y);
+        lineTo(x + w, y + h);
+        lineTo(x, y + h);
+        lineTo(x, y);
+        close();
+    } else {
+        moveTo(x + w, y);
+        lineTo(x, y);
+        lineTo(x, y + h);
+        lineTo(x + w, y + h);
+        close();
+    }
+}
+
+// Uniform-radius round rect: clamps the radius to half the shorter side,
+// then delegates to the (rx, ry) overload.
+void VPath::VPathData::addRoundRect(const VRectF &rect, float roundness,
+                                    VPath::Direction dir)
+{
+    if (2 * roundness > rect.width()) roundness = rect.width() / 2.0f;
+    if (2 * roundness > rect.height()) roundness = rect.height() / 2.0f;
+    addRoundRect(rect, roundness, roundness, dir);
+}
+
+// Rounded rectangle via four 90-degree arcs; falls back to addRect when
+// either corner radius is (near) zero.
+void VPath::VPathData::addRoundRect(const VRectF &rect, float rx, float ry,
+                                    VPath::Direction dir)
+{
+    if (vCompare(rx, 0.f) || vCompare(ry, 0.f)) {
+        addRect(rect, dir);
+        return;
+    }
+
+    float x = rect.x();
+    float y = rect.y();
+    float w = rect.width();
+    float h = rect.height();
+    // clamp the rx and ry radius value.
+    rx = 2 * rx;
+    ry = 2 * ry;
+    if (rx > w) rx = w;
+    if (ry > h) ry = h;
+
+    reserve(17, 10);  // 1Move + 4Cubic + 1Close
+    if (dir == VPath::Direction::CW) {
+        moveTo(x + w, y + ry / 2.f);
+        arcTo(VRectF(x + w - rx, y + h - ry, rx, ry), 0, -90, false);
+        arcTo(VRectF(x, y + h - ry, rx, ry), -90, -90, false);
+        arcTo(VRectF(x, y, rx, ry), -180, -90, false);
+        arcTo(VRectF(x + w - rx, y, rx, ry), -270, -90, false);
+        close();
+    } else {
+        moveTo(x + w, y + ry / 2.f);
+        arcTo(VRectF(x + w - rx, y, rx, ry), 0, 90, false);
+        arcTo(VRectF(x, y, rx, ry), 90, 90, false);
+        arcTo(VRectF(x, y + h - ry, rx, ry), 180, 90, false);
+        arcTo(VRectF(x + w - rx, y + h - ry, rx, ry), 270, 90, false);
+        close();
+    }
+}
+
+static float tForArcAngle(float angle);
+// Computes the start and/or end point of an arc of the ellipse inscribed in
+// r, beginning at `angle` degrees and sweeping `length` degrees. Either
+// output pointer may be null to skip that endpoint.
+void findEllipseCoords(const VRectF &r, float angle, float length,
+                       VPointF *startPoint, VPointF *endPoint)
+{
+    if (r.empty()) {
+        if (startPoint) *startPoint = VPointF();
+        if (endPoint) *endPoint = VPointF();
+        return;
+    }
+
+    float w2 = r.width() / 2;
+    float h2 = r.height() / 2;
+
+    float    angles[2] = {angle, angle + length};
+    VPointF *points[2] = {startPoint, endPoint};
+
+    for (int i = 0; i < 2; ++i) {
+        if (!points[i]) continue;
+
+        // Reduce the angle to [0, 360) and then to a quadrant plus a
+        // fractional parameter t along that quadrant's Bezier.
+        float theta = angles[i] - 360 * floorf(angles[i] / 360);
+        float t = theta / 90;
+        // truncate
+        int quadrant = int(t);
+        t -= quadrant;
+
+        t = tForArcAngle(90 * t);
+
+        // swap x and y?
+        if (quadrant & 1) t = 1 - t;
+
+        float a, b, c, d;
+        VBezier::coefficients(t, a, b, c, d);
+        VPointF p(a + b + c * PATH_KAPPA, d + c + b * PATH_KAPPA);
+
+        // left quadrants
+        if (quadrant == 1 || quadrant == 2) p.rx() = -p.x();
+
+        // top quadrants
+        if (quadrant == 0 || quadrant == 1) p.ry() = -p.y();
+
+        *points[i] = r.center() + VPointF(w2 * p.x(), h2 * p.y());
+    }
+}
+
+// Converts an angle within a quadrant (0..90 degrees) to the Bezier
+// parameter t on the kappa quarter-circle approximation, averaging two
+// two-step Newton refinements (one matching cos, one matching sin).
+static float tForArcAngle(float angle)
+{
+    float radians, cos_angle, sin_angle, tc, ts, t;
+
+    if (vCompare(angle, 0.f)) return 0;
+    if (vCompare(angle, 90.0f)) return 1;
+
+    radians = (angle / 180) * K_PI;
+
+    cos_angle = cosf(radians);
+    sin_angle = sinf(radians);
+
+    // initial guess
+    tc = angle / 90;
+
+    // do some iterations of newton's method to approximate cos_angle
+    // finds the zero of the function b.pointAt(tc).x() - cos_angle
+    tc -= ((((2 - 3 * PATH_KAPPA) * tc + 3 * (PATH_KAPPA - 1)) * tc) * tc + 1 -
+           cos_angle)  // value
+          / (((6 - 9 * PATH_KAPPA) * tc + 6 * (PATH_KAPPA - 1)) *
+             tc);  // derivative
+    tc -= ((((2 - 3 * PATH_KAPPA) * tc + 3 * (PATH_KAPPA - 1)) * tc) * tc + 1 -
+           cos_angle)  // value
+          / (((6 - 9 * PATH_KAPPA) * tc + 6 * (PATH_KAPPA - 1)) *
+             tc);  // derivative
+
+    // initial guess
+    ts = tc;
+    // do some iterations of newton's method to approximate sin_angle
+    // finds the zero of the function b.pointAt(tc).y() - sin_angle
+    ts -= ((((3 * PATH_KAPPA - 2) * ts - 6 * PATH_KAPPA + 3) * ts +
+            3 * PATH_KAPPA) *
+               ts -
+           sin_angle) /
+          (((9 * PATH_KAPPA - 6) * ts + 12 * PATH_KAPPA - 6) * ts +
+           3 * PATH_KAPPA);
+    ts -= ((((3 * PATH_KAPPA - 2) * ts - 6 * PATH_KAPPA + 3) * ts +
+            3 * PATH_KAPPA) *
+               ts -
+           sin_angle) /
+          (((9 * PATH_KAPPA - 6) * ts + 12 * PATH_KAPPA - 6) * ts +
+           3 * PATH_KAPPA);
+
+    // use the average of the t that best approximates cos_angle
+    // and the t that best approximates sin_angle
+    t = 0.5f * (tc + ts);
+    return t;
+}
+
+// The return value is the starting point of the arc
+// Emits the cubic control points (3 per segment, up to 15 total) that
+// approximate the requested elliptical arc into `curves`, advancing
+// *point_count; sweep is clamped to +/-360 degrees and full circles take a
+// fast path. Partial first/last quadrants are handled by splitting the
+// quadrant Bezier at the t computed by tForArcAngle.
+static VPointF curvesForArc(const VRectF &rect, float startAngle,
+                            float sweepLength, VPointF *curves,
+                            size_t *point_count)
+{
+    if (rect.empty()) {
+        return {};
+    }
+
+    float x = rect.x();
+    float y = rect.y();
+
+    float w = rect.width();
+    float w2 = rect.width() / 2;
+    float w2k = w2 * PATH_KAPPA;
+
+    float h = rect.height();
+    float h2 = rect.height() / 2;
+    float h2k = h2 * PATH_KAPPA;
+
+    // Precomputed control points of the four quadrant Beziers of the full
+    // ellipse, listed clockwise in screen coordinates from 3 o'clock.
+    VPointF points[16] = {
+        // start point
+        VPointF(x + w, y + h2),
+
+        // 0 -> 270 degrees
+        VPointF(x + w, y + h2 + h2k), VPointF(x + w2 + w2k, y + h),
+        VPointF(x + w2, y + h),
+
+        // 270 -> 180 degrees
+        VPointF(x + w2 - w2k, y + h), VPointF(x, y + h2 + h2k),
+        VPointF(x, y + h2),
+
+        // 180 -> 90 degrees
+        VPointF(x, y + h2 - h2k), VPointF(x + w2 - w2k, y), VPointF(x + w2, y),
+
+        // 90 -> 0 degrees
+        VPointF(x + w2 + w2k, y), VPointF(x + w, y + h2 - h2k),
+        VPointF(x + w, y + h2)};
+
+    if (sweepLength > 360)
+        sweepLength = 360;
+    else if (sweepLength < -360)
+        sweepLength = -360;
+
+    // Special case fast paths
+    if (startAngle == 0.0f) {
+        if (vCompare(sweepLength, 360)) {
+            for (int i = 11; i >= 0; --i) curves[(*point_count)++] = points[i];
+            return points[12];
+        } else if (vCompare(sweepLength, -360)) {
+            for (int i = 1; i <= 12; ++i) curves[(*point_count)++] = points[i];
+            return points[0];
+        }
+    }
+
+    int startSegment = int(floorf(startAngle / 90.0f));
+    int endSegment = int(floorf((startAngle + sweepLength) / 90.0f));
+
+    float startT = (startAngle - startSegment * 90) / 90;
+    float endT = (startAngle + sweepLength - endSegment * 90) / 90;
+
+    int delta = sweepLength > 0 ? 1 : -1;
+    if (delta < 0) {
+        startT = 1 - startT;
+        endT = 1 - endT;
+    }
+
+    // avoid empty start segment
+    if (vIsZero(startT - float(1))) {
+        startT = 0;
+        startSegment += delta;
+    }
+
+    // avoid empty end segment
+    if (vIsZero(endT)) {
+        endT = 1;
+        endSegment -= delta;
+    }
+
+    startT = tForArcAngle(startT * 90);
+    endT = tForArcAngle(endT * 90);
+
+    const bool splitAtStart = !vIsZero(startT);
+    const bool splitAtEnd = !vIsZero(endT - float(1));
+
+    const int end = endSegment + delta;
+
+    // empty arc?
+    if (startSegment == end) {
+        const int quadrant = 3 - ((startSegment % 4) + 4) % 4;
+        const int j = 3 * quadrant;
+        return delta > 0 ? points[j + 3] : points[j];
+    }
+
+    VPointF startPoint, endPoint;
+    findEllipseCoords(rect, startAngle, sweepLength, &startPoint, &endPoint);
+
+    for (int i = startSegment; i != end; i += delta) {
+        const int quadrant = 3 - ((i % 4) + 4) % 4;
+        const int j = 3 * quadrant;
+
+        VBezier b;
+        if (delta > 0)
+            b = VBezier::fromPoints(points[j + 3], points[j + 2], points[j + 1],
+                                    points[j]);
+        else
+            b = VBezier::fromPoints(points[j], points[j + 1], points[j + 2],
+                                    points[j + 3]);
+
+        // empty arc?
+        if (startSegment == endSegment && vCompare(startT, endT))
+            return startPoint;
+
+        if (i == startSegment) {
+            if (i == endSegment && splitAtEnd)
+                b = b.onInterval(startT, endT);
+            else if (splitAtStart)
+                b = b.onInterval(startT, 1);
+        } else if (i == endSegment && splitAtEnd) {
+            b = b.onInterval(0, endT);
+        }
+
+        // push control points
+        curves[(*point_count)++] = b.pt2();
+        curves[(*point_count)++] = b.pt3();
+        curves[(*point_count)++] = b.pt4();
+    }
+
+    // Snap the final control point to the analytically computed arc end.
+    curves[*(point_count)-1] = endPoint;
+
+    return startPoint;
+}
+
+void VPath::VPathData::addPolystar(float points, float innerRadius,
+ float outerRadius, float innerRoundness,
+ float outerRoundness, float startAngle,
+ float cx, float cy, VPath::Direction dir)
+{
+ const static float POLYSTAR_MAGIC_NUMBER = 0.47829f / 0.28f;
+ float currentAngle = (startAngle - 90.0f) * K_PI / 180.0f;
+ float x;
+ float y;
+ float partialPointRadius = 0;
+ float anglePerPoint = (2.0f * K_PI / points);
+ float halfAnglePerPoint = anglePerPoint / 2.0f;
+ float partialPointAmount = points - floorf(points);
+ bool longSegment = false;
+ size_t numPoints = size_t(ceilf(points) * 2);
+ float angleDir = ((dir == VPath::Direction::CW) ? 1.0f : -1.0f);
+ bool hasRoundness = false;
+
+ innerRoundness /= 100.0f;
+ outerRoundness /= 100.0f;
+
+ if (!vCompare(partialPointAmount, 0)) {
+ currentAngle +=
+ halfAnglePerPoint * (1.0f - partialPointAmount) * angleDir;
+ }
+
+ if (!vCompare(partialPointAmount, 0)) {
+ partialPointRadius =
+ innerRadius + partialPointAmount * (outerRadius - innerRadius);
+ x = partialPointRadius * cosf(currentAngle);
+ y = partialPointRadius * sinf(currentAngle);
+ currentAngle += anglePerPoint * partialPointAmount / 2.0f * angleDir;
+ } else {
+ x = outerRadius * cosf(currentAngle);
+ y = outerRadius * sinf(currentAngle);
+ currentAngle += halfAnglePerPoint * angleDir;
+ }
+
+ if (vIsZero(innerRoundness) && vIsZero(outerRoundness)) {
+ reserve(numPoints + 2, numPoints + 3);
+ } else {
+ reserve(numPoints * 3 + 2, numPoints + 3);
+ hasRoundness = true;
+ }
+
+ moveTo(x + cx, y + cy);
+
+ for (size_t i = 0; i < numPoints; i++) {
+ float radius = longSegment ? outerRadius : innerRadius;
+ float dTheta = halfAnglePerPoint;
+ if (!vIsZero(partialPointRadius) && i == numPoints - 2) {
+ dTheta = anglePerPoint * partialPointAmount / 2.0f;
+ }
+ if (!vIsZero(partialPointRadius) && i == numPoints - 1) {
+ radius = partialPointRadius;
+ }
+ float previousX = x;
+ float previousY = y;
+ x = radius * cosf(currentAngle);
+ y = radius * sinf(currentAngle);
+
+ if (hasRoundness) {
+ float cp1Theta =
+ (atan2f(previousY, previousX) - K_PI / 2.0f * angleDir);
+ float cp1Dx = cosf(cp1Theta);
+ float cp1Dy = sinf(cp1Theta);
+ float cp2Theta = (atan2f(y, x) - K_PI / 2.0f * angleDir);
+ float cp2Dx = cosf(cp2Theta);
+ float cp2Dy = sinf(cp2Theta);
+
+ float cp1Roundness = longSegment ? innerRoundness : outerRoundness;
+ float cp2Roundness = longSegment ? outerRoundness : innerRoundness;
+ float cp1Radius = longSegment ? innerRadius : outerRadius;
+ float cp2Radius = longSegment ? outerRadius : innerRadius;
+
+ float cp1x = cp1Radius * cp1Roundness * POLYSTAR_MAGIC_NUMBER *
+ cp1Dx / points;
+ float cp1y = cp1Radius * cp1Roundness * POLYSTAR_MAGIC_NUMBER *
+ cp1Dy / points;
+ float cp2x = cp2Radius * cp2Roundness * POLYSTAR_MAGIC_NUMBER *
+ cp2Dx / points;
+ float cp2y = cp2Radius * cp2Roundness * POLYSTAR_MAGIC_NUMBER *
+ cp2Dy / points;
+
+ if (!vIsZero(partialPointAmount) &&
+ ((i == 0) || (i == numPoints - 1))) {
+ cp1x *= partialPointAmount;
+ cp1y *= partialPointAmount;
+ cp2x *= partialPointAmount;
+ cp2y *= partialPointAmount;
+ }
+
+ cubicTo(previousX - cp1x + cx, previousY - cp1y + cy, x + cp2x + cx,
+ y + cp2y + cy, x + cx, y + cy);
+ } else {
+ lineTo(x + cx, y + cy);
+ }
+
+ currentAngle += dTheta * angleDir;
+ longSegment = !longSegment;
+ }
+
+ close();
+}
+
+void VPath::VPathData::addPolygon(float points, float radius, float roundness,
+ float startAngle, float cx, float cy,
+ VPath::Direction dir)
+{
+ // TODO: Need to support floating point number for number of points
+ const static float POLYGON_MAGIC_NUMBER = 0.25;
+ float currentAngle = (startAngle - 90.0f) * K_PI / 180.0f;
+ float x;
+ float y;
+ float anglePerPoint = 2.0f * K_PI / floorf(points);
+ size_t numPoints = size_t(floorf(points));
+ float angleDir = ((dir == VPath::Direction::CW) ? 1.0f : -1.0f);
+ bool hasRoundness = false;
+
+ roundness /= 100.0f;
+
+ currentAngle = (currentAngle - 90.0f) * K_PI / 180.0f;
+ x = radius * cosf(currentAngle);
+ y = radius * sinf(currentAngle);
+ currentAngle += anglePerPoint * angleDir;
+
+ if (vIsZero(roundness)) {
+ reserve(numPoints + 2, numPoints + 3);
+ } else {
+ reserve(numPoints * 3 + 2, numPoints + 3);
+ hasRoundness = true;
+ }
+
+ moveTo(x + cx, y + cy);
+
+ for (size_t i = 0; i < numPoints; i++) {
+ float previousX = x;
+ float previousY = y;
+ x = (radius * cosf(currentAngle));
+ y = (radius * sinf(currentAngle));
+
+ if (hasRoundness) {
+ float cp1Theta =
+ (atan2f(previousY, previousX) - K_PI / 2.0f * angleDir);
+ float cp1Dx = cosf(cp1Theta);
+ float cp1Dy = sinf(cp1Theta);
+ float cp2Theta = atan2f(y, x) - K_PI / 2.0f * angleDir;
+ float cp2Dx = cosf(cp2Theta);
+ float cp2Dy = sinf(cp2Theta);
+
+ float cp1x = radius * roundness * POLYGON_MAGIC_NUMBER * cp1Dx;
+ float cp1y = radius * roundness * POLYGON_MAGIC_NUMBER * cp1Dy;
+ float cp2x = radius * roundness * POLYGON_MAGIC_NUMBER * cp2Dx;
+ float cp2y = radius * roundness * POLYGON_MAGIC_NUMBER * cp2Dy;
+
+ cubicTo(previousX - cp1x + cx, previousY - cp1y + cy, x + cp2x + cx,
+ y + cp2y + cy, x, y);
+ } else {
+ lineTo(x + cx, y + cy);
+ }
+
+ currentAngle += anglePerPoint * angleDir;
+ }
+
+ close();
+}
+
+void VPath::VPathData::addPath(const VPathData &path, const VMatrix *m)
+{
+ size_t segment = path.segments();
+
+ // make sure enough memory available
+ if (m_points.capacity() < m_points.size() + path.m_points.size())
+ m_points.reserve(m_points.size() + path.m_points.size());
+
+ if (m_elements.capacity() < m_elements.size() + path.m_elements.size())
+ m_elements.reserve(m_elements.size() + path.m_elements.size());
+
+ if (m) {
+ for (const auto &i : path.m_points) {
+ m_points.push_back(m->map(i));
+ }
+ } else {
+ std::copy(path.m_points.begin(), path.m_points.end(),
+ back_inserter(m_points));
+ }
+
+ std::copy(path.m_elements.begin(), path.m_elements.end(),
+ back_inserter(m_elements));
+
+ m_segments += segment;
+ mLengthDirty = true;
+}
+
+V_END_NAMESPACE
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vpath.h b/vendor/github.com/Benau/go_rlottie/vector_vpath.h
new file mode 100644
index 00000000..17d6687d
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vpath.h
@@ -0,0 +1,285 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef VPATH_H
+#define VPATH_H
+#include <vector>
+#include "vector_vcowptr.h"
+#include "vector_vmatrix.h"
+#include "vector_vpoint.h"
+#include "vector_vrect.h"
+
+V_BEGIN_NAMESPACE
+
+struct VPathData;
+class VPath {
+public:
+ enum class Direction { CCW, CW };
+
+ enum class Element : uchar { MoveTo, LineTo, CubicTo, Close };
+ bool empty() const;
+ bool null() const;
+ void moveTo(const VPointF &p);
+ void moveTo(float x, float y);
+ void lineTo(const VPointF &p);
+ void lineTo(float x, float y);
+ void cubicTo(const VPointF &c1, const VPointF &c2, const VPointF &e);
+ void cubicTo(float c1x, float c1y, float c2x, float c2y, float ex,
+ float ey);
+ void arcTo(const VRectF &rect, float startAngle, float sweepLength,
+ bool forceMoveTo);
+ void close();
+ void reset();
+ void reserve(size_t pts, size_t elms);
+ size_t segments() const;
+ void addCircle(float cx, float cy, float radius,
+ VPath::Direction dir = Direction::CW);
+ void addOval(const VRectF &rect, VPath::Direction dir = Direction::CW);
+ void addRoundRect(const VRectF &rect, float rx, float ry,
+ VPath::Direction dir = Direction::CW);
+ void addRoundRect(const VRectF &rect, float roundness,
+ VPath::Direction dir = Direction::CW);
+ void addRect(const VRectF &rect, VPath::Direction dir = Direction::CW);
+ void addPolystar(float points, float innerRadius, float outerRadius,
+ float innerRoundness, float outerRoundness,
+ float startAngle, float cx, float cy,
+ VPath::Direction dir = Direction::CW);
+ void addPolygon(float points, float radius, float roundness,
+ float startAngle, float cx, float cy,
+ VPath::Direction dir = Direction::CW);
+ void addPath(const VPath &path);
+ void addPath(const VPath &path, const VMatrix &m);
+ void transform(const VMatrix &m);
+ float length() const;
+ const std::vector<VPath::Element> &elements() const;
+ const std::vector<VPointF> & points() const;
+ void clone(const VPath &srcPath);
+ bool unique() const { return d.unique();}
+ size_t refCount() const { return d.refCount();}
+
+private:
+ struct VPathData {
+ bool empty() const { return m_elements.empty(); }
+ bool null() const { return empty() && !m_elements.capacity();}
+ void moveTo(float x, float y);
+ void lineTo(float x, float y);
+ void cubicTo(float cx1, float cy1, float cx2, float cy2, float ex, float ey);
+ void close();
+ void reset();
+ void reserve(size_t, size_t);
+ void checkNewSegment();
+ size_t segments() const;
+ void transform(const VMatrix &m);
+ float length() const;
+ void addRoundRect(const VRectF &, float, float, VPath::Direction);
+ void addRoundRect(const VRectF &, float, VPath::Direction);
+ void addRect(const VRectF &, VPath::Direction);
+ void arcTo(const VRectF &, float, float, bool);
+ void addCircle(float, float, float, VPath::Direction);
+ void addOval(const VRectF &, VPath::Direction);
+ void addPolystar(float points, float innerRadius, float outerRadius,
+ float innerRoundness, float outerRoundness,
+ float startAngle, float cx, float cy,
+ VPath::Direction dir = Direction::CW);
+ void addPolygon(float points, float radius, float roundness,
+ float startAngle, float cx, float cy,
+ VPath::Direction dir = Direction::CW);
+ void addPath(const VPathData &path, const VMatrix *m = nullptr);
+ void clone(const VPath::VPathData &o) { *this = o;}
+ const std::vector<VPath::Element> &elements() const
+ {
+ return m_elements;
+ }
+ const std::vector<VPointF> &points() const { return m_points; }
+ std::vector<VPointF> m_points;
+ std::vector<VPath::Element> m_elements;
+ size_t m_segments;
+ VPointF mStartPoint;
+ mutable float mLength{0};
+ mutable bool mLengthDirty{true};
+ bool mNewSegment;
+ };
+
+ vcow_ptr<VPathData> d;
+};
+
+inline bool VPath::empty() const
+{
+ return d->empty();
+}
+
+/*
+ * path is empty as well as null(no memory for data allocated yet).
+ */
+inline bool VPath::null() const
+{
+ return d->null();
+}
+
+inline void VPath::moveTo(const VPointF &p)
+{
+ d.write().moveTo(p.x(), p.y());
+}
+
+inline void VPath::lineTo(const VPointF &p)
+{
+ d.write().lineTo(p.x(), p.y());
+}
+
+inline void VPath::close()
+{
+ d.write().close();
+}
+
+inline void VPath::reset()
+{
+ d.write().reset();
+}
+
+inline void VPath::reserve(size_t pts, size_t elms)
+{
+ d.write().reserve(pts, elms);
+}
+
+inline size_t VPath::segments() const
+{
+ return d->segments();
+}
+
+inline float VPath::length() const
+{
+ return d->length();
+}
+
+inline void VPath::cubicTo(const VPointF &c1, const VPointF &c2,
+ const VPointF &e)
+{
+ d.write().cubicTo(c1.x(), c1.y(), c2.x(), c2.y(), e.x(), e.y());
+}
+
+inline void VPath::lineTo(float x, float y)
+{
+ d.write().lineTo(x, y);
+}
+
+inline void VPath::moveTo(float x, float y)
+{
+ d.write().moveTo(x, y);
+}
+
+inline void VPath::cubicTo(float c1x, float c1y, float c2x, float c2y, float ex,
+ float ey)
+{
+ d.write().cubicTo(c1x, c1y, c2x, c2y, ex, ey);
+}
+
+inline void VPath::transform(const VMatrix &m)
+{
+ d.write().transform(m);
+}
+
+inline void VPath::arcTo(const VRectF &rect, float startAngle,
+ float sweepLength, bool forceMoveTo)
+{
+ d.write().arcTo(rect, startAngle, sweepLength, forceMoveTo);
+}
+
+inline void VPath::addRect(const VRectF &rect, VPath::Direction dir)
+{
+ d.write().addRect(rect, dir);
+}
+
+inline void VPath::addRoundRect(const VRectF &rect, float rx, float ry,
+ VPath::Direction dir)
+{
+ d.write().addRoundRect(rect, rx, ry, dir);
+}
+
+inline void VPath::addRoundRect(const VRectF &rect, float roundness,
+ VPath::Direction dir)
+{
+ d.write().addRoundRect(rect, roundness, dir);
+}
+
+inline void VPath::addCircle(float cx, float cy, float radius,
+ VPath::Direction dir)
+{
+ d.write().addCircle(cx, cy, radius, dir);
+}
+
+inline void VPath::addOval(const VRectF &rect, VPath::Direction dir)
+{
+ d.write().addOval(rect, dir);
+}
+
+inline void VPath::addPolystar(float points, float innerRadius,
+ float outerRadius, float innerRoundness,
+ float outerRoundness, float startAngle, float cx,
+ float cy, VPath::Direction dir)
+{
+ d.write().addPolystar(points, innerRadius, outerRadius, innerRoundness,
+ outerRoundness, startAngle, cx, cy, dir);
+}
+
+inline void VPath::addPolygon(float points, float radius, float roundness,
+ float startAngle, float cx, float cy,
+ VPath::Direction dir)
+{
+ d.write().addPolygon(points, radius, roundness, startAngle, cx, cy, dir);
+}
+
+inline void VPath::addPath(const VPath &path)
+{
+ if (path.empty()) return;
+
+ if (null()) {
+ *this = path;
+ } else {
+ d.write().addPath(path.d.read());
+ }
+}
+
+inline void VPath::addPath(const VPath &path, const VMatrix &m)
+{
+ if (path.empty()) return;
+
+ d.write().addPath(path.d.read(), &m);
+}
+
+inline const std::vector<VPath::Element> &VPath::elements() const
+{
+ return d->elements();
+}
+
+inline const std::vector<VPointF> &VPath::points() const
+{
+ return d->points();
+}
+
+inline void VPath::clone(const VPath &o)
+{
+ d.write().clone(o.d.read());
+}
+
+V_END_NAMESPACE
+
+#endif // VPATH_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vpathmesure.cpp b/vendor/github.com/Benau/go_rlottie/vector_vpathmesure.cpp
new file mode 100644
index 00000000..4288b941
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vpathmesure.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "vector_vpathmesure.h"
+#include <limits>
+#include "vector_vbezier.h"
+#include "vector_vdasher.h"
+
+V_BEGIN_NAMESPACE
+
+/*
+ * start and end value must be normalized to [0 - 1]
+ * Path measure trims the path from [start --> end]
+ * if start > end it is treated as a loop and trimmed as two segments
+ * [0-->end] and [start --> 1]
+ */
+VPath VPathMesure::trim(const VPath &path)
+{
+ if (vCompare(mStart, mEnd)) return VPath();
+
+ if ((vCompare(mStart, 0.0f) && (vCompare(mEnd, 1.0f))) ||
+ (vCompare(mStart, 1.0f) && (vCompare(mEnd, 0.0f))))
+ return path;
+
+ float length = path.length();
+
+ if (mStart < mEnd) {
+ float array[4] = {
+ 0.0f, length * mStart, // 1st segment
+ (mEnd - mStart) * length,
+ std::numeric_limits<float>::max(), // 2nd segment
+ };
+ VDasher dasher(array, 4);
+ dasher.dashed(path, mScratchObject);
+ return mScratchObject;
+ } else {
+ float array[4] = {
+ length * mEnd, (mStart - mEnd) * length, // 1st segment
+ (1 - mStart) * length,
+ std::numeric_limits<float>::max(), // 2nd segment
+ };
+ VDasher dasher(array, 4);
+ dasher.dashed(path, mScratchObject);
+ return mScratchObject;
+ }
+}
+
+V_END_NAMESPACE
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vpathmesure.h b/vendor/github.com/Benau/go_rlottie/vector_vpathmesure.h
new file mode 100644
index 00000000..43bd0863
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vpathmesure.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef VPATHMESURE_H
+#define VPATHMESURE_H
+
+#include "vector_vpath.h"
+
+V_BEGIN_NAMESPACE
+
+class VPathMesure {
+public:
+ void setRange(float start, float end) {mStart = start; mEnd = end;}
+ void setStart(float start){mStart = start;}
+ void setEnd(float end){mEnd = end;}
+ VPath trim(const VPath &path);
+private:
+ float mStart{0.0f};
+ float mEnd{1.0f};
+ VPath mScratchObject;
+};
+
+V_END_NAMESPACE
+
+#endif // VPATHMESURE_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vpoint.h b/vendor/github.com/Benau/go_rlottie/vector_vpoint.h
new file mode 100644
index 00000000..be5c0ff9
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vpoint.h
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef VPOINT_H
+#define VPOINT_H
+
+#include "vector_vglobal.h"
+
+V_BEGIN_NAMESPACE
+
+class VPointF {
+public:
+ VPointF() = default;
+ constexpr inline VPointF(float x, float y) noexcept : mx(x), my(y) {}
+ constexpr inline float x() const noexcept { return mx; }
+ constexpr inline float y() const noexcept { return my; }
+ inline float & rx() noexcept { return mx; }
+ inline float & ry() noexcept { return my; }
+ inline void setX(float x) { mx = x; }
+ inline void setY(float y) { my = y; }
+ inline VPointF operator-() noexcept { return {-mx, -my}; }
+ inline VPointF & operator+=(const VPointF &p) noexcept;
+ inline VPointF & operator-=(const VPointF &p) noexcept;
+ friend const VPointF operator+(const VPointF &p1, const VPointF &p2)
+ {
+ return VPointF(p1.mx + p2.mx, p1.my + p2.my);
+ }
+ inline friend bool fuzzyCompare(const VPointF &p1, const VPointF &p2);
+ inline friend VDebug & operator<<(VDebug &os, const VPointF &o);
+
+ friend inline VPointF operator-(const VPointF &p1, const VPointF &p2);
+ friend inline const VPointF operator*(const VPointF &, float);
+ friend inline const VPointF operator*(float, const VPointF &);
+ friend inline const VPointF operator/(const VPointF &, float);
+ friend inline const VPointF operator/(float, const VPointF &);
+
+private:
+ float mx{0};
+ float my{0};
+};
+
+inline bool fuzzyCompare(const VPointF &p1, const VPointF &p2)
+{
+ return (vCompare(p1.mx, p2.mx) && vCompare(p1.my, p2.my));
+}
+
+inline VPointF operator-(const VPointF &p1, const VPointF &p2)
+{
+ return {p1.mx - p2.mx, p1.my - p2.my};
+}
+
+inline const VPointF operator*(const VPointF &p, float c)
+{
+ return VPointF(p.mx * c, p.my * c);
+}
+
+inline const VPointF operator*(float c, const VPointF &p)
+{
+ return VPointF(p.mx * c, p.my * c);
+}
+
+inline const VPointF operator/(const VPointF &p, float c)
+{
+ return VPointF(p.mx / c, p.my / c);
+}
+
+inline const VPointF operator/(float c, const VPointF &p)
+{
+ return VPointF(p.mx / c, p.my / c);
+}
+
+inline VDebug &operator<<(VDebug &os, const VPointF &o)
+{
+ os << "{P " << o.x() << "," << o.y() << "}";
+ return os;
+}
+
+inline VPointF &VPointF::operator+=(const VPointF &p) noexcept
+{
+ mx += p.mx;
+ my += p.my;
+ return *this;
+}
+
+inline VPointF &VPointF::operator-=(const VPointF &p) noexcept
+{
+ mx -= p.mx;
+ my -= p.my;
+ return *this;
+}
+
+class VPoint {
+public:
+ VPoint() = default;
+ constexpr inline VPoint(int x, int y) noexcept : mx(x), my(y) {}
+ constexpr inline int x() const noexcept { return mx; }
+ constexpr inline int y() const noexcept { return my; }
+ inline void setX(int x) { mx = x; }
+ inline void setY(int y) { my = y; }
+ inline VPoint & operator+=(const VPoint &p) noexcept;
+ inline VPoint & operator-=(const VPoint &p) noexcept;
+ constexpr inline bool operator==(const VPoint &o) const;
+ constexpr inline bool operator!=(const VPoint &o) const
+ {
+ return !(operator==(o));
+ }
+ friend inline VPoint operator-(const VPoint &p1, const VPoint &p2);
+ inline friend VDebug &operator<<(VDebug &os, const VPoint &o);
+
+private:
+ int mx{0};
+ int my{0};
+};
+inline VDebug &operator<<(VDebug &os, const VPoint &o)
+{
+ os << "{P " << o.x() << "," << o.y() << "}";
+ return os;
+}
+
+inline VPoint operator-(const VPoint &p1, const VPoint &p2)
+{
+ return {p1.mx - p2.mx, p1.my - p2.my};
+}
+
+constexpr inline bool VPoint::operator==(const VPoint &o) const
+{
+ return (mx == o.x() && my == o.y());
+}
+
+inline VPoint &VPoint::operator+=(const VPoint &p) noexcept
+{
+ mx += p.mx;
+ my += p.my;
+ return *this;
+}
+
+inline VPoint &VPoint::operator-=(const VPoint &p) noexcept
+{
+ mx -= p.mx;
+ my -= p.my;
+ return *this;
+}
+
+class VSize {
+public:
+ VSize() = default;
+ constexpr inline VSize(int w, int h) noexcept : mw(w), mh(h) {}
+ bool empty() const {return (mw <= 0 || mh <= 0);}
+ constexpr inline int width() const noexcept { return mw; }
+ constexpr inline int height() const noexcept { return mh; }
+ inline void setWidth(int w) { mw = w; }
+ inline void setHeight(int h) { mh = h; }
+ inline VSize & operator+=(const VSize &p) noexcept;
+ inline VSize & operator-=(const VSize &p) noexcept;
+ constexpr inline bool operator==(const VSize &o) const;
+ constexpr inline bool operator!=(const VSize &o) const
+ {
+ return !(operator==(o));
+ }
+ inline friend VDebug &operator<<(VDebug &os, const VSize &o);
+
+private:
+ int mw{0};
+ int mh{0};
+};
+inline VDebug &operator<<(VDebug &os, const VSize &o)
+{
+ os << "{P " << o.width() << "," << o.height() << "}";
+ return os;
+}
+constexpr inline bool VSize::operator==(const VSize &o) const
+{
+ return (mw == o.width() && mh == o.height());
+}
+
+inline VSize &VSize::operator+=(const VSize &p) noexcept
+{
+ mw += p.mw;
+ mh += p.mh;
+ return *this;
+}
+
+inline VSize &VSize::operator-=(const VSize &p) noexcept
+{
+ mw -= p.mw;
+ mh -= p.mh;
+ return *this;
+}
+
+V_END_NAMESPACE
+
+#endif // VPOINT_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vraster.cpp b/vendor/github.com/Benau/go_rlottie/vector_vraster.cpp
new file mode 100644
index 00000000..656bc32c
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vraster.cpp
@@ -0,0 +1,563 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "vector_vraster.h"
+#include <climits>
+#include <cstring>
+#include <memory>
+#include "config.h"
+#include "vector_freetype_v_ft_raster.h"
+#include "vector_freetype_v_ft_stroker.h"
+#include "vector_vdebug.h"
+#include "vector_vmatrix.h"
+#include "vector_vpath.h"
+#include "vector_vrle.h"
+
+V_BEGIN_NAMESPACE
+
+template <typename T>
+class dyn_array {
+public:
+ explicit dyn_array(size_t size)
+ : mCapacity(size), mData(std::make_unique<T[]>(mCapacity))
+ {
+ }
+ void reserve(size_t size)
+ {
+ if (mCapacity > size) return;
+ mCapacity = size;
+ mData = std::make_unique<T[]>(mCapacity);
+ }
+ T * data() const { return mData.get(); }
+ dyn_array &operator=(dyn_array &&) noexcept = delete;
+
+private:
+ size_t mCapacity{0};
+ std::unique_ptr<T[]> mData{nullptr};
+};
+
+struct FTOutline {
+public:
+ void reset();
+ void grow(size_t, size_t);
+ void convert(const VPath &path);
+ void convert(CapStyle, JoinStyle, float, float);
+ void moveTo(const VPointF &pt);
+ void lineTo(const VPointF &pt);
+ void cubicTo(const VPointF &ctr1, const VPointF &ctr2, const VPointF end);
+ void close();
+ void end();
+ void transform(const VMatrix &m);
+ SW_FT_Pos TO_FT_COORD(float x)
+ {
+ return SW_FT_Pos(x * 64);
+ } // to freetype 26.6 coordinate.
+ SW_FT_Outline ft;
+ bool closed{false};
+ SW_FT_Stroker_LineCap ftCap;
+ SW_FT_Stroker_LineJoin ftJoin;
+ SW_FT_Fixed ftWidth;
+ SW_FT_Fixed ftMiterLimit;
+ dyn_array<SW_FT_Vector> mPointMemory{100};
+ dyn_array<char> mTagMemory{100};
+ dyn_array<short> mContourMemory{10};
+ dyn_array<char> mContourFlagMemory{10};
+};
+
+void FTOutline::reset()
+{
+ ft.n_points = ft.n_contours = 0;
+ ft.flags = 0x0;
+}
+
+void FTOutline::grow(size_t points, size_t segments)
+{
+ reset();
+ mPointMemory.reserve(points + segments);
+ mTagMemory.reserve(points + segments);
+ mContourMemory.reserve(segments);
+ mContourFlagMemory.reserve(segments);
+
+ ft.points = mPointMemory.data();
+ ft.tags = mTagMemory.data();
+ ft.contours = mContourMemory.data();
+ ft.contours_flag = mContourFlagMemory.data();
+}
+
+void FTOutline::convert(const VPath &path)
+{
+ const std::vector<VPath::Element> &elements = path.elements();
+ const std::vector<VPointF> & points = path.points();
+
+ grow(points.size(), path.segments());
+
+ size_t index = 0;
+ for (auto element : elements) {
+ switch (element) {
+ case VPath::Element::MoveTo:
+ moveTo(points[index]);
+ index++;
+ break;
+ case VPath::Element::LineTo:
+ lineTo(points[index]);
+ index++;
+ break;
+ case VPath::Element::CubicTo:
+ cubicTo(points[index], points[index + 1], points[index + 2]);
+ index = index + 3;
+ break;
+ case VPath::Element::Close:
+ close();
+ break;
+ }
+ }
+ end();
+}
+
+void FTOutline::convert(CapStyle cap, JoinStyle join, float width,
+ float miterLimit)
+{
+ // map strokeWidth to freetype. It uses as the radius of the pen not the
+ // diameter
+ width = width / 2.0f;
+ // convert to freetype co-ordinate
+ // IMP: stroker takes radius in 26.6 co-ordinate
+ ftWidth = SW_FT_Fixed(width * (1 << 6));
+    // IMP: stroker takes miterLimit in 16.16 co-ordinate
+ ftMiterLimit = SW_FT_Fixed(miterLimit * (1 << 16));
+
+ // map to freetype capstyle
+ switch (cap) {
+ case CapStyle::Square:
+ ftCap = SW_FT_STROKER_LINECAP_SQUARE;
+ break;
+ case CapStyle::Round:
+ ftCap = SW_FT_STROKER_LINECAP_ROUND;
+ break;
+ default:
+ ftCap = SW_FT_STROKER_LINECAP_BUTT;
+ break;
+ }
+ switch (join) {
+ case JoinStyle::Bevel:
+ ftJoin = SW_FT_STROKER_LINEJOIN_BEVEL;
+ break;
+ case JoinStyle::Round:
+ ftJoin = SW_FT_STROKER_LINEJOIN_ROUND;
+ break;
+ default:
+ ftJoin = SW_FT_STROKER_LINEJOIN_MITER_FIXED;
+ break;
+ }
+}
+
+void FTOutline::moveTo(const VPointF &pt)
+{
+ assert(ft.n_points <= SHRT_MAX - 1);
+
+ ft.points[ft.n_points].x = TO_FT_COORD(pt.x());
+ ft.points[ft.n_points].y = TO_FT_COORD(pt.y());
+ ft.tags[ft.n_points] = SW_FT_CURVE_TAG_ON;
+ if (ft.n_points) {
+ ft.contours[ft.n_contours] = ft.n_points - 1;
+ ft.n_contours++;
+ }
+ // mark the current contour as open
+    // will be updated if there is a close tag at the end.
+ ft.contours_flag[ft.n_contours] = 1;
+
+ ft.n_points++;
+}
+
+void FTOutline::lineTo(const VPointF &pt)
+{
+ assert(ft.n_points <= SHRT_MAX - 1);
+
+ ft.points[ft.n_points].x = TO_FT_COORD(pt.x());
+ ft.points[ft.n_points].y = TO_FT_COORD(pt.y());
+ ft.tags[ft.n_points] = SW_FT_CURVE_TAG_ON;
+ ft.n_points++;
+}
+
+void FTOutline::cubicTo(const VPointF &cp1, const VPointF &cp2,
+ const VPointF ep)
+{
+ assert(ft.n_points <= SHRT_MAX - 3);
+
+ ft.points[ft.n_points].x = TO_FT_COORD(cp1.x());
+ ft.points[ft.n_points].y = TO_FT_COORD(cp1.y());
+ ft.tags[ft.n_points] = SW_FT_CURVE_TAG_CUBIC;
+ ft.n_points++;
+
+ ft.points[ft.n_points].x = TO_FT_COORD(cp2.x());
+ ft.points[ft.n_points].y = TO_FT_COORD(cp2.y());
+ ft.tags[ft.n_points] = SW_FT_CURVE_TAG_CUBIC;
+ ft.n_points++;
+
+ ft.points[ft.n_points].x = TO_FT_COORD(ep.x());
+ ft.points[ft.n_points].y = TO_FT_COORD(ep.y());
+ ft.tags[ft.n_points] = SW_FT_CURVE_TAG_ON;
+ ft.n_points++;
+}
+void FTOutline::close()
+{
+ assert(ft.n_points <= SHRT_MAX - 1);
+
+ // mark the contour as a close path.
+ ft.contours_flag[ft.n_contours] = 0;
+
+ int index;
+ if (ft.n_contours) {
+ index = ft.contours[ft.n_contours - 1] + 1;
+ } else {
+ index = 0;
+ }
+
+ // make sure atleast 1 point exists in the segment.
+ if (ft.n_points == index) {
+ closed = false;
+ return;
+ }
+
+ ft.points[ft.n_points].x = ft.points[index].x;
+ ft.points[ft.n_points].y = ft.points[index].y;
+ ft.tags[ft.n_points] = SW_FT_CURVE_TAG_ON;
+ ft.n_points++;
+}
+
+void FTOutline::end()
+{
+ assert(ft.n_contours <= SHRT_MAX - 1);
+
+ if (ft.n_points) {
+ ft.contours[ft.n_contours] = ft.n_points - 1;
+ ft.n_contours++;
+ }
+}
+
+static void rleGenerationCb(int count, const SW_FT_Span *spans, void *user)
+{
+ VRle *rle = static_cast<VRle *>(user);
+ auto *rleSpan = reinterpret_cast<const VRle::Span *>(spans);
+ rle->addSpan(rleSpan, count);
+}
+
+static void bboxCb(int x, int y, int w, int h, void *user)
+{
+ VRle *rle = static_cast<VRle *>(user);
+ rle->setBoundingRect({x, y, w, h});
+}
+
+class SharedRle {
+public:
+ SharedRle() = default;
+ VRle &unsafe() { return _rle; }
+ void notify()
+ {
+ {
+ std::lock_guard<std::mutex> lock(_mutex);
+ _ready = true;
+ }
+ _cv.notify_one();
+ }
+ void wait()
+ {
+ if (!_pending) return;
+
+ {
+ std::unique_lock<std::mutex> lock(_mutex);
+ while (!_ready) _cv.wait(lock);
+ }
+
+ _pending = false;
+ }
+
+ VRle &get()
+ {
+ wait();
+ return _rle;
+ }
+
+ void reset()
+ {
+ wait();
+ _ready = false;
+ _pending = true;
+ }
+
+private:
+ VRle _rle;
+ std::mutex _mutex;
+ std::condition_variable _cv;
+ bool _ready{true};
+ bool _pending{false};
+};
+
+struct VRleTask {
+ SharedRle mRle;
+ VPath mPath;
+ float mStrokeWidth;
+ float mMiterLimit;
+ VRect mClip;
+ FillRule mFillRule;
+ CapStyle mCap;
+ JoinStyle mJoin;
+ bool mGenerateStroke;
+
+ VRle &rle() { return mRle.get(); }
+
+ void update(VPath path, FillRule fillRule, const VRect &clip)
+ {
+ mRle.reset();
+ mPath = std::move(path);
+ mFillRule = fillRule;
+ mClip = clip;
+ mGenerateStroke = false;
+ }
+
+ void update(VPath path, CapStyle cap, JoinStyle join, float width,
+ float miterLimit, const VRect &clip)
+ {
+ mRle.reset();
+ mPath = std::move(path);
+ mCap = cap;
+ mJoin = join;
+ mStrokeWidth = width;
+ mMiterLimit = miterLimit;
+ mClip = clip;
+ mGenerateStroke = true;
+ }
+ void render(FTOutline &outRef)
+ {
+ SW_FT_Raster_Params params;
+
+ mRle.unsafe().reset();
+
+ params.flags = SW_FT_RASTER_FLAG_DIRECT | SW_FT_RASTER_FLAG_AA;
+ params.gray_spans = &rleGenerationCb;
+ params.bbox_cb = &bboxCb;
+ params.user = &mRle.unsafe();
+ params.source = &outRef.ft;
+
+ if (!mClip.empty()) {
+ params.flags |= SW_FT_RASTER_FLAG_CLIP;
+
+ params.clip_box.xMin = mClip.left();
+ params.clip_box.yMin = mClip.top();
+ params.clip_box.xMax = mClip.right();
+ params.clip_box.yMax = mClip.bottom();
+ }
+ // compute rle
+ sw_ft_grays_raster.raster_render(nullptr, &params);
+ }
+
+ void operator()(FTOutline &outRef, SW_FT_Stroker &stroker)
+ {
+ if (mPath.points().size() > SHRT_MAX ||
+ mPath.points().size() + mPath.segments() > SHRT_MAX) {
+ return;
+ }
+
+ if (mGenerateStroke) { // Stroke Task
+ outRef.convert(mPath);
+ outRef.convert(mCap, mJoin, mStrokeWidth, mMiterLimit);
+
+ uint points, contors;
+
+ SW_FT_Stroker_Set(stroker, outRef.ftWidth, outRef.ftCap,
+ outRef.ftJoin, outRef.ftMiterLimit);
+ SW_FT_Stroker_ParseOutline(stroker, &outRef.ft);
+ SW_FT_Stroker_GetCounts(stroker, &points, &contors);
+
+ outRef.grow(points, contors);
+
+ SW_FT_Stroker_Export(stroker, &outRef.ft);
+
+ } else { // Fill Task
+ outRef.convert(mPath);
+ int fillRuleFlag = SW_FT_OUTLINE_NONE;
+ switch (mFillRule) {
+ case FillRule::EvenOdd:
+ fillRuleFlag = SW_FT_OUTLINE_EVEN_ODD_FILL;
+ break;
+ default:
+ fillRuleFlag = SW_FT_OUTLINE_NONE;
+ break;
+ }
+ outRef.ft.flags = fillRuleFlag;
+ }
+
+ render(outRef);
+
+ mPath = VPath();
+
+ mRle.notify();
+ }
+};
+
+using VTask = std::shared_ptr<VRleTask>;
+
+#ifdef LOTTIE_THREAD_SUPPORT
+
+#include <thread>
+#include "vector_vtaskqueue.h"
+
+class RleTaskScheduler {
+ const unsigned _count{std::thread::hardware_concurrency()};
+ std::vector<std::thread> _threads;
+ std::vector<TaskQueue<VTask>> _q{_count};
+ std::atomic<unsigned> _index{0};
+
+ void run(unsigned i)
+ {
+ /*
+ * initalize per thread objects.
+ */
+ FTOutline outlineRef;
+ SW_FT_Stroker stroker;
+ SW_FT_Stroker_New(&stroker);
+
+ // Task Loop
+ VTask task;
+ while (true) {
+ bool success = false;
+
+ for (unsigned n = 0; n != _count * 2; ++n) {
+ if (_q[(i + n) % _count].try_pop(task)) {
+ success = true;
+ break;
+ }
+ }
+
+ if (!success && !_q[i].pop(task)) break;
+
+ (*task)(outlineRef, stroker);
+ }
+
+ // cleanup
+ SW_FT_Stroker_Done(stroker);
+ }
+
+ RleTaskScheduler()
+ {
+ for (unsigned n = 0; n != _count; ++n) {
+ _threads.emplace_back([&, n] { run(n); });
+ }
+ }
+
+public:
+ static RleTaskScheduler &instance()
+ {
+ static RleTaskScheduler singleton;
+ return singleton;
+ }
+
+ ~RleTaskScheduler()
+ {
+ for (auto &e : _q) e.done();
+
+ for (auto &e : _threads) e.join();
+ }
+
+ void process(VTask task)
+ {
+ auto i = _index++;
+
+ for (unsigned n = 0; n != _count; ++n) {
+ if (_q[(i + n) % _count].try_push(std::move(task))) return;
+ }
+
+ if (_count > 0) {
+ _q[i % _count].push(std::move(task));
+ }
+ }
+};
+
+#else
+
+class RleTaskScheduler {
+public:
+ FTOutline outlineRef{};
+ SW_FT_Stroker stroker;
+
+public:
+ static RleTaskScheduler &instance()
+ {
+ static RleTaskScheduler singleton;
+ return singleton;
+ }
+
+ RleTaskScheduler() { SW_FT_Stroker_New(&stroker); }
+
+ ~RleTaskScheduler() { SW_FT_Stroker_Done(stroker); }
+
+ void process(VTask task) { (*task)(outlineRef, stroker); }
+};
+#endif
+
+struct VRasterizer::VRasterizerImpl {
+ VRleTask mTask;
+
+ VRle & rle() { return mTask.rle(); }
+ VRleTask &task() { return mTask; }
+};
+
+VRle VRasterizer::rle()
+{
+ if (!d) return VRle();
+ return d->rle();
+}
+
+void VRasterizer::init()
+{
+ if (!d) d = std::make_shared<VRasterizerImpl>();
+}
+
+void VRasterizer::updateRequest()
+{
+ VTask taskObj = VTask(d, &d->task());
+ RleTaskScheduler::instance().process(std::move(taskObj));
+}
+
+void VRasterizer::rasterize(VPath path, FillRule fillRule, const VRect &clip)
+{
+ init();
+ if (path.empty()) {
+ d->rle().reset();
+ return;
+ }
+ d->task().update(std::move(path), fillRule, clip);
+ updateRequest();
+}
+
+void VRasterizer::rasterize(VPath path, CapStyle cap, JoinStyle join,
+ float width, float miterLimit, const VRect &clip)
+{
+ init();
+ if (path.empty() || vIsZero(width)) {
+ d->rle().reset();
+ return;
+ }
+ d->task().update(std::move(path), cap, join, width, miterLimit, clip);
+ updateRequest();
+}
+
+V_END_NAMESPACE
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vraster.h b/vendor/github.com/Benau/go_rlottie/vector_vraster.h
new file mode 100644
index 00000000..45d8ebc3
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vraster.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef VRASTER_H
+#define VRASTER_H
+#include <future>
+#include "vector_vglobal.h"
+#include "vector_vrect.h"
+
+V_BEGIN_NAMESPACE
+
+class VPath;
+class VRle;
+
+class VRasterizer
+{
+public:
+ void rasterize(VPath path, FillRule fillRule = FillRule::Winding, const VRect &clip = VRect());
+ void rasterize(VPath path, CapStyle cap, JoinStyle join, float width,
+ float miterLimit, const VRect &clip = VRect());
+ VRle rle();
+private:
+ struct VRasterizerImpl;
+ void init();
+ void updateRequest();
+ std::shared_ptr<VRasterizerImpl> d{nullptr};
+};
+
+V_END_NAMESPACE
+
+#endif // VRASTER_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vrect.cpp b/vendor/github.com/Benau/go_rlottie/vector_vrect.cpp
new file mode 100644
index 00000000..38d29952
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vrect.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "vector_vrect.h"
+#include <algorithm>
+
+VRect VRect::operator&(const VRect &r) const
+{
+ if (empty()) return VRect();
+
+ int l1 = x1;
+ int r1 = x1;
+ if (x2 - x1 + 1 < 0)
+ l1 = x2;
+ else
+ r1 = x2;
+
+ int l2 = r.x1;
+ int r2 = r.x1;
+ if (r.x2 - r.x1 + 1 < 0)
+ l2 = r.x2;
+ else
+ r2 = r.x2;
+
+ if (l1 > r2 || l2 > r1) return VRect();
+
+ int t1 = y1;
+ int b1 = y1;
+ if (y2 - y1 + 1 < 0)
+ t1 = y2;
+ else
+ b1 = y2;
+
+ int t2 = r.y1;
+ int b2 = r.y1;
+ if (r.y2 - r.y1 + 1 < 0)
+ t2 = r.y2;
+ else
+ b2 = r.y2;
+
+ if (t1 > b2 || t2 > b1) return VRect();
+
+ VRect tmp;
+ tmp.x1 = std::max(l1, l2);
+ tmp.x2 = std::min(r1, r2);
+ tmp.y1 = std::max(t1, t2);
+ tmp.y2 = std::min(b1, b2);
+ return tmp;
+}
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vrect.h b/vendor/github.com/Benau/go_rlottie/vector_vrect.h
new file mode 100644
index 00000000..590e2174
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vrect.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef VRECT_H
+#define VRECT_H
+#include "vector_vglobal.h"
+#include "vector_vpoint.h"
+
+V_BEGIN_NAMESPACE
+class VRectF;
+
+class VRect {
+public:
+ VRect() = default;
+ VRect(int x, int y, int w, int h):x1(x),y1(y),x2(x+w),y2(y+h){}
+ explicit VRect(VPoint pt, VSize sz):VRect(pt.x(), pt.y(), sz.width(), sz.height()){}
+ operator VRectF() const;
+ V_CONSTEXPR bool empty() const {return x1 >= x2 || y1 >= y2;}
+ V_CONSTEXPR int left() const {return x1;}
+ V_CONSTEXPR int top() const {return y1;}
+ V_CONSTEXPR int right() const {return x2;}
+ V_CONSTEXPR int bottom() const {return y2;}
+ V_CONSTEXPR int width() const {return x2-x1;}
+ V_CONSTEXPR int height() const {return y2-y1;}
+ V_CONSTEXPR int x() const {return x1;}
+ V_CONSTEXPR int y() const {return y1;}
+ VSize size() const {return {width(), height()};}
+ void setLeft(int l) { x1 = l; }
+ void setTop(int t) { y1 = t; }
+ void setRight(int r) { x2 = r; }
+ void setBottom(int b) { y2 = b; }
+ void setWidth(int w) { x2 = x1 + w; }
+ void setHeight(int h) { y2 = y1 + h; }
+ VRect translated(int dx, int dy) const;
+ void translate(int dx, int dy);
+ bool contains(const VRect &r, bool proper = false) const;
+ bool intersects(const VRect &r);
+ friend V_CONSTEXPR inline bool operator==(const VRect &,
+ const VRect &) noexcept;
+ friend V_CONSTEXPR inline bool operator!=(const VRect &,
+ const VRect &) noexcept;
+ friend VDebug & operator<<(VDebug &os, const VRect &o);
+
+ VRect intersected(const VRect &r) const;
+ VRect operator&(const VRect &r) const;
+
+private:
+ int x1{0};
+ int y1{0};
+ int x2{0};
+ int y2{0};
+};
+
+inline VRect VRect::intersected(const VRect &r) const
+{
+ return *this & r;
+}
+
+inline bool VRect::intersects(const VRect &r)
+{
+ return (right() > r.left() && left() < r.right() && bottom() > r.top() &&
+ top() < r.bottom());
+}
+
+inline VDebug &operator<<(VDebug &os, const VRect &o)
+{
+ os << "{R " << o.x() << "," << o.y() << "," << o.width() << ","
+ << o.height() << "}";
+ return os;
+}
+V_CONSTEXPR inline bool operator==(const VRect &r1, const VRect &r2) noexcept
+{
+ return r1.x1 == r2.x1 && r1.x2 == r2.x2 && r1.y1 == r2.y1 && r1.y2 == r2.y2;
+}
+
+V_CONSTEXPR inline bool operator!=(const VRect &r1, const VRect &r2) noexcept
+{
+ return r1.x1 != r2.x1 || r1.x2 != r2.x2 || r1.y1 != r2.y1 || r1.y2 != r2.y2;
+}
+
+inline VRect VRect::translated(int dx, int dy) const
+{
+ return {x1 + dx, y1 + dy, x2 - x1, y2 - y1};
+}
+
+inline void VRect::translate(int dx, int dy)
+{
+ x1 += dx;
+ y1 += dy;
+ x2 += dx;
+ y2 += dy;
+}
+
+inline bool VRect::contains(const VRect &r, bool proper) const
+{
+ return proper ?
+ ((x1 < r.x1) && (x2 > r.x2) && (y1 < r.y1) && (y2 > r.y2)) :
+ ((x1 <= r.x1) && (x2 >= r.x2) && (y1 <= r.y1) && (y2 >= r.y2));
+}
+
+class VRectF {
+public:
+ VRectF() = default;
+
+ VRectF(double x, double y, double w, double h):
+ x1(float(x)),y1(float(y)),
+ x2(float(x+w)),y2(float(y+h)){}
+ operator VRect() const {
+ return {int(left()), int(right()), int(width()), int(height())};
+ }
+
+ V_CONSTEXPR bool empty() const {return x1 >= x2 || y1 >= y2;}
+ V_CONSTEXPR float left() const {return x1;}
+ V_CONSTEXPR float top() const {return y1;}
+ V_CONSTEXPR float right() const {return x2;}
+ V_CONSTEXPR float bottom() const {return y2;}
+ V_CONSTEXPR float width() const {return x2-x1;}
+ V_CONSTEXPR float height() const {return y2-y1;}
+ V_CONSTEXPR float x() const {return x1;}
+ V_CONSTEXPR float y() const {return y1;}
+ V_CONSTEXPR inline VPointF center() const
+ {
+ return {x1 + (x2 - x1) / 2.f, y1 + (y2 - y1) / 2.f};
+ }
+ void setLeft(float l) { x1 = l; }
+ void setTop(float t) { y1 = t; }
+ void setRight(float r) { x2 = r; }
+ void setBottom(float b) { y2 = b; }
+ void setWidth(float w) { x2 = x1 + w; }
+ void setHeight(float h) { y2 = y1 + h; }
+ void translate(float dx, float dy)
+ {
+ x1 += dx;
+ y1 += dy;
+ x2 += dx;
+ y2 += dy;
+ }
+
+private:
+ float x1{0};
+ float y1{0};
+ float x2{0};
+ float y2{0};
+};
+
+inline VRect::operator VRectF() const
+{
+ return {double(left()), double(right()), double(width()), double(height())};
+}
+
+V_END_NAMESPACE
+
+#endif // VRECT_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vrle.cpp b/vendor/github.com/Benau/go_rlottie/vector_vrle.cpp
new file mode 100644
index 00000000..57c7585f
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vrle.cpp
@@ -0,0 +1,748 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in
+ all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "vector_vrle.h"
+#include "vector_vrect.h"
+#include <algorithm>
+#include <array>
+#include <cstdlib>
+#include <cstring>
+#include <limits>
+#include <vector>
+#include "vector_vdebug.h"
+#include "vector_vglobal.h"
+
+V_BEGIN_NAMESPACE
+
+using Result = std::array<VRle::Span, 255>;
+using rle_view = VRle::View;
+static size_t _opGeneric(rle_view &a, rle_view &b, Result &result,
+ VRle::Data::Op op);
+static size_t _opIntersect(const VRect &, rle_view &, Result &);
+static size_t _opIntersect(rle_view &, rle_view &, Result &);
+
+static inline uchar divBy255(int x)
+{
+ return (x + (x >> 8) + 0x80) >> 8;
+}
+
+inline static void copy(const VRle::Span *span, size_t count,
+ std::vector<VRle::Span> &v)
+{
+ // make sure enough memory available
+ if (v.capacity() < v.size() + count) v.reserve(v.size() + count);
+ std::copy(span, span + count, back_inserter(v));
+}
+
+void VRle::Data::addSpan(const VRle::Span *span, size_t count)
+{
+ copy(span, count, mSpans);
+ mBboxDirty = true;
+}
+
+VRect VRle::Data::bbox() const
+{
+ updateBbox();
+ return mBbox;
+}
+
+void VRle::Data::setBbox(const VRect &bbox) const
+{
+ mBboxDirty = false;
+ mBbox = bbox;
+}
+
+void VRle::Data::reset()
+{
+ mSpans.clear();
+ mBbox = VRect();
+ mOffset = VPoint();
+ mBboxDirty = false;
+}
+
+void VRle::Data::clone(const VRle::Data &o)
+{
+ *this = o;
+}
+
+void VRle::Data::translate(const VPoint &p)
+{
+ // take care of last offset if applied
+ mOffset = p - mOffset;
+ int x = mOffset.x();
+ int y = mOffset.y();
+ for (auto &i : mSpans) {
+ i.x = i.x + x;
+ i.y = i.y + y;
+ }
+ updateBbox();
+ mBbox.translate(mOffset.x(), mOffset.y());
+}
+
+void VRle::Data::addRect(const VRect &rect)
+{
+ int x = rect.left();
+ int y = rect.top();
+ int width = rect.width();
+ int height = rect.height();
+
+ mSpans.reserve(size_t(height));
+
+ VRle::Span span;
+ for (int i = 0; i < height; i++) {
+ span.x = x;
+ span.y = y + i;
+ span.len = width;
+ span.coverage = 255;
+ mSpans.push_back(span);
+ }
+ mBbox = rect;
+}
+
+void VRle::Data::updateBbox() const
+{
+ if (!mBboxDirty) return;
+
+ mBboxDirty = false;
+
+ int l = std::numeric_limits<int>::max();
+ const VRle::Span *span = mSpans.data();
+
+ mBbox = VRect();
+ size_t sz = mSpans.size();
+ if (sz) {
+ int t = span[0].y;
+ int b = span[sz - 1].y;
+ int r = 0;
+ for (size_t i = 0; i < sz; i++) {
+ if (span[i].x < l) l = span[i].x;
+ if (span[i].x + span[i].len > r) r = span[i].x + span[i].len;
+ }
+ mBbox = VRect(l, t, r - l, b - t + 1);
+ }
+}
+
+void VRle::Data::operator*=(uchar alpha)
+{
+ for (auto &i : mSpans) {
+ i.coverage = divBy255(i.coverage * alpha);
+ }
+}
+
+void VRle::Data::opIntersect(const VRect &r, VRle::VRleSpanCb cb,
+ void *userData) const
+{
+ if (empty()) return;
+
+ if (r.contains(bbox())) {
+ cb(mSpans.size(), mSpans.data(), userData);
+ return;
+ }
+
+ auto obj = view();
+ Result result;
+ // run till all the spans are processed
+ while (obj.size()) {
+ auto count = _opIntersect(r, obj, result);
+ if (count) cb(count, result.data(), userData);
+ }
+}
+
+// res = a - b;
+void VRle::Data::opSubstract(const VRle::Data &aObj, const VRle::Data &bObj)
+{
+ // if two rle are disjoint
+ if (!aObj.bbox().intersects(bObj.bbox())) {
+ mSpans = aObj.mSpans;
+ } else {
+ auto a = aObj.view();
+ auto b = bObj.view();
+
+ auto aPtr = a.data();
+ auto aEnd = a.data() + a.size();
+ auto bPtr = b.data();
+ auto bEnd = b.data() + b.size();
+
+ // 1. forward a till it intersects with b
+ while ((aPtr != aEnd) && (aPtr->y < bPtr->y)) aPtr++;
+ auto count = aPtr - a.data();
+ if (count) copy(a.data(), count, mSpans);
+
+ // 2. forward b till it intersects with a
+ if (aPtr != aEnd)
+ while ((bPtr != bEnd) && (bPtr->y < aPtr->y)) bPtr++;
+
+ // update a and b object
+ a = {aPtr, size_t(aEnd - aPtr)};
+ b = {bPtr, size_t(bEnd - bPtr)};
+
+ // 3. calculate the intersect region
+ Result result;
+
+ // run till all the spans are processed
+ while (a.size() && b.size()) {
+ auto count = _opGeneric(a, b, result, Op::Substract);
+ if (count) copy(result.data(), count, mSpans);
+ }
+
+ // 4. copy the rest of a
+ if (a.size()) copy(a.data(), a.size(), mSpans);
+ }
+
+ mBboxDirty = true;
+}
+
+void VRle::Data::opGeneric(const VRle::Data &aObj, const VRle::Data &bObj,
+ Op op)
+{
+ // This routine assumes, obj1(span_y) < obj2(span_y).
+
+ auto a = aObj.view();
+ auto b = bObj.view();
+
+ // reserve some space for the result vector.
+ mSpans.reserve(a.size() + b.size());
+
+ // if two rle are disjoint
+ if (!aObj.bbox().intersects(bObj.bbox())) {
+ if (a.data()[0].y < b.data()[0].y) {
+ copy(a.data(), a.size(), mSpans);
+ copy(b.data(), b.size(), mSpans);
+ } else {
+ copy(b.data(), b.size(), mSpans);
+ copy(a.data(), a.size(), mSpans);
+ }
+ } else {
+ auto aPtr = a.data();
+ auto aEnd = a.data() + a.size();
+ auto bPtr = b.data();
+ auto bEnd = b.data() + b.size();
+
+ // 1. forward a till it intersects with b
+ while ((aPtr != aEnd) && (aPtr->y < bPtr->y)) aPtr++;
+
+ auto count = aPtr - a.data();
+ if (count) copy(a.data(), count, mSpans);
+
+ // 2. forward b till it intersects with a
+ if (aPtr != aEnd)
+ while ((bPtr != bEnd) && (bPtr->y < aPtr->y)) bPtr++;
+
+ count = bPtr - b.data();
+ if (count) copy(b.data(), count, mSpans);
+
+ // update a and b object
+ a = {aPtr, size_t(aEnd - aPtr)};
+ b = {bPtr, size_t(bEnd - bPtr)};
+
+ // 3. calculate the intersect region
+ Result result;
+
+ // run till all the spans are processed
+ while (a.size() && b.size()) {
+ auto count = _opGeneric(a, b, result, op);
+ if (count) copy(result.data(), count, mSpans);
+ }
+ // 3. copy the rest
+ if (b.size()) copy(b.data(), b.size(), mSpans);
+ if (a.size()) copy(a.data(), a.size(), mSpans);
+ }
+
+ mBboxDirty = true;
+}
+
+static inline V_ALWAYS_INLINE void _opIntersectPrepare(VRle::View &a,
+ VRle::View &b)
+{
+ auto aPtr = a.data();
+ auto aEnd = a.data() + a.size();
+ auto bPtr = b.data();
+ auto bEnd = b.data() + b.size();
+
+ // 1. advance a till it intersects with b
+ while ((aPtr != aEnd) && (aPtr->y < bPtr->y)) aPtr++;
+
+ // 2. advance b till it intersects with a
+ if (aPtr != aEnd)
+ while ((bPtr != bEnd) && (bPtr->y < aPtr->y)) bPtr++;
+
+ // update a and b object
+ a = {aPtr, size_t(aEnd - aPtr)};
+ b = {bPtr, size_t(bEnd - bPtr)};
+}
+
+void VRle::Data::opIntersect(VRle::View a, VRle::View b)
+{
+ _opIntersectPrepare(a, b);
+ Result result;
+ while (a.size()) {
+ auto count = _opIntersect(a, b, result);
+ if (count) copy(result.data(), count, mSpans);
+ }
+
+ updateBbox();
+}
+
+static void _opIntersect(rle_view a, rle_view b, VRle::VRleSpanCb cb,
+ void *userData)
+{
+ if (!cb) return;
+
+ _opIntersectPrepare(a, b);
+ Result result;
+ while (a.size()) {
+ auto count = _opIntersect(a, b, result);
+ if (count) cb(count, result.data(), userData);
+ }
+}
+
+/*
+ * This function will clip a rle list with another rle object
+ * tmp_clip : The rle list that will be use to clip the rle
+ * tmp_obj : holds the list of spans that has to be clipped
+ * result : will hold the result after the processing
+ * NOTE: if the algorithm runs out of the result buffer list
+ * it will stop and update the tmp_obj with the span list
+ * that are yet to be processed as well as the tpm_clip object
+ * with the unprocessed clip spans.
+ */
+
+static size_t _opIntersect(rle_view &obj, rle_view &clip, Result &result)
+{
+ auto out = result.data();
+ auto available = result.max_size();
+ auto spans = obj.data();
+ auto end = obj.data() + obj.size();
+ auto clipSpans = clip.data();
+ auto clipEnd = clip.data() + clip.size();
+ int sx1, sx2, cx1, cx2, x, len;
+
+ while (available && spans < end) {
+ if (clipSpans >= clipEnd) {
+ spans = end;
+ break;
+ }
+ if (clipSpans->y > spans->y) {
+ ++spans;
+ continue;
+ }
+ if (spans->y != clipSpans->y) {
+ ++clipSpans;
+ continue;
+ }
+ // assert(spans->y == (clipSpans->y + clip_offset_y));
+ sx1 = spans->x;
+ sx2 = sx1 + spans->len;
+ cx1 = clipSpans->x;
+ cx2 = cx1 + clipSpans->len;
+
+ if (cx1 < sx1 && cx2 < sx1) {
+ ++clipSpans;
+ continue;
+ } else if (sx1 < cx1 && sx2 < cx1) {
+ ++spans;
+ continue;
+ }
+ x = std::max(sx1, cx1);
+ len = std::min(sx2, cx2) - x;
+ if (len) {
+ out->x = std::max(sx1, cx1);
+ out->len = (std::min(sx2, cx2) - out->x);
+ out->y = spans->y;
+ out->coverage = divBy255(spans->coverage * clipSpans->coverage);
+ ++out;
+ --available;
+ }
+ if (sx2 < cx2) {
+ ++spans;
+ } else {
+ ++clipSpans;
+ }
+ }
+
+ // update the obj view yet to be processed
+ obj = {spans, size_t(end - spans)};
+
+ // update the clip view yet to be processed
+ clip = {clipSpans, size_t(clipEnd - clipSpans)};
+
+ return result.max_size() - available;
+}
+
+/*
+ * This function will clip a rle list with a given rect
+ * clip : The clip rect that will be use to clip the rle
+ * tmp_obj : holds the list of spans that has to be clipped
+ * result : will hold the result after the processing
+ * NOTE: if the algorithm runs out of the result buffer list
+ * it will stop and update the tmp_obj with the span list
+ * that are yet to be processed
+ */
+static size_t _opIntersect(const VRect &clip, rle_view &obj, Result &result)
+{
+ auto out = result.data();
+ auto available = result.max_size();
+ auto ptr = obj.data();
+ auto end = obj.data() + obj.size();
+
+ const auto minx = clip.left();
+ const auto miny = clip.top();
+ const auto maxx = clip.right() - 1;
+ const auto maxy = clip.bottom() - 1;
+
+ while (available && ptr < end) {
+ const auto &span = *ptr;
+ if (span.y > maxy) {
+ ptr = end; // update spans so that we can breakout
+ break;
+ }
+ if (span.y < miny || span.x > maxx || span.x + span.len <= minx) {
+ ++ptr;
+ continue;
+ }
+ if (span.x < minx) {
+ out->len = std::min(span.len - (minx - span.x), maxx - minx + 1);
+ out->x = minx;
+ } else {
+ out->x = span.x;
+ out->len = std::min(span.len, ushort(maxx - span.x + 1));
+ }
+ if (out->len != 0) {
+ out->y = span.y;
+ out->coverage = span.coverage;
+ ++out;
+ --available;
+ }
+ ++ptr;
+ }
+
+ // update the span list that yet to be processed
+ obj = {ptr, size_t(end - ptr)};
+
+ return result.max_size() - available;
+}
+
+static void blitXor(VRle::Span *spans, int count, uchar *buffer, int offsetX)
+{
+ while (count--) {
+ int x = spans->x + offsetX;
+ int l = spans->len;
+ uchar *ptr = buffer + x;
+ while (l--) {
+ int da = *ptr;
+ *ptr = divBy255((255 - spans->coverage) * (da) +
+ spans->coverage * (255 - da));
+ ptr++;
+ }
+ spans++;
+ }
+}
+
+static void blitDestinationOut(VRle::Span *spans, int count, uchar *buffer,
+ int offsetX)
+{
+ while (count--) {
+ int x = spans->x + offsetX;
+ int l = spans->len;
+ uchar *ptr = buffer + x;
+ while (l--) {
+ *ptr = divBy255((255 - spans->coverage) * (*ptr));
+ ptr++;
+ }
+ spans++;
+ }
+}
+
+static void blitSrcOver(VRle::Span *spans, int count, uchar *buffer,
+ int offsetX)
+{
+ while (count--) {
+ int x = spans->x + offsetX;
+ int l = spans->len;
+ uchar *ptr = buffer + x;
+ while (l--) {
+ *ptr = spans->coverage + divBy255((255 - spans->coverage) * (*ptr));
+ ptr++;
+ }
+ spans++;
+ }
+}
+
+void blitSrc(VRle::Span *spans, int count, uchar *buffer, int offsetX)
+{
+ while (count--) {
+ int x = spans->x + offsetX;
+ int l = spans->len;
+ uchar *ptr = buffer + x;
+ while (l--) {
+ *ptr = std::max(spans->coverage, *ptr);
+ ptr++;
+ }
+ spans++;
+ }
+}
+
+size_t bufferToRle(uchar *buffer, int size, int offsetX, int y, VRle::Span *out)
+{
+ size_t count = 0;
+ uchar value = buffer[0];
+ int curIndex = 0;
+
+ // size = offsetX < 0 ? size + offsetX : size;
+ for (int i = 0; i < size; i++) {
+ uchar curValue = buffer[0];
+ if (value != curValue) {
+ if (value) {
+ out->y = y;
+ out->x = offsetX + curIndex;
+ out->len = i - curIndex;
+ out->coverage = value;
+ out++;
+ count++;
+ }
+ curIndex = i;
+ value = curValue;
+ }
+ buffer++;
+ }
+ if (value) {
+ out->y = y;
+ out->x = offsetX + curIndex;
+ out->len = size - curIndex;
+ out->coverage = value;
+ count++;
+ }
+ return count;
+}
+
+struct SpanMerger {
+ explicit SpanMerger(VRle::Data::Op op)
+ {
+ switch (op) {
+ case VRle::Data::Op::Add:
+ _blitter = &blitSrcOver;
+ break;
+ case VRle::Data::Op::Xor:
+ _blitter = &blitXor;
+ break;
+ case VRle::Data::Op::Substract:
+ _blitter = &blitDestinationOut;
+ break;
+ }
+ }
+ using blitter = void (*)(VRle::Span *, int, uchar *, int);
+ blitter _blitter;
+ std::array<VRle::Span, 256> _result;
+ std::array<uchar, 1024> _buffer;
+ VRle::Span * _aStart{nullptr};
+ VRle::Span * _bStart{nullptr};
+
+ void revert(VRle::Span *&aPtr, VRle::Span *&bPtr)
+ {
+ aPtr = _aStart;
+ bPtr = _bStart;
+ }
+ VRle::Span *data() { return _result.data(); }
+ size_t merge(VRle::Span *&aPtr, const VRle::Span *aEnd, VRle::Span *&bPtr,
+ const VRle::Span *bEnd);
+};
+
+size_t SpanMerger::merge(VRle::Span *&aPtr, const VRle::Span *aEnd,
+ VRle::Span *&bPtr, const VRle::Span *bEnd)
+{
+ assert(aPtr->y == bPtr->y);
+
+ _aStart = aPtr;
+ _bStart = bPtr;
+ int lb = std::min(aPtr->x, bPtr->x);
+ int y = aPtr->y;
+
+ while (aPtr < aEnd && aPtr->y == y) aPtr++;
+ while (bPtr < bEnd && bPtr->y == y) bPtr++;
+
+ int ub = std::max((aPtr - 1)->x + (aPtr - 1)->len,
+ (bPtr - 1)->x + (bPtr - 1)->len);
+ int length = (lb < 0) ? ub + lb : ub - lb;
+
+ if (length <= 0 || size_t(length) >= _buffer.max_size()) {
+ // can't handle merge . skip
+ return 0;
+ }
+
+ // clear buffer
+ memset(_buffer.data(), 0, length);
+
+ // blit a to buffer
+ blitSrc(_aStart, aPtr - _aStart, _buffer.data(), -lb);
+
+ // blit b to buffer
+ _blitter(_bStart, bPtr - _bStart, _buffer.data(), -lb);
+
+ // convert buffer to span
+ return bufferToRle(_buffer.data(), length, lb, y, _result.data());
+}
+
+static size_t _opGeneric(rle_view &a, rle_view &b, Result &result,
+ VRle::Data::Op op)
+{
+ SpanMerger merger{op};
+
+ auto out = result.data();
+ size_t available = result.max_size();
+ auto aPtr = a.data();
+ auto aEnd = a.data() + a.size();
+ auto bPtr = b.data();
+ auto bEnd = b.data() + b.size();
+
+ // only logic change for substract operation.
+ const bool keep = op != (VRle::Data::Op::Substract);
+
+ while (available && aPtr < aEnd && bPtr < bEnd) {
+ if (aPtr->y < bPtr->y) {
+ *out++ = *aPtr++;
+ available--;
+ } else if (bPtr->y < aPtr->y) {
+ if (keep) {
+ *out++ = *bPtr;
+ available--;
+ }
+ bPtr++;
+ } else { // same y
+ auto count = merger.merge(aPtr, aEnd, bPtr, bEnd);
+ if (available >= count) {
+ if (count) {
+ memcpy(out, merger.data(), count * sizeof(VRle::Span));
+ out += count;
+ available -= count;
+ }
+ } else {
+ // not enough space try next time.
+ merger.revert(aPtr, bPtr);
+ break;
+ }
+ }
+ }
+ // update the span list that yet to be processed
+ a = {aPtr, size_t(aEnd - aPtr)};
+ b = {bPtr, size_t(bEnd - bPtr)};
+
+ return result.max_size() - available;
+}
+
+/*
+ * this api makes use of thread_local temporary
+ * buffer to avoid creating intermediate temporary rle buffer
+ * the scratch buffer object will grow its size on demand
+ * so that future call won't need any more memory allocation.
+ * this function is thread safe as it uses thread_local variable
+ * which is unique per thread.
+ */
+static vthread_local VRle::Data Scratch_Object;
+
+VRle VRle::opGeneric(const VRle &o, Data::Op op) const
+{
+ if (empty()) return o;
+ if (o.empty()) return *this;
+
+ Scratch_Object.reset();
+ Scratch_Object.opGeneric(d.read(), o.d.read(), op);
+
+ VRle result;
+ result.d.write() = Scratch_Object;
+
+ return result;
+}
+
+VRle VRle::operator-(const VRle &o) const
+{
+ if (empty()) return {};
+ if (o.empty()) return *this;
+
+ Scratch_Object.reset();
+ Scratch_Object.opSubstract(d.read(), o.d.read());
+
+ VRle result;
+ result.d.write() = Scratch_Object;
+
+ return result;
+}
+
+VRle VRle::operator&(const VRle &o) const
+{
+ if (empty() || o.empty()) return {};
+
+ Scratch_Object.reset();
+ Scratch_Object.opIntersect(d.read().view(), o.d.read().view());
+
+ VRle result;
+ result.d.write() = Scratch_Object;
+
+ return result;
+}
+
+void VRle::operator&=(const VRle &o)
+{
+ if (empty()) return;
+ if (o.empty()) {
+ reset();
+ return;
+ }
+ Scratch_Object.reset();
+ Scratch_Object.opIntersect(d.read().view(), o.d.read().view());
+ d.write() = Scratch_Object;
+}
+
+VRle operator-(const VRect &rect, const VRle &o)
+{
+ if (rect.empty()) return {};
+
+ Scratch_Object.reset();
+ Scratch_Object.addRect(rect);
+
+ VRle result;
+ result.d.write().opSubstract(Scratch_Object, o.d.read());
+
+ return result;
+}
+
+VRle operator&(const VRect &rect, const VRle &o)
+{
+ if (rect.empty() || o.empty()) return {};
+
+ Scratch_Object.reset();
+ Scratch_Object.addRect(rect);
+
+ VRle result;
+ result.d.write().opIntersect(Scratch_Object.view(), o.d.read().view());
+
+ return result;
+}
+
+void VRle::intersect(const VRle &clip, VRleSpanCb cb, void *userData) const
+{
+ if (empty() || clip.empty()) return;
+
+ _opIntersect(d.read().view(), clip.d.read().view(), cb, userData);
+}
+
+V_END_NAMESPACE
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vrle.h b/vendor/github.com/Benau/go_rlottie/vector_vrle.h
new file mode 100644
index 00000000..a47893fc
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vrle.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef VRLE_H
+#define VRLE_H
+
+#include <vector>
+#include "vector_vcowptr.h"
+#include "vector_vglobal.h"
+#include "vector_vpoint.h"
+#include "vector_vrect.h"
+
+V_BEGIN_NAMESPACE
+
+// Run-length-encoded 8-bit coverage mask used by the rasterizer. The span
+// storage sits behind a copy-on-write pointer (vcow_ptr), so copying a VRle
+// is cheap and every mutation goes through d.write().
+class VRle {
+public:
+ // One horizontal run of `len` pixels at (x, y) with a coverage value.
+ struct Span {
+ short x{0};
+ short y{0};
+ ushort len{0};
+ uchar coverage{0};
+ };
+ using VRleSpanCb = void (*)(size_t count, const VRle::Span *spans,
+ void *userData);
+ bool empty() const { return d->empty(); }
+ VRect boundingRect() const { return d->bbox(); }
+ void setBoundingRect(const VRect &bbox) { d->setBbox(bbox); }
+ void addSpan(const VRle::Span *span, size_t count)
+ {
+ d.write().addSpan(span, count);
+ }
+
+ void reset() { d.write().reset(); }
+ void translate(const VPoint &p) { d.write().translate(p); }
+
+ // Scales every span's coverage by alpha.
+ void operator*=(uchar alpha) { d.write() *= alpha; }
+
+ void intersect(const VRect &r, VRleSpanCb cb, void *userData) const;
+ void intersect(const VRle &rle, VRleSpanCb cb, void *userData) const;
+
+ void operator&=(const VRle &o);
+ VRle operator&(const VRle &o) const;
+ VRle operator-(const VRle &o) const;
+ VRle operator+(const VRle &o) const { return opGeneric(o, Data::Op::Add); }
+ VRle operator^(const VRle &o) const { return opGeneric(o, Data::Op::Xor); }
+
+ friend VRle operator-(const VRect &rect, const VRle &o);
+ friend VRle operator&(const VRect &rect, const VRle &o);
+
+ bool unique() const { return d.unique(); }
+ size_t refCount() const { return d.refCount(); }
+ void clone(const VRle &o) { d.write().clone(o.d.read()); }
+
+public:
+ // Non-owning view over a span array; used by the op implementations.
+ struct View {
+ Span * _data;
+ size_t _size;
+ View(const Span *data, size_t sz) : _data((Span *)data), _size(sz) {}
+ Span * data() { return _data; }
+ size_t size() { return _size; }
+ };
+ // Actual span storage plus a lazily-recomputed bounding box (mBbox is
+ // refreshed on demand when mBboxDirty is set).
+ struct Data {
+ enum class Op { Add, Xor, Substract };
+ VRle::View view() const
+ {
+ return VRle::View(mSpans.data(), mSpans.size());
+ }
+ bool empty() const { return mSpans.empty(); }
+ void addSpan(const VRle::Span *span, size_t count);
+ void updateBbox() const;
+ VRect bbox() const;
+ void setBbox(const VRect &bbox) const;
+ void reset();
+ void translate(const VPoint &p);
+ void operator*=(uchar alpha);
+ void opGeneric(const VRle::Data &, const VRle::Data &, Op code);
+ void opSubstract(const VRle::Data &, const VRle::Data &);
+ void opIntersect(VRle::View a, VRle::View b);
+ void opIntersect(const VRect &, VRle::VRleSpanCb, void *) const;
+ void addRect(const VRect &rect);
+ void clone(const VRle::Data &);
+
+ std::vector<VRle::Span> mSpans;
+ VPoint mOffset;
+ mutable VRect mBbox;
+ mutable bool mBboxDirty = true;
+ };
+
+private:
+ VRle opGeneric(const VRle &o, Data::Op opcode) const;
+
+ vcow_ptr<Data> d;
+};
+
+// Streams the spans of (this & r) to cb via the Data-level helper.
+inline void VRle::intersect(const VRect &r, VRleSpanCb cb, void *userData) const
+{
+ d->opIntersect(r, cb, userData);
+}
+
+V_END_NAMESPACE
+
+#endif // VRLE_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vsharedptr.h b/vendor/github.com/Benau/go_rlottie/vector_vsharedptr.h
new file mode 100644
index 00000000..fc0c419c
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vsharedptr.h
@@ -0,0 +1,123 @@
+#ifndef VSHAREDPTR_H
+#define VSHAREDPTR_H
+
+#include <cassert>
+#include <memory>
+#include <atomic>
+
// vshared_ptr is a minimal intrusive-count shared pointer: the payload and
// its reference count live in one heap-allocated "model" node. Rc selects
// the counter type -- std::size_t for single-threaded use, or
// std::atomic<std::size_t> for cross-thread sharing (see the rc_ptr/arc_ptr
// aliases below this class).
template <typename T, typename Rc>
class vshared_ptr {
    struct model {
        Rc mRef{1};

        model() = default;

        template <class... Args>
        explicit model(Args&&... args) : mValue(std::forward<Args>(args)...){}
        explicit model(const T& other) : mValue(other){}

        T mValue;
    };
    model* mModel{nullptr};

public:
    using element_type = T;

    vshared_ptr() = default;

    ~vshared_ptr()
    {
        unref();
    }

    // Constructs the payload in place; the fresh object starts at refcount 1.
    template <class... Args>
    explicit vshared_ptr(Args&&... args) : mModel(new model(std::forward<Args>(args)...))
    {
    }

    vshared_ptr(const vshared_ptr& x) noexcept : vshared_ptr()
    {
        if (x.mModel) {
            mModel = x.mModel;
            ++mModel->mRef;
        }
    }

    vshared_ptr(vshared_ptr&& x) noexcept : vshared_ptr()
    {
        if (x.mModel) {
            mModel = x.mModel;
            x.mModel = nullptr;
        }
    }

    // Copy assignment. The source model is referenced *before* the current
    // one is released: the previous unref-then-ref order destroyed the model
    // on self-assignment (or when both pointers shared one model at
    // refcount 1) and then incremented freed memory.
    auto operator=(const vshared_ptr& x) noexcept -> vshared_ptr&
    {
        model* next = x.mModel;
        if (next) ++next->mRef;
        unref();
        mModel = next;
        return *this;
    }

    // Move assignment; guarded against self-move, which would otherwise
    // release the model and then read the dangling pointer back.
    auto operator=(vshared_ptr&& x) noexcept -> vshared_ptr&
    {
        if (this != &x) {
            unref();
            mModel = x.mModel;
            x.mModel = nullptr;
        }
        return *this;
    }

    operator bool() const noexcept {
        return mModel != nullptr;
    }

    auto operator*() const noexcept -> element_type& { return read(); }

    auto operator-> () const noexcept -> element_type* { return &read(); }

    // Precondition (asserted): non-empty.
    std::size_t refCount() const noexcept
    {
        assert(mModel);

        return mModel->mRef;
    }

    // Precondition (asserted): non-empty.
    bool unique() const noexcept
    {
        assert(mModel);

        return mModel->mRef == 1;
    }

private:

    auto read() const noexcept -> element_type&
    {
        assert(mModel);

        return mModel->mValue;
    }

    // Adds one reference to the held model (no-op when empty).
    void ref()
    {
        if (mModel) ++mModel->mRef;
    }

    // Drops one reference; deletes the model when the count hits zero.
    void unref()
    {
        if (mModel && (--mModel->mRef == 0)) {
            delete mModel;
            mModel = nullptr;
        }
    }
};
+
+// atomic ref counted pointer implementation.
+template < typename T>
+using arc_ptr = vshared_ptr<T, std::atomic<std::size_t>>;
+
+// ref counter pointer implementation.
+template < typename T>
+using rc_ptr = vshared_ptr<T, std::size_t>;
+
+#endif // VSHAREDPTR_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vstackallocator.h b/vendor/github.com/Benau/go_rlottie/vector_vstackallocator.h
new file mode 100644
index 00000000..a305b739
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vstackallocator.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef VSTACK_ALLOCATOR_H
+#define VSTACK_ALLOCATOR_H
+
+#include <cstddef>
+#include <cassert>
+
+// Fixed-size monotonic buffer: allocate() bumps ptr_ inside buf_ and falls
+// back to ::operator new once the buffer is exhausted. deallocate() only
+// rewinds the bump pointer when the freed block is the most recent
+// in-buffer allocation (LIFO discipline), otherwise the space is simply
+// retained until reset().
+template <std::size_t N, std::size_t alignment = alignof(std::max_align_t)>
+class arena
+{
+ alignas(alignment) char buf_[N];
+ char* ptr_;
+
+public:
+ // ptr_ is nulled so the pointer_in_buffer assertions below can detect a
+ // stack_alloc used after its arena was destroyed.
+ ~arena() {ptr_ = nullptr;}
+ arena() noexcept : ptr_(buf_) {}
+ arena(const arena&) = delete;
+ arena& operator=(const arena&) = delete;
+
+ template <std::size_t ReqAlign> char* allocate(std::size_t n);
+ void deallocate(char* p, std::size_t n) noexcept;
+
+ static constexpr std::size_t size() noexcept {return N;}
+ std::size_t used() const noexcept {return static_cast<std::size_t>(ptr_ - buf_);}
+ void reset() noexcept {ptr_ = buf_;}
+
+private:
+ // Rounds n up to the arena alignment (alignment must be a power of two).
+ static
+ std::size_t
+ align_up(std::size_t n) noexcept
+ {return (n + (alignment-1)) & ~(alignment-1);}
+
+ bool
+ pointer_in_buffer(char* p) noexcept
+ {return buf_ <= p && p <= buf_ + N;}
+};
+
+template <std::size_t N, std::size_t alignment>
+template <std::size_t ReqAlign>
+char*
+arena<N, alignment>::allocate(std::size_t n)
+{
+ static_assert(ReqAlign <= alignment, "alignment is too small for this arena");
+ assert(pointer_in_buffer(ptr_) && "stack_alloc has outlived arena");
+ auto const aligned_n = align_up(n);
+ if (static_cast<decltype(aligned_n)>(buf_ + N - ptr_) >= aligned_n)
+ {
+ char* r = ptr_;
+ ptr_ += aligned_n;
+ return r;
+ }
+
+ // Buffer exhausted: fall back to the heap. deallocate() recognizes
+ // out-of-buffer pointers and routes them to operator delete.
+ static_assert(alignment <= alignof(std::max_align_t), "you've chosen an "
+ "alignment that is larger than alignof(std::max_align_t), and "
+ "cannot be guaranteed by normal operator new");
+ return static_cast<char*>(::operator new(n));
+}
+
+template <std::size_t N, std::size_t alignment>
+void
+arena<N, alignment>::deallocate(char* p, std::size_t n) noexcept
+{
+ assert(pointer_in_buffer(ptr_) && "stack_alloc has outlived arena");
+ if (pointer_in_buffer(p))
+ {
+ n = align_up(n);
+ // LIFO rewind: only the top-most in-buffer allocation is reclaimed.
+ if (p + n == ptr_)
+ ptr_ = p;
+ }
+ else
+ ::operator delete(p);
+}
+
+// Minimal C++11 allocator that carves memory out of a caller-owned arena,
+// letting short-lived containers keep their storage on the stack. All
+// copies/rebinds of the allocator share the same arena reference.
+template <class T, std::size_t N, std::size_t Align = alignof(std::max_align_t)>
+class stack_alloc
+{
+public:
+ using value_type = T;
+ static auto constexpr alignment = Align;
+ static auto constexpr size = N;
+ using arena_type = arena<size, alignment>;
+
+private:
+ arena_type& a_;
+
+public:
+ stack_alloc(const stack_alloc&) = default;
+ stack_alloc& operator=(const stack_alloc&) = delete;
+
+ stack_alloc(arena_type& a) noexcept : a_(a)
+ {
+ static_assert(size % alignment == 0,
+ "size N needs to be a multiple of alignment Align")#;
+ }
+ template <class U>
+ stack_alloc(const stack_alloc<U, N, alignment>& a) noexcept
+ : a_(a.a_) {}
+
+ template <class _Up> struct rebind {using other = stack_alloc<_Up, N, alignment>;};
+
+ T* allocate(std::size_t n)
+ {
+ return reinterpret_cast<T*>(a_.template allocate<alignof(T)>(n*sizeof(T)));
+ }
+ void deallocate(T* p, std::size_t n) noexcept
+ {
+ a_.deallocate(reinterpret_cast<char*>(p), n*sizeof(T));
+ }
+
+ template <class T1, std::size_t N1, std::size_t A1,
+ class U, std::size_t M, std::size_t A2>
+ friend
+ bool
+ operator==(const stack_alloc<T1, N1, A1>& x, const stack_alloc<U, M, A2>& y) noexcept;
+
+ template <class U, std::size_t M, std::size_t A> friend class stack_alloc;
+};
+
+// Two stack_allocs compare equal only when they have identical geometry
+// and share the same arena instance.
+template <class T, std::size_t N, std::size_t A1, class U, std::size_t M, std::size_t A2>
+inline
+bool
+operator==(const stack_alloc<T, N, A1>& x, const stack_alloc<U, M, A2>& y) noexcept
+{
+ return N == M && A1 == A2 && &x.a_ == &y.a_;
+}
+
+template <class T, std::size_t N, std::size_t A1, class U, std::size_t M, std::size_t A2>
+inline
+bool
+operator!=(const stack_alloc<T, N, A1>& x, const stack_alloc<U, M, A2>& y) noexcept
+{
+ return !(x == y);
+}
+
+#endif // VSTACK_ALLOCATOR_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vtaskqueue.h b/vendor/github.com/Benau/go_rlottie/vector_vtaskqueue.h
new file mode 100644
index 00000000..e505c2f4
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vtaskqueue.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
#ifndef VTASKQUEUE_H
#define VTASKQUEUE_H

// The original header used std::mutex / std::unique_lock /
// std::condition_variable while including only <deque>, relying on
// transitive includes from its users; make it self-contained.
#include <condition_variable>
#include <deque>
#include <mutex>

// Multi-producer / multi-consumer task queue. The try_* variants fail fast
// instead of blocking when the lock is contended, which lets a
// work-stealing scheduler probe several queues cheaply.
template <typename Task>
class TaskQueue {
    using lock_t = std::unique_lock<std::mutex>;
    std::deque<Task> _q;
    bool _done{false};
    std::mutex _mutex;
    std::condition_variable _ready;

public:
    // Non-blocking pop: gives up when the mutex is busy or the queue is empty.
    bool try_pop(Task &task)
    {
        lock_t lock{_mutex, std::try_to_lock};
        if (!lock || _q.empty()) return false;
        task = std::move(_q.front());
        _q.pop_front();
        return true;
    }

    // Non-blocking push: gives up when the mutex is busy.
    bool try_push(Task &&task)
    {
        {
            lock_t lock{_mutex, std::try_to_lock};
            if (!lock) return false;
            _q.push_back(std::move(task));
        }
        _ready.notify_one();
        return true;
    }

    // Wakes every blocked pop(); afterwards pop() drains the remaining
    // tasks and then returns false instead of waiting for more work.
    void done()
    {
        {
            lock_t lock{_mutex};
            _done = true;
        }
        _ready.notify_all();
    }

    // Blocking pop; returns false only once done() was called and the
    // queue has been drained.
    bool pop(Task &task)
    {
        lock_t lock{_mutex};
        while (_q.empty() && !_done) _ready.wait(lock);
        if (_q.empty()) return false;
        task = std::move(_q.front());
        _q.pop_front();
        return true;
    }

    // Blocking push (blocks only for the duration of the lock).
    void push(Task &&task)
    {
        {
            lock_t lock{_mutex};
            _q.push_back(std::move(task));
        }
        _ready.notify_one();
    }

};

#endif  // VTASKQUEUE_H
diff --git a/vendor/github.com/Benau/tgsconverter/LICENSE b/vendor/github.com/Benau/tgsconverter/LICENSE
new file mode 100644
index 00000000..86fd0417
--- /dev/null
+++ b/vendor/github.com/Benau/tgsconverter/LICENSE
@@ -0,0 +1,24 @@
+The MIT License
+
+Copyright (c) 2021, (see AUTHORS)
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/Benau/tgsconverter/libtgsconverter/apng.go b/vendor/github.com/Benau/tgsconverter/libtgsconverter/apng.go
new file mode 100644
index 00000000..78541533
--- /dev/null
+++ b/vendor/github.com/Benau/tgsconverter/libtgsconverter/apng.go
@@ -0,0 +1,51 @@
+package libtgsconverter
+
+import "bytes"
+import "image"
+
+import "github.com/kettek/apng"
+import "github.com/av-elier/go-decimal-to-rational"
+
+type toapng struct {
+ apng apng.APNG
+ prev_frame *image.RGBA
+}
+
+func(to_apng *toapng) init(w uint, h uint, options ConverterOptions) {
+}
+
+func(to_apng *toapng) SupportsAnimation() bool {
+ return true
+}
+
+func (to_apng *toapng) AddFrame(image *image.RGBA, fps uint) error {
+ if to_apng.prev_frame != nil && sameImage(to_apng.prev_frame, image) {
+ var idx = len(to_apng.apng.Frames) - 1
+ var prev_fps = float64(to_apng.apng.Frames[idx].DelayNumerator) / float64(to_apng.apng.Frames[idx].DelayDenominator)
+ prev_fps += 1.0 / float64(fps)
+ rat := dectofrac.NewRatP(prev_fps, 0.001)
+ to_apng.apng.Frames[idx].DelayNumerator = uint16(rat.Num().Int64())
+ to_apng.apng.Frames[idx].DelayDenominator = uint16(rat.Denom().Int64())
+ return nil
+ }
+ f := apng.Frame{}
+ f.Image = image
+ f.DelayNumerator = 1
+ f.DelayDenominator = uint16(fps)
+ f.DisposeOp = apng.DISPOSE_OP_BACKGROUND
+ f.BlendOp = apng.BLEND_OP_SOURCE
+ f.IsDefault = false
+ to_apng.apng.Frames = append(to_apng.apng.Frames, f)
+ to_apng.prev_frame = image
+ return nil
+}
+
+func (to_apng *toapng) Result() []byte {
+ var data []byte
+ w := bytes.NewBuffer(data)
+ err := apng.Encode(w, to_apng.apng)
+ if err != nil {
+ return nil
+ }
+ return w.Bytes()
+}
diff --git a/vendor/github.com/Benau/tgsconverter/libtgsconverter/gif.go b/vendor/github.com/Benau/tgsconverter/libtgsconverter/gif.go
new file mode 100644
index 00000000..c4f5c5d9
--- /dev/null
+++ b/vendor/github.com/Benau/tgsconverter/libtgsconverter/gif.go
@@ -0,0 +1,81 @@
+package libtgsconverter
+
+import "bytes"
+
+import "image"
+import "image/color"
+import "image/gif"
+
+type togif struct {
+ gif gif.GIF
+ images []image.Image
+ prev_frame *image.RGBA
+}
+
+func(to_gif *togif) init(w uint, h uint, options ConverterOptions) {
+ to_gif.gif.Config.Width = int(w)
+ to_gif.gif.Config.Height = int(h)
+}
+
+func(to_gif *togif) SupportsAnimation() bool {
+ return true
+}
+
+func (to_gif *togif) AddFrame(image *image.RGBA, fps uint) error {
+ var fps_int = int(1.0 / float32(fps) * 100.)
+ if to_gif.prev_frame != nil && sameImage(to_gif.prev_frame, image) {
+ to_gif.gif.Delay[len(to_gif.gif.Delay) - 1] += fps_int
+ return nil
+ }
+ to_gif.gif.Image = append(to_gif.gif.Image, nil)
+ to_gif.gif.Delay = append(to_gif.gif.Delay, fps_int)
+ to_gif.gif.Disposal = append(to_gif.gif.Disposal, gif.DisposalBackground)
+ to_gif.images = append(to_gif.images, image)
+ to_gif.prev_frame = image
+ return nil
+}
+
+func (to_gif *togif) Result() []byte {
+ q := medianCutQuantizer{mode, nil, false}
+ p := q.quantizeMultiple(make([]color.Color, 0, 256), to_gif.images)
+ // Add transparent entry finally
+ var trans_idx uint8 = 0
+ if q.reserveTransparent {
+ trans_idx = uint8(len(p))
+ }
+ var id_map = make(map[uint32]uint8)
+ for i, img := range to_gif.images {
+ pi := image.NewPaletted(img.Bounds(), p)
+ for y := 0; y < img.Bounds().Dy(); y++ {
+ for x := 0; x < img.Bounds().Dx(); x++ {
+ c := img.At(x, y)
+ cr, cg, cb, ca := c.RGBA()
+ cid := (cr >> 8) << 16 | cg | (cb >> 8)
+ if q.reserveTransparent && ca == 0 {
+ pi.Pix[pi.PixOffset(x, y)] = trans_idx
+ } else if val, ok := id_map[cid]; ok {
+ pi.Pix[pi.PixOffset(x, y)] = val
+ } else {
+ val := uint8(p.Index(c))
+ pi.Pix[pi.PixOffset(x, y)] = val
+ id_map[cid] = val
+ }
+ }
+ }
+ to_gif.gif.Image[i] = pi
+ }
+ if q.reserveTransparent {
+ p = append(p, color.RGBA{0, 0, 0, 0})
+ }
+ for _, img := range to_gif.gif.Image {
+ img.Palette = p
+ }
+ to_gif.gif.Config.ColorModel = p
+ var data []byte
+ w := bytes.NewBuffer(data)
+ err := gif.EncodeAll(w, &to_gif.gif)
+ if err != nil {
+ return nil
+ }
+ return w.Bytes()
+}
diff --git a/vendor/github.com/Benau/tgsconverter/libtgsconverter/imagewriter.go b/vendor/github.com/Benau/tgsconverter/libtgsconverter/imagewriter.go
new file mode 100644
index 00000000..9549e337
--- /dev/null
+++ b/vendor/github.com/Benau/tgsconverter/libtgsconverter/imagewriter.go
@@ -0,0 +1,40 @@
+package libtgsconverter
+
+import "image"
+
+// imageWriter is the common contract for the per-format encoders (apng,
+// gif, png, webp). Frames arrive one at a time via AddFrame; Result
+// returns the encoded output, or nil when encoding failed.
+type imageWriter interface {
+ init(w uint, h uint, options ConverterOptions)
+ SupportsAnimation() bool
+ AddFrame(image *image.RGBA, fps uint) error
+ Result() []byte
+}
+
+func sameImage(a *image.RGBA, b *image.RGBA) bool {
+ if len(a.Pix) != len(b.Pix) {
+ return false
+ }
+ for i, v := range a.Pix {
+ if v != b.Pix[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func newImageWriter(extension string, w uint, h uint, options ConverterOptions) imageWriter {
+ var writer imageWriter
+ switch extension {
+ case "apng":
+ writer = &toapng{}
+ case "gif":
+ writer = &togif{}
+ case "png":
+ writer = &topng{}
+ case "webp":
+ writer = &towebp{}
+ default:
+ return nil
+ }
+ writer.init(w, h, options)
+ return writer
+}
diff --git a/vendor/github.com/Benau/tgsconverter/libtgsconverter/lib.go b/vendor/github.com/Benau/tgsconverter/libtgsconverter/lib.go
new file mode 100644
index 00000000..af6f8ab0
--- /dev/null
+++ b/vendor/github.com/Benau/tgsconverter/libtgsconverter/lib.go
@@ -0,0 +1,160 @@
+package libtgsconverter
+
+import "bytes"
+import "errors"
+import "compress/gzip"
+import "image"
+import "io/ioutil"
+
+import "github.com/Benau/go_rlottie"
+
// ConverterOptions configures a tgs conversion: target format, frame rate,
// scale factor and (for webp output) encoder quality.
type ConverterOptions interface {
	SetExtension(ext string)
	SetFPS(fps uint)
	SetScale(scale float32)
	SetWebpQuality(webp_quality float32)
	GetExtension() string
	GetFPS() uint
	GetScale() float32
	GetWebpQuality() float32
}

// converter_options is the plain-struct implementation of ConverterOptions.
type converter_options struct {
	extension   string  // apng, gif, png or webp
	fps         uint    // frames per second of animated outputs
	scale       float32 // scale applied to the lottie canvas size
	webpQuality float32 // webp encoder quality (0 to 100)
}

func (opt *converter_options) SetExtension(ext string) { opt.extension = ext }

func (opt *converter_options) SetFPS(fps uint) { opt.fps = fps }

func (opt *converter_options) SetScale(scale float32) { opt.scale = scale }

func (opt *converter_options) SetWebpQuality(webp_quality float32) { opt.webpQuality = webp_quality }

func (opt *converter_options) GetExtension() string { return opt.extension }

func (opt *converter_options) GetFPS() uint { return opt.fps }

func (opt *converter_options) GetScale() float32 { return opt.scale }

func (opt *converter_options) GetWebpQuality() float32 { return opt.webpQuality }

// NewConverterOptions returns the defaults: png output, 30 fps, scale 1.0
// and webp quality 75.
func NewConverterOptions() ConverterOptions {
	return &converter_options{extension: "png", fps: 30, scale: 1.0, webpQuality: 75}
}
+
// imageFromBuffer wraps a w*h pixel buffer rendered by rlottie into an
// *image.RGBA, taking ownership of p. rlottie emits premultiplied BGRA
// (ARGB32_Premultiplied), so the blue and red bytes of every pixel are
// swapped in place first.
func imageFromBuffer(p []byte, w uint, h uint) *image.RGBA {
	for i := 0; i < len(p); i += 4 {
		p[i], p[i+2] = p[i+2], p[i]
	}
	img := image.NewRGBA(image.Rect(0, 0, int(w), int(h)))
	img.Pix = p
	img.Stride = int(w) * 4
	return img
}
+
+// disabled_cache remembers that rlottie's global model cache was switched
+// off; this is done lazily before the first conversion.
+var disabled_cache = false
+
+// ImportFromData converts a gzip-compressed tgs (lottie JSON) buffer into
+// the output format selected by options and returns the encoded bytes.
+func ImportFromData(data []byte, options ConverterOptions) ([]byte, error) {
+ if !disabled_cache {
+ disabled_cache = true
+ go_rlottie.LottieConfigureModelCacheSize(0)
+ }
+ z, err := gzip.NewReader(bytes.NewReader(data))
+ if err != nil {
+ return nil, errors.New("Failed to create gzip reader:" + err.Error())
+ }
+ uncompressed, err := ioutil.ReadAll(z)
+ if err != nil {
+ return nil, errors.New("Failed to read gzip archive")
+ }
+ z.Close()
+
+ animation := go_rlottie.LottieAnimationFromData(string(uncompressed[:]), "", "")
+ if animation == nil {
+ return nil, errors.New("Failed to import lottie animation data")
+ }
+
+ // Output dimensions: the lottie canvas size scaled by the user option.
+ w, h := go_rlottie.LottieAnimationGetSize(animation)
+ w = uint(float32(w) * options.GetScale())
+ h = uint(float32(h) * options.GetScale())
+
+ frame_rate := go_rlottie.LottieAnimationGetFramerate(animation)
+ frame_count := go_rlottie.LottieAnimationGetTotalframe(animation)
+ duration := float32(frame_count) / float32(frame_rate)
+ var desired_framerate = float32(options.GetFPS())
+ // Cap at 50 fps: most GIF players cannot handle the ~60 fps found in
+ // many tgs files.
+ if desired_framerate > 50. {
+ desired_framerate = 50.
+ }
+ step := 1.0 / desired_framerate
+
+ writer := newImageWriter(options.GetExtension(), w, h, options)
+ if writer == nil {
+ return nil, errors.New("Failed create imagewriter")
+ }
+
+ // Sample the animation at the desired frame rate; a still-image writer
+ // (png) stops after the first frame.
+ var i float32
+ for i = 0.; i < duration; i += step {
+ frame := go_rlottie.LottieAnimationGetFrameAtPos(animation, i / duration)
+ buf := make([]byte, w * h * 4)
+ go_rlottie.LottieAnimationRender(animation, frame, buf, w, h, w * 4)
+ m := imageFromBuffer(buf, w, h)
+ err := writer.AddFrame(m, uint(desired_framerate))
+ if err != nil {
+ return nil, errors.New("Failed to add frame:" + err.Error())
+ }
+ if !writer.SupportsAnimation() {
+ break
+ }
+ }
+ go_rlottie.LottieAnimationDestroy(animation)
+ return writer.Result(), nil
+}
+
+func ImportFromFile(path string, options ConverterOptions) ([]byte, error) {
+ tgs, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, errors.New("Error when opening file:" + err.Error())
+ }
+ return ImportFromData(tgs, options)
+}
+
// SupportsExtension reports whether extension names one of the supported
// output formats: apng, gif, png or webp.
func SupportsExtension(extension string) bool {
	// A single comma-separated case replaces the fallthrough chain; the
	// original also had an unreachable trailing return after the switch.
	switch extension {
	case "apng", "gif", "png", "webp":
		return true
	}
	return false
}
diff --git a/vendor/github.com/Benau/tgsconverter/libtgsconverter/png.go b/vendor/github.com/Benau/tgsconverter/libtgsconverter/png.go
new file mode 100644
index 00000000..cf492ea4
--- /dev/null
+++ b/vendor/github.com/Benau/tgsconverter/libtgsconverter/png.go
@@ -0,0 +1,30 @@
+package libtgsconverter
+
+import "bytes"
+import "image"
+import "image/png"
+
+type topng struct {
+ result []byte
+}
+
+func(to_png *topng) init(w uint, h uint, options ConverterOptions) {
+}
+
+func(to_png *topng) SupportsAnimation() bool {
+ return false
+}
+
+func (to_png *topng) AddFrame(image *image.RGBA, fps uint) error {
+ var data []byte
+ w := bytes.NewBuffer(data)
+ if err := png.Encode(w, image); err != nil {
+ return err
+ }
+ to_png.result = w.Bytes()
+ return nil
+}
+
+func (to_png *topng) Result() []byte {
+ return to_png.result
+}
diff --git a/vendor/github.com/Benau/tgsconverter/libtgsconverter/quantize_bucket.go b/vendor/github.com/Benau/tgsconverter/libtgsconverter/quantize_bucket.go
new file mode 100644
index 00000000..1f00c685
--- /dev/null
+++ b/vendor/github.com/Benau/tgsconverter/libtgsconverter/quantize_bucket.go
@@ -0,0 +1,119 @@
+package libtgsconverter
+
+import "image/color"
+
+type colorAxis uint8
+
+// Color axis constants
+const (
+ red colorAxis = iota
+ green
+ blue
+)
+
+type colorPriority struct {
+ p uint32
+ color.RGBA
+}
+
+func (c colorPriority) axis(span colorAxis) uint8 {
+ switch span {
+ case red:
+ return c.R
+ case green:
+ return c.G
+ default:
+ return c.B
+ }
+}
+
+type colorBucket []colorPriority
+
+// partition splits the bucket in place around the split value of its
+// widest color channel (as chosen by span()) and returns the two halves.
+// When the scan collapses to one end, a single element is still peeled off
+// so both halves are non-empty and the caller always makes progress.
+func (cb colorBucket) partition() (colorBucket, colorBucket) {
+ mean, span := cb.span()
+ left, right := 0, len(cb)-1
+ for left < right {
+ // Swap the out-of-place pair, then advance both scans.
+ cb[left], cb[right] = cb[right], cb[left]
+ for cb[left].axis(span) < mean && left < right {
+ left++
+ }
+ for cb[right].axis(span) >= mean && left < right {
+ right--
+ }
+ }
+ if left == 0 {
+ return cb[:1], cb[1:]
+ }
+ if left == len(cb)-1 {
+ return cb[:len(cb)-1], cb[len(cb)-1:]
+ }
+ return cb[:left], cb[left:]
+}
+
+func (cb colorBucket) mean() color.RGBA {
+ var r, g, b uint64
+ var p uint64
+ for _, c := range cb {
+ p += uint64(c.p)
+ r += uint64(c.R) * uint64(c.p)
+ g += uint64(c.G) * uint64(c.p)
+ b += uint64(c.B) * uint64(c.p)
+ }
+ return color.RGBA{uint8(r / p), uint8(g / p), uint8(b / p), 255}
+}
+
// constraint tracks, for one color channel, the min/max value observed and
// a histogram of weights per value. Callers initialize min to 255 before
// the first update.
type constraint struct {
	min  uint8
	max  uint8
	vals [256]uint64
}

// update folds one observation (channel value `index` with weight p) into
// the histogram and the running min/max.
func (c *constraint) update(index uint8, p uint32) {
	c.vals[index] += uint64(p)
	if index < c.min {
		c.min = index
	}
	if index > c.max {
		c.max = index
	}
}

// span is the distance between the largest and smallest value observed.
func (c *constraint) span() uint8 {
	return c.max - c.min
}
+
+// span picks the channel with the widest value range and returns a split
+// threshold for it -- the value at which the cumulative weight reaches
+// half of the bucket's total (an approximate weighted median) -- together
+// with the chosen channel.
+func (cb colorBucket) span() (uint8, colorAxis) {
+ var R, G, B constraint
+ R.min = 255
+ G.min = 255
+ B.min = 255
+ var p uint64
+ for _, c := range cb {
+ R.update(c.R, c.p)
+ G.update(c.G, c.p)
+ B.update(c.B, c.p)
+ p += uint64(c.p)
+ }
+ // Widest channel wins; ties fall through to green, then blue.
+ var toCount *constraint
+ var span colorAxis
+ if R.span() > G.span() && R.span() > B.span() {
+ span = red
+ toCount = &R
+ } else if G.span() > B.span() {
+ span = green
+ toCount = &G
+ } else {
+ span = blue
+ toCount = &B
+ }
+ // Walk the histogram until half the total weight has been counted.
+ var counted uint64
+ var i int
+ var c uint64
+ for i, c = range toCount.vals {
+ if counted > p/2 || counted+c == p {
+ break
+ }
+ counted += c
+ }
+ return uint8(i), span
+}
diff --git a/vendor/github.com/Benau/tgsconverter/libtgsconverter/quantize_mediancut.go b/vendor/github.com/Benau/tgsconverter/libtgsconverter/quantize_mediancut.go
new file mode 100644
index 00000000..850708b9
--- /dev/null
+++ b/vendor/github.com/Benau/tgsconverter/libtgsconverter/quantize_mediancut.go
@@ -0,0 +1,209 @@
+package libtgsconverter
+
+import (
+ "image"
+ "image/color"
+ "sync"
+)
+
+// bucketPool recycles the large scratch colorBucket slices used while
+// building color histograms. maxCap adapts to recent request sizes: it
+// decays by 1% whenever a smaller bucket is requested, so the pool does
+// not pin an oversized allocation forever.
+type bucketPool struct {
+ sync.Pool
+ maxCap int
+ m sync.Mutex
+}
+
+// getBucket returns a zeroed colorBucket of length c, reusing a pooled
+// slice when one with sufficient capacity is available.
+func (p *bucketPool) getBucket(c int) colorBucket {
+ p.m.Lock()
+ if p.maxCap > c {
+ p.maxCap = p.maxCap * 99 / 100
+ }
+ if p.maxCap < c {
+ p.maxCap = c
+ }
+ maxCap := p.maxCap
+ p.m.Unlock()
+ val := p.Pool.Get()
+ if val == nil || cap(val.(colorBucket)) < c {
+ return make(colorBucket, maxCap)[0:c]
+ }
+ // Reused slice: clear the entries we hand back.
+ slice := val.(colorBucket)
+ slice = slice[0:c]
+ for i := range slice {
+ slice[i] = colorPriority{}
+ }
+ return slice
+}
+
+// bpool is the package-wide scratch pool shared by the quantizer.
+var bpool bucketPool
+
+// aggregationType specifies how the colors of a bucket are collapsed into
+// one palette entry.
+type aggregationType uint8
+
+const (
+ // Mode - pick the highest priority value
+ mode aggregationType = iota
+ // Mean - weighted average all values
+ mean
+)
+
+// medianCutQuantizer implements the go draw.Quantizer interface using the Median Cut method
+type medianCutQuantizer struct {
+ // The type of aggregation to be used to find final colors
+ aggregation aggregationType
+ // The weighting function to use on each pixel; nil means every pixel
+ // has weight 1.
+ weighting func(image.Image, int, int) uint32
+ // Set during bucket building when a fully transparent pixel is seen;
+ // the caller must then append a transparent entry to the palette.
+ reserveTransparent bool
+}
+
+// bucketize takes a bucket and performs median cut on it to obtain the
+// target number of grouped buckets.
+func bucketize(colors colorBucket, num int) (buckets []colorBucket) {
+ if len(colors) == 0 || num == 0 {
+ return nil
+ }
+ bucket := colors
+ buckets = make([]colorBucket, 1, num*2)
+ buckets[0] = bucket
+
+ // Breadth-first splitting: pop the front bucket, split it, push both
+ // halves. Buckets of one or two colors are terminal and split trivially.
+ for len(buckets) < num && len(buckets) < len(colors) { // Limit to palette capacity or number of colors
+ bucket, buckets = buckets[0], buckets[1:]
+ if len(bucket) < 2 {
+ buckets = append(buckets, bucket)
+ continue
+ } else if len(bucket) == 2 {
+ buckets = append(buckets, bucket[:1], bucket[1:])
+ continue
+ }
+
+ left, right := bucket.partition()
+ buckets = append(buckets, left, right)
+ }
+ return
+}
+
+// palettize finds a single color to represent a set of color buckets
+func (q* medianCutQuantizer) palettize(p color.Palette, buckets []colorBucket) color.Palette {
+ for _, bucket := range buckets {
+ switch q.aggregation {
+ case mean:
+ mean := bucket.mean()
+ p = append(p, mean)
+ case mode:
+ var best colorPriority
+ for _, c := range bucket {
+ if c.p > best.p {
+ best = c
+ }
+ }
+ p = append(p, best.RGBA)
+ }
+ }
+ return p
+}
+
+// quantizeSlice expands the provided bucket and then palettizes the result
+func (q* medianCutQuantizer) quantizeSlice(p color.Palette, colors []colorPriority) color.Palette {
+ numColors := cap(p) - len(p)
+ reserveTransparent := q.reserveTransparent
+ if reserveTransparent {
+ numColors--
+ }
+ buckets := bucketize(colors, numColors)
+ p = q.palettize(p, buckets)
+ return p
+}
+
+func colorAt(m image.Image, x int, y int) color.RGBA {
+ switch i := m.(type) {
+ case *image.YCbCr:
+ yi := i.YOffset(x, y)
+ ci := i.COffset(x, y)
+ c := color.YCbCr{
+ i.Y[yi],
+ i.Cb[ci],
+ i.Cr[ci],
+ }
+ return color.RGBA{c.Y, c.Cb, c.Cr, 255}
+ case *image.RGBA:
+ ci := i.PixOffset(x, y)
+ return color.RGBA{i.Pix[ci+0], i.Pix[ci+1], i.Pix[ci+2], i.Pix[ci+3]}
+ default:
+ return color.RGBAModel.Convert(i.At(x, y)).(color.RGBA)
+ }
+}
+
+// buildBucketMultiple creates a prioritized color slice with all the colors in
+// the images.
+func (q* medianCutQuantizer) buildBucketMultiple(ms []image.Image) (bucket colorBucket) {
+ if len(ms) < 1 {
+ return colorBucket{}
+ }
+
+ // Open-addressed scratch table sized at twice the pixel count of the
+ // first image; assumes all images share those bounds (true for the
+ // animation frames this package feeds in -- TODO confirm for other
+ // callers).
+ bounds := ms[0].Bounds()
+ size := (bounds.Max.X - bounds.Min.X) * (bounds.Max.Y - bounds.Min.Y) * 2
+ sparseBucket := bpool.getBucket(size)
+
+ for _, m := range ms {
+ for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
+ for x := bounds.Min.X; x < bounds.Max.X; x++ {
+ priority := uint32(1)
+ if q.weighting != nil {
+ priority = q.weighting(m, x, y)
+ }
+ c := colorAt(m, x, y)
+ // Fully transparent pixels are not counted; they only flag
+ // that the palette will need a transparent entry.
+ if c.A == 0 {
+ if !q.reserveTransparent {
+ q.reserveTransparent = true
+ }
+ continue
+ }
+ if priority != 0 {
+ // Hash on the packed 24-bit color; collisions probe
+ // forward with a growing step (index += 1 + i).
+ index := int(c.R)<<16 | int(c.G)<<8 | int(c.B)
+ for i := 1; ; i++ {
+ p := &sparseBucket[index%size]
+ if p.p == 0 || p.RGBA == c {
+ *p = colorPriority{p.p + priority, c}
+ break
+ }
+ index += 1 + i
+ }
+ }
+ }
+ }
+ }
+
+ // Compact the sparse table into a dense bucket, reusing its backing
+ // array. YCbCr sources are converted to RGB here (colorAt returned the
+ // raw components for them).
+ bucket = sparseBucket[:0]
+ switch ms[0].(type) {
+ case *image.YCbCr:
+ for _, p := range sparseBucket {
+ if p.p != 0 {
+ r, g, b := color.YCbCrToRGB(p.R, p.G, p.B)
+ bucket = append(bucket, colorPriority{p.p, color.RGBA{r, g, b, p.A}})
+ }
+ }
+ default:
+ for _, p := range sparseBucket {
+ if p.p != 0 {
+ bucket = append(bucket, p)
+ }
+ }
+ }
+ return
+}
+
+// Quantize quantizes an image to a palette and returns the palette
+func (q* medianCutQuantizer) quantize(p color.Palette, m image.Image) color.Palette {
+ // Package quantize offers an implementation of the draw.Quantize interface using an optimized Median Cut method,
+ // including advanced functionality for fine-grained control of color priority
+ bucket := q.buildBucketMultiple([]image.Image{m})
+ defer bpool.Put(bucket)
+ return q.quantizeSlice(p, bucket)
+}
+
+// QuantizeMultiple quantizes several images at once to a palette and returns
+// the palette
+func (q* medianCutQuantizer) quantizeMultiple(p color.Palette, m []image.Image) color.Palette {
+ bucket := q.buildBucketMultiple(m)
+ defer bpool.Put(bucket)
+ return q.quantizeSlice(p, bucket)
+}
diff --git a/vendor/github.com/Benau/tgsconverter/libtgsconverter/webp.go b/vendor/github.com/Benau/tgsconverter/libtgsconverter/webp.go
new file mode 100644
index 00000000..60e9887c
--- /dev/null
+++ b/vendor/github.com/Benau/tgsconverter/libtgsconverter/webp.go
@@ -0,0 +1,39 @@
+package libtgsconverter
+
+import "bytes"
+import "image"
+
+import "github.com/sizeofint/webpanimation"
+
+type towebp struct {
+ timestamp int
+ webpanim *webpanimation.WebpAnimation
+ config webpanimation.WebPConfig
+}
+
+func(to_webp *towebp) init(w uint, h uint, options ConverterOptions) {
+ to_webp.timestamp = 0
+ to_webp.webpanim = webpanimation.NewWebpAnimation(int(w), int(h), 0)
+ to_webp.config = webpanimation.NewWebpConfig()
+ to_webp.config.SetQuality(options.GetWebpQuality())
+}
+
+func(to_webp *towebp) SupportsAnimation() bool {
+ return true
+}
+
+func (to_webp *towebp) AddFrame(image *image.RGBA, fps uint) error {
+ err := to_webp.webpanim.AddFrame(image, to_webp.timestamp, to_webp.config)
+ to_webp.timestamp += int((1.0 / float32(fps)) * 1000.)
+ return err
+}
+
+func (to_webp *towebp) Result() []byte {
+ var buf bytes.Buffer
+ err := to_webp.webpanim.Encode(&buf)
+ if err != nil {
+ return nil
+ }
+ to_webp.webpanim.ReleaseMemory()
+ return buf.Bytes()
+}