From 441276b030c50899b0f95bc5dca524bfd97dc9c1 Mon Sep 17 00:00:00 2001 From: re2zero Date: Thu, 17 Apr 2025 14:40:52 +0800 Subject: [PATCH 1/4] refactor: [build]Enable cmake build Enable build by cmake and refactor deepin-pdfium library. Log: Enable cmake build. --- 3rdparty/deepin-pdfium/CMakeLists.txt | 153 +++ 3rdparty/deepin-pdfium/deepin-pdfium.pc.in | 10 + 3rdparty/deepin-pdfium/include/dpdfannot.h | 22 +- 3rdparty/deepin-pdfium/include/dpdfdoc.h | 6 +- 3rdparty/deepin-pdfium/include/dpdfglobal.h | 5 + 3rdparty/deepin-pdfium/include/dpdfpage.h | 12 +- .../misc/deepin-pdfiumConfig.cmake.in | 11 + .../src/3rdparty/pdfium/CMakeLists.txt | 152 +++ .../src/3rdparty/pdfium/core.cmake | 633 ++++++++++++ .../src/3rdparty/pdfium/fpdfsdk.cmake | 119 +++ .../src/3rdparty/pdfium/fx_agg.cmake | 33 + .../src/3rdparty/pdfium/fx_base.cmake | 62 ++ .../src/3rdparty/pdfium/fx_skia.cmake | 8 + .../src/3rdparty/pdfium/fxjs.cmake | 13 + .../base/allocator/partition_allocator/OWNERS | 8 + .../address_space_randomization.cc | 70 ++ .../address_space_randomization.h | 203 ++++ .../base/allocator/partition_allocator/oom.h | 40 + .../partition_allocator/oom_callback.cc | 29 + .../partition_allocator/oom_callback.h | 26 + .../partition_allocator/page_allocator.cc | 259 +++++ .../partition_allocator/page_allocator.h | 197 ++++ .../page_allocator_constants.h | 66 ++ .../page_allocator_internal.h | 20 + .../page_allocator_internals_posix.h | 218 ++++ .../page_allocator_internals_win.h | 145 +++ .../partition_allocator/partition_alloc.cc | 852 ++++++++++++++++ .../partition_allocator/partition_alloc.h | 532 ++++++++++ .../partition_alloc_constants.h | 200 ++++ .../partition_allocator/partition_bucket.cc | 566 +++++++++++ .../partition_allocator/partition_bucket.h | 130 +++ .../partition_allocator/partition_cookie.h | 72 ++ .../partition_direct_map_extent.h | 35 + .../partition_freelist_entry.h | 73 ++ .../partition_allocator/partition_oom.cc | 26 + .../partition_allocator/partition_oom.h | 
30 + .../partition_allocator/partition_page.cc | 168 +++ .../partition_allocator/partition_page.h | 316 ++++++ .../partition_root_base.cc | 42 + .../partition_allocator/partition_root_base.h | 201 ++++ .../allocator/partition_allocator/random.cc | 96 ++ .../allocator/partition_allocator/random.h | 29 + .../partition_allocator/spin_lock.cc | 109 ++ .../allocator/partition_allocator/spin_lock.h | 52 + .../3rdparty/pdfium/pdfium/base/base_export.h | 3 + .../src/3rdparty/pdfium/pdfium/base/bits.h | 187 ++++ .../pdfium/pdfium/base/compiler_specific.h | 183 ++++ .../pdfium/pdfium/base/containers/adapters.h | 54 + .../pdfium/pdfium/base/debug/alias.cc | 30 + .../3rdparty/pdfium/pdfium/base/debug/alias.h | 34 + .../pdfium/pdfium/base/immediate_crash.h | 168 +++ .../src/3rdparty/pdfium/pdfium/base/logging.h | 42 + .../pdfium/base/memory/aligned_memory.cc | 50 + .../pdfium/base/memory/aligned_memory.h | 84 ++ .../pdfium/pdfium/base/no_destructor.h | 100 ++ .../pdfium/pdfium/base/numerics/OWNERS | 3 + .../pdfium/base/numerics/checked_math.h | 395 ++++++++ .../pdfium/base/numerics/checked_math_impl.h | 579 +++++++++++ .../pdfium/base/numerics/clamped_math.h | 266 +++++ .../pdfium/base/numerics/clamped_math_impl.h | 343 +++++++ .../pdfium/base/numerics/safe_conversions.h | 360 +++++++ .../base/numerics/safe_conversions_arm_impl.h | 53 + .../base/numerics/safe_conversions_impl.h | 853 ++++++++++++++++ .../pdfium/pdfium/base/numerics/safe_math.h | 12 + .../pdfium/base/numerics/safe_math_arm_impl.h | 124 +++ .../base/numerics/safe_math_clang_gcc_impl.h | 159 +++ .../base/numerics/safe_math_shared_impl.h | 242 +++++ .../3rdparty/pdfium/pdfium/base/optional.h | 958 ++++++++++++++++++ .../3rdparty/pdfium/pdfium/base/ptr_util.h | 22 + .../src/3rdparty/pdfium/pdfium/base/span.h | 352 +++++++ .../3rdparty/pdfium/pdfium/base/stl_util.h | 169 +++ .../pdfium/pdfium/base/sys_byteorder.h | 141 +++ .../pdfium/pdfium/base/template_util.h | 177 ++++ .../pdfium/pdfium/base/test/scoped_locale.cc 
| 25 + .../pdfium/pdfium/base/test/scoped_locale.h | 30 + .../pdfium/pdfium/build/build_config.h | 15 +- .../pdfium/core/fxcodec/flate/flatemodule.cpp | 8 +- .../pdfium/core/fxcodec/icc/iccmodule.h | 4 - .../pdfium/core/fxcodec/jpeg/jpeg_common.h | 9 - .../pdfium/core/fxcodec/jpx/cjpx_decoder.cpp | 15 +- .../pdfium/core/fxcodec/jpx/cjpx_decoder.h | 6 +- .../core/fxcodec/jpx/jpx_decode_utils.h | 4 - .../pdfium/pdfium/core/fxcrt/fx_extension.h | 4 - .../pdfium/core/fxge/freetype/fx_freetype.cpp | 2 + .../pdfium/fpdfsdk/cpdfsdk_actionhandler.cpp | 3 + .../fpdfsdk/cpdfsdk_formfillenvironment.cpp | 2 + .../fpdfsdk/cpdfsdk_formfillenvironment.h | 4 + .../fpdfsdk/cpdfsdk_interactiveform.cpp | 9 +- .../pdfium/pdfium/fpdfsdk/fpdf_view.cpp | 12 +- 3rdparty/deepin-pdfium/src/dpdfannot.cpp | 4 + 3rdparty/deepin-pdfium/src/dpdfdoc.cpp | 12 +- 3rdparty/deepin-pdfium/src/dpdfglobal.cpp | 4 + 3rdparty/deepin-pdfium/src/dpdfpage.cpp | 8 + CMakeLists.back.in | 114 --- CMakeLists.txt | 93 ++ cmake/translation-generate.cmake | 39 + debian/rules | 23 +- htmltopdf/CMakeLists.txt | 53 + reader/CMakeLists.txt | 117 +++ 99 files changed, 12616 insertions(+), 195 deletions(-) create mode 100644 3rdparty/deepin-pdfium/CMakeLists.txt create mode 100644 3rdparty/deepin-pdfium/deepin-pdfium.pc.in create mode 100644 3rdparty/deepin-pdfium/misc/deepin-pdfiumConfig.cmake.in create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/CMakeLists.txt create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/core.cmake create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/fpdfsdk.cmake create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/fx_agg.cmake create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/fx_base.cmake create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/fx_skia.cmake create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/fxjs.cmake create mode 100644 
3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/OWNERS create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/address_space_randomization.cc create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/address_space_randomization.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/oom.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/oom_callback.cc create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/oom_callback.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/page_allocator.cc create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/page_allocator.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/page_allocator_constants.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/page_allocator_internal.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/page_allocator_internals_posix.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/page_allocator_internals_win.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_alloc.cc create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_alloc.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_alloc_constants.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_bucket.cc create mode 
100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_bucket.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_cookie.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_direct_map_extent.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_freelist_entry.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_oom.cc create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_oom.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_page.cc create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_page.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_root_base.cc create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_root_base.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/random.cc create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/random.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/spin_lock.cc create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/spin_lock.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/base_export.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/bits.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/compiler_specific.h create mode 100644 
3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/containers/adapters.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/debug/alias.cc create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/debug/alias.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/immediate_crash.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/logging.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/memory/aligned_memory.cc create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/memory/aligned_memory.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/no_destructor.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/numerics/OWNERS create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/numerics/checked_math.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/numerics/checked_math_impl.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/numerics/clamped_math.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/numerics/clamped_math_impl.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/numerics/safe_conversions.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/numerics/safe_conversions_arm_impl.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/numerics/safe_conversions_impl.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/numerics/safe_math.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/numerics/safe_math_arm_impl.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/numerics/safe_math_clang_gcc_impl.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/numerics/safe_math_shared_impl.h create mode 100644 
3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/optional.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/ptr_util.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/span.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/stl_util.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/sys_byteorder.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/template_util.h create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/test/scoped_locale.cc create mode 100644 3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/test/scoped_locale.h delete mode 100644 CMakeLists.back.in create mode 100644 CMakeLists.txt create mode 100644 cmake/translation-generate.cmake create mode 100644 htmltopdf/CMakeLists.txt create mode 100644 reader/CMakeLists.txt diff --git a/3rdparty/deepin-pdfium/CMakeLists.txt b/3rdparty/deepin-pdfium/CMakeLists.txt new file mode 100644 index 000000000..c41e6e02b --- /dev/null +++ b/3rdparty/deepin-pdfium/CMakeLists.txt @@ -0,0 +1,153 @@ +cmake_minimum_required(VERSION 3.15) + +set(VERSION "1.0.0" CACHE STRING "PDF rendering library based on PDFium") + +project(deepin-pdfium-reader + VERSION ${VERSION} + DESCRIPTION "PDF rendering library based on PDFium" + LANGUAGES CXX C +) + +# 设置C++标准 +set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_STANDARD_REQUIRED ON) + +set(CMAKE_AUTOMOC ON) +set(CMAKE_AUTORCC ON) +set(CMAKE_AUTOUIC ON) + +# 编译选项 +add_compile_options( + -fstack-protector-strong + -D_FORTITY_SOURCE=1 + -z noexecstack + -pie + -fPIC + -Wno-unused-parameter +) + +# 添加链接选项 +add_link_options( + -z lazy +) + +# 获取系统页大小 +execute_process( + COMMAND getconf PAGESIZE + OUTPUT_VARIABLE SYSTEM_PAGE_SIZE + OUTPUT_STRIP_TRAILING_WHITESPACE +) +add_definitions(-DSYSTEMPAGESIZE=${SYSTEM_PAGE_SIZE}) + +# 查找Qt6依赖包 +find_package(Qt${QT_DESIRED_VERSION} REQUIRED COMPONENTS Core Gui) +find_package(PkgConfig REQUIRED) 
+pkg_check_modules(DEPS REQUIRED + chardet + lcms2 + freetype2 + libopenjp2 +) + +# PDFium 第三方库 +add_subdirectory(src/3rdparty/pdfium) + +# 定义导出宏 +# add_definitions(-DBUILD_DEEPDF_LIB) + +# 主库目标 +add_library(${PROJECT_NAME} SHARED + include/dpdfglobal.h + include/dpdfdoc.h + include/dpdfpage.h + include/dpdfannot.h + src/dpdfglobal.cpp + src/dpdfdoc.cpp + src/dpdfpage.cpp + src/dpdfannot.cpp +) + +# 设置库的版本信息和 SOVERSION +set_target_properties(${PROJECT_NAME} PROPERTIES + VERSION "${PROJECT_VERSION}" + SOVERSION "${PROJECT_VERSION_MAJOR}" +) + +target_include_directories(${PROJECT_NAME} + PUBLIC + $ + $ + PRIVATE + ${DEPS_INCLUDE_DIRS} +) + +target_link_libraries(${PROJECT_NAME} + PRIVATE + pdfium + Qt${QT_DESIRED_VERSION}::Core + Qt${QT_DESIRED_VERSION}::Gui + ${DEPS_LIBRARIES} + z + jpeg + icuuc +) + +# 安装 +include(GNUInstallDirs) + +install(TARGETS ${PROJECT_NAME} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} +) + +if (EXPORT_DEV_FILES) +install(FILES + include/dpdfglobal.h + include/dpdfdoc.h + include/dpdfpage.h + include/dpdfannot.h + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/deepin-pdfium +) + +# pkg-config +configure_file( + ${CMAKE_CURRENT_SOURCE_DIR}/deepin-pdfium.pc.in + ${CMAKE_CURRENT_BINARY_DIR}/deepin-pdfium.pc + @ONLY +) + +install( + FILES ${CMAKE_CURRENT_BINARY_DIR}/deepin-pdfium.pc + DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig +) + +include(CMakePackageConfigHelpers) + +set(INCLUDE_INSTALL_DIR ${CMAKE_INSTALL_INCLUDEDIR}/deepin-pdfium) +set(LIB_INSTALL_DIR ${CMAKE_INSTALL_LIBDIR}) + +configure_package_config_file( + misc/deepin-pdfiumConfig.cmake.in + ${CMAKE_CURRENT_BINARY_DIR}/deepin-pdfiumConfig.cmake + INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/deepin-pdfium + PATH_VARS INCLUDE_INSTALL_DIR LIB_INSTALL_DIR +) + +install(FILES + ${CMAKE_CURRENT_BINARY_DIR}/deepin-pdfiumConfig.cmake + DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/deepin-pdfium +) + +install(TARGETS ${PROJECT_NAME} + EXPORT deepin-pdfiumTargets + LIBRARY DESTINATION 
${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} +) + +install(EXPORT deepin-pdfiumTargets + FILE deepin-pdfiumTargets.cmake + NAMESPACE deepin-pdfium:: + DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/deepin-pdfium +) +endif (EXPORT_DEV_FILES) diff --git a/3rdparty/deepin-pdfium/deepin-pdfium.pc.in b/3rdparty/deepin-pdfium/deepin-pdfium.pc.in new file mode 100644 index 000000000..1c5b68c6f --- /dev/null +++ b/3rdparty/deepin-pdfium/deepin-pdfium.pc.in @@ -0,0 +1,10 @@ +prefix=@CMAKE_INSTALL_PREFIX@ +exec_prefix=@CMAKE_INSTALL_PREFIX@ +libdir=${exec_prefix}/@CMAKE_INSTALL_LIBDIR@ +includedir=${prefix}/@CMAKE_INSTALL_INCLUDEDIR@/deepin-pdfium + +Name: deepin-pdfium +Description: PDF rendering library based on PDFium +Version: @PROJECT_VERSION@ +Libs: -L${libdir} -ldeepin-pdfium +Cflags: -I${includedir} \ No newline at end of file diff --git a/3rdparty/deepin-pdfium/include/dpdfannot.h b/3rdparty/deepin-pdfium/include/dpdfannot.h index 261251af3..e56cf1e91 100644 --- a/3rdparty/deepin-pdfium/include/dpdfannot.h +++ b/3rdparty/deepin-pdfium/include/dpdfannot.h @@ -1,3 +1,7 @@ +// SPDX-FileCopyrightText: 2023 UnionTech Software Technology Co., Ltd. 
+// +// SPDX-License-Identifier: LGPL-3.0-or-later + #ifndef DPDFANNOT_H #define DPDFANNOT_H @@ -8,7 +12,7 @@ #include #include -class DEEPDF_EXPORT DPdfAnnot +class DPdfAnnot { public: enum AnnotType { @@ -48,7 +52,7 @@ class DEEPDF_EXPORT DPdfAnnot QString m_text; }; -class DEEPDF_EXPORT DPdfTextAnnot : public DPdfAnnot +class DPdfTextAnnot : public DPdfAnnot { friend class DPdfPage; friend class DPdfPagePrivate; @@ -65,7 +69,7 @@ class DEEPDF_EXPORT DPdfTextAnnot : public DPdfAnnot QRectF m_rect; }; -class DEEPDF_EXPORT DPdfSquareAnnot : public DPdfAnnot +class DPdfSquareAnnot : public DPdfAnnot { public: DPdfSquareAnnot(); @@ -80,7 +84,7 @@ class DEEPDF_EXPORT DPdfSquareAnnot : public DPdfAnnot QRectF m_rect; }; -class DEEPDF_EXPORT DPdfCIRCLEAnnot : public DPdfAnnot +class DPdfCIRCLEAnnot : public DPdfAnnot { friend class DPdfPage; friend class DPdfPagePrivate; @@ -100,7 +104,7 @@ class DEEPDF_EXPORT DPdfCIRCLEAnnot : public DPdfAnnot QRectF m_rect; }; -class DEEPDF_EXPORT DPdfHightLightAnnot : public DPdfAnnot +class DPdfHightLightAnnot : public DPdfAnnot { friend class DPdfPage; friend class DPdfPagePrivate; @@ -122,7 +126,7 @@ class DEEPDF_EXPORT DPdfHightLightAnnot : public DPdfAnnot QColor m_color; }; -class DEEPDF_EXPORT DPdfLinkAnnot : public DPdfAnnot +class DPdfLinkAnnot : public DPdfAnnot { public: DPdfLinkAnnot(); @@ -171,7 +175,7 @@ class DEEPDF_EXPORT DPdfLinkAnnot : public DPdfAnnot float m_top = 0; }; -class DEEPDF_EXPORT DPdfWidgetAnnot : public DPdfAnnot +class DPdfWidgetAnnot : public DPdfAnnot { public: DPdfWidgetAnnot(); @@ -182,7 +186,7 @@ class DEEPDF_EXPORT DPdfWidgetAnnot : public DPdfAnnot }; -class DEEPDF_EXPORT DPdfUnknownAnnot : public DPdfAnnot +class DPdfUnknownAnnot : public DPdfAnnot { public: DPdfUnknownAnnot(); @@ -193,7 +197,7 @@ class DEEPDF_EXPORT DPdfUnknownAnnot : public DPdfAnnot }; -class DEEPDF_EXPORT DPdfUnderlineAnnot : public DPdfAnnot +class DPdfUnderlineAnnot : public DPdfAnnot { public: DPdfUnderlineAnnot(); diff 
--git a/3rdparty/deepin-pdfium/include/dpdfdoc.h b/3rdparty/deepin-pdfium/include/dpdfdoc.h index 22704696c..2aeae04be 100755 --- a/3rdparty/deepin-pdfium/include/dpdfdoc.h +++ b/3rdparty/deepin-pdfium/include/dpdfdoc.h @@ -1,3 +1,7 @@ +// SPDX-FileCopyrightText: 2023 UnionTech Software Technology Co., Ltd. +// +// SPDX-License-Identifier: LGPL-3.0-or-later + #ifndef DPDFDOC_H #define DPDFDOC_H @@ -13,7 +17,7 @@ class DPdfPage; class DPdfDocHandler; class DPdfDocPrivate; -class DEEPDF_EXPORT DPdfDoc : public QObject +class DPdfDoc : public QObject { Q_OBJECT Q_DECLARE_PRIVATE(DPdfDoc) diff --git a/3rdparty/deepin-pdfium/include/dpdfglobal.h b/3rdparty/deepin-pdfium/include/dpdfglobal.h index bbc6c2b55..7d37fe1a0 100755 --- a/3rdparty/deepin-pdfium/include/dpdfglobal.h +++ b/3rdparty/deepin-pdfium/include/dpdfglobal.h @@ -1,7 +1,12 @@ +// SPDX-FileCopyrightText: 2023 UnionTech Software Technology Co., Ltd. +// +// SPDX-License-Identifier: LGPL-3.0-or-later + #ifndef DPDFGLOBAL_H #define DPDFGLOBAL_H #include +#include #include #include #include diff --git a/3rdparty/deepin-pdfium/include/dpdfpage.h b/3rdparty/deepin-pdfium/include/dpdfpage.h index 045db1f20..fccb585bc 100755 --- a/3rdparty/deepin-pdfium/include/dpdfpage.h +++ b/3rdparty/deepin-pdfium/include/dpdfpage.h @@ -1,3 +1,7 @@ +// SPDX-FileCopyrightText: 2023 UnionTech Software Technology Co., Ltd. 
+// +// SPDX-License-Identifier: LGPL-3.0-or-later + #ifndef DPDFPAGE_H #define DPDFPAGE_H @@ -10,7 +14,7 @@ class DPdfAnnot; class DPdfPagePrivate; class DPdfDocHandler; -class DEEPDF_EXPORT DPdfPage : public QObject +class DPdfPage : public QObject { Q_OBJECT Q_DECLARE_PRIVATE(DPdfPage) @@ -178,19 +182,19 @@ class DEEPDF_EXPORT DPdfPage : public QObject * @brief 添加注释时触发 ,在需要的时候可以重新获取annotations() * @param annot 新增加的annot */ - void annotAdded(DPdfAnnot *dAnnot); + void annotAdded(DPdfAnnot *annot); /** * @brief 注释被更新时触发 ,在需要的时候可以重新获取annotations() * @param annot 被更新的annot */ - void annotUpdated(DPdfAnnot *dAnnot); + void annotUpdated(DPdfAnnot *annot); /** * @brief 注释被删除时触发 ,在需要的时候可以重新获取annotations() * @param annot 被移除的annot 注意这个已经是个将要被析构后的地址 只用于做匹配移除 */ - void annotRemoved(DPdfAnnot *dAnnot); + void annotRemoved(DPdfAnnot *annot); private: DPdfPage(DPdfDocHandler *handler, int pageIndex, qreal xRes = 72, qreal yRes = 72); diff --git a/3rdparty/deepin-pdfium/misc/deepin-pdfiumConfig.cmake.in b/3rdparty/deepin-pdfium/misc/deepin-pdfiumConfig.cmake.in new file mode 100644 index 000000000..f5665fc75 --- /dev/null +++ b/3rdparty/deepin-pdfium/misc/deepin-pdfiumConfig.cmake.in @@ -0,0 +1,11 @@ +@PACKAGE_INIT@ + +set_and_check(deepin-pdfium_INCLUDE_DIR "@PACKAGE_INCLUDE_INSTALL_DIR@") +set_and_check(deepin-pdfium_LIBRARY "@PACKAGE_LIB_INSTALL_DIR@") + +check_required_components(deepin-pdfium) + +include(CMakeFindDependencyMacro) +find_dependency(Qt@QT_VERSION_MAJOR@ COMPONENTS Core Gui REQUIRED) + +include("${CMAKE_CURRENT_LIST_DIR}/deepin-pdfiumTargets.cmake") \ No newline at end of file diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/CMakeLists.txt b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/CMakeLists.txt new file mode 100644 index 000000000..3e5dbc769 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/CMakeLists.txt @@ -0,0 +1,152 @@ +add_library(pdfium STATIC) + +# # 基本定义 +# target_compile_definitions(pdfium +# PRIVATE +# # thrid_party 定义 +# # 
DEFINE_PS_TABLES_DATA +# # FT_CONFIG_OPTION_ADOBE_GLYPH_LIST +# # 系统库定义 +# USE_SYSTEM_LIBPNG +# USE_SYSTEM_ICUUC +# USE_SYSTEM_LCMS2 +# USE_SYSTEM_LIBOPENJPEG2 +# USE_SYSTEM_FREETYPE +# ) +target_compile_definitions(pdfium + PRIVATE + # thrid_party 定义 + DEFINE_PS_TABLES_DATA +) + +# 架构相关定义 +if(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64") + target_compile_definitions(pdfium PRIVATE + _FX_CPU_=_FX_X64_ + ARCH_CPU_ARM64 + ) + target_compile_options(pdfium PRIVATE -fPIC) +elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "mips64") + target_compile_definitions(pdfium PRIVATE _MIPS_ARCH_LOONGSON) + target_compile_options(pdfium PRIVATE + -O3 + -ftree-vectorize + -march=loongson3a + -mhard-float + -mno-micromips + -mno-mips16 + -flax-vector-conversions + -mloongson-ext2 + -mloongson-mmi + ) +elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "loongarch64") + target_compile_definitions(pdfium PRIVATE _MIPS_ARCH_LOONGSON) +elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "sw_64|sw64") + target_compile_definitions(pdfium PRIVATE ARCH_CPU_SW64) + target_compile_options(pdfium PRIVATE -fPIC) +endif() + +file(GLOB_RECURSE PUB_HDR + ${CMAKE_CURRENT_SOURCE_DIR}/pdfium/public/*.h +) + +# fpdfsdk +file(GLOB_RECURSE SDK_HDR + ${CMAKE_CURRENT_SOURCE_DIR}/pdfium/fpdfsdk/*.h +) +file(GLOB_RECURSE SDK_SRC + ${CMAKE_CURRENT_SOURCE_DIR}/pdfium/fpdfsdk/*.cpp +) + +# core +file(GLOB_RECURSE CORE_HDR + ${CMAKE_CURRENT_SOURCE_DIR}/pdfium/core/*.h +) +file(GLOB_RECURSE CORE_SRC + ${CMAKE_CURRENT_SOURCE_DIR}/pdfium/core/*.cpp +) + +# # 源文件 +# target_sources(pdfium +# PRIVATE +# # Public headers +# ${PUB_HDRS} +# ) + + +# # 源文件 +# target_sources(pdfium +# PRIVATE +# # Public headers +# pdfium/public/cpp/fpdf_deleters.h +# pdfium/public/cpp/fpdf_scopers.h +# pdfium/public/fpdf_annot.h +# pdfium/public/fpdf_attachment.h +# pdfium/public/fpdf_catalog.h +# pdfium/public/fpdf_dataavail.h +# pdfium/public/fpdf_doc.h +# pdfium/public/fpdf_edit.h +# pdfium/public/fpdf_ext.h +# pdfium/public/fpdf_flatten.h +# pdfium/public/fpdf_formfill.h +# 
pdfium/public/fpdf_fwlevent.h +# pdfium/public/fpdf_javascript.h +# pdfium/public/fpdf_ppo.h +# pdfium/public/fpdf_progressive.h +# pdfium/public/fpdf_save.h +# pdfium/public/fpdf_searchex.h +# pdfium/public/fpdf_signature.h +# pdfium/public/fpdf_structtree.h +# pdfium/public/fpdf_sysfontinfo.h +# pdfium/public/fpdf_text.h +# pdfium/public/fpdf_thumbnail.h +# pdfium/public/fpdf_transformpage.h +# pdfium/public/fpdfview.h +# ) + +# 包含子模块 +#include(fx_freetype.cmake) +include(fpdfsdk.cmake) +include(core.cmake) +#include(fx_libopenjpeg.cmake) +include(fx_agg.cmake) +# include(fxjs.cmake) +# #include(fx_lcms2.cmake) +include(fx_skia.cmake) +include(fx_base.cmake) + +# target_include_directories(pdfium +# PUBLIC +# ${CMAKE_CURRENT_SOURCE_DIR}/pdfium +# ${CMAKE_CURRENT_SOURCE_DIR}/pdfium/third_party/freetype/include +# PRIVATE +# ${CMAKE_CURRENT_SOURCE_DIR}/pdfium/third_party +# ) + +target_include_directories(pdfium + PUBLIC + ${CMAKE_CURRENT_SOURCE_DIR}/pdfium +) + +# 查找系统依赖 +find_package(PkgConfig REQUIRED) +pkg_check_modules(PDFIUM_DEPS REQUIRED + libopenjp2 + lcms2 + freetype2 + zlib + libpng + libjpeg +) + +# 链接系统依赖 +target_link_libraries(pdfium + PRIVATE + ${PDFIUM_DEPS_LIBRARIES} + icuuc +) + +target_include_directories(pdfium + PRIVATE + ${PDFIUM_DEPS_INCLUDE_DIRS} +) \ No newline at end of file diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/core.cmake b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/core.cmake new file mode 100644 index 000000000..ce7649199 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/core.cmake @@ -0,0 +1,633 @@ +target_sources(pdfium + PRIVATE + # Core files + pdfium/core/fdrm/fx_crypt.h + pdfium/core/fpdfapi/cmaps/CNS1/cmaps_cns1.h + pdfium/core/fpdfapi/cmaps/GB1/cmaps_gb1.h + pdfium/core/fpdfapi/cmaps/Japan1/cmaps_japan1.h + pdfium/core/fpdfapi/cmaps/Korea1/cmaps_korea1.h + pdfium/core/fpdfapi/cmaps/fpdf_cmaps.h + pdfium/core/fpdfapi/edit/cpdf_contentstream_write_utils.h + pdfium/core/fpdfapi/edit/cpdf_creator.h + 
pdfium/core/fpdfapi/edit/cpdf_pagecontentgenerator.h + pdfium/core/fpdfapi/edit/cpdf_pagecontentmanager.h + pdfium/core/fpdfapi/edit/cpdf_stringarchivestream.h + pdfium/core/fpdfapi/font/cfx_cttgsubtable.h + pdfium/core/fpdfapi/font/cfx_stockfontarray.h + pdfium/core/fpdfapi/font/cpdf_cid2unicodemap.h + pdfium/core/fpdfapi/font/cpdf_cidfont.h + pdfium/core/fpdfapi/font/cpdf_cmap.h + pdfium/core/fpdfapi/font/cpdf_cmapmanager.h + pdfium/core/fpdfapi/font/cpdf_cmapparser.h + pdfium/core/fpdfapi/font/cpdf_font.h + pdfium/core/fpdfapi/font/cpdf_fontencoding.h + pdfium/core/fpdfapi/font/cpdf_fontglobals.h + pdfium/core/fpdfapi/font/cpdf_simplefont.h + pdfium/core/fpdfapi/font/cpdf_tounicodemap.h + pdfium/core/fpdfapi/font/cpdf_truetypefont.h + pdfium/core/fpdfapi/font/cpdf_type1font.h + pdfium/core/fpdfapi/font/cpdf_type3char.h + pdfium/core/fpdfapi/font/cpdf_type3font.h + pdfium/core/fpdfapi/page/cpdf_allstates.h + pdfium/core/fpdfapi/page/cpdf_annotcontext.h + pdfium/core/fpdfapi/page/cpdf_clippath.h + pdfium/core/fpdfapi/page/cpdf_color.h + pdfium/core/fpdfapi/page/cpdf_colorspace.h + pdfium/core/fpdfapi/page/cpdf_colorstate.h + pdfium/core/fpdfapi/page/cpdf_contentmarkitem.h + pdfium/core/fpdfapi/page/cpdf_contentmarks.h + pdfium/core/fpdfapi/page/cpdf_contentparser.h + pdfium/core/fpdfapi/page/cpdf_devicecs.h + pdfium/core/fpdfapi/page/cpdf_dib.h + pdfium/core/fpdfapi/page/cpdf_docpagedata.h + pdfium/core/fpdfapi/page/cpdf_expintfunc.h + pdfium/core/fpdfapi/page/cpdf_form.h + pdfium/core/fpdfapi/page/cpdf_formobject.h + pdfium/core/fpdfapi/page/cpdf_function.h + pdfium/core/fpdfapi/page/cpdf_generalstate.h + pdfium/core/fpdfapi/page/cpdf_graphicstates.h + pdfium/core/fpdfapi/page/cpdf_iccprofile.h + pdfium/core/fpdfapi/page/cpdf_image.h + pdfium/core/fpdfapi/page/cpdf_imageobject.h + pdfium/core/fpdfapi/page/cpdf_meshstream.h + pdfium/core/fpdfapi/page/cpdf_occontext.h + pdfium/core/fpdfapi/page/cpdf_page.h + pdfium/core/fpdfapi/page/cpdf_pagemodule.h + 
pdfium/core/fpdfapi/page/cpdf_pageobject.h + pdfium/core/fpdfapi/page/cpdf_pageobjectholder.h + pdfium/core/fpdfapi/page/cpdf_path.h + pdfium/core/fpdfapi/page/cpdf_pathobject.h + pdfium/core/fpdfapi/page/cpdf_pattern.h + pdfium/core/fpdfapi/page/cpdf_patterncs.h + pdfium/core/fpdfapi/page/cpdf_psengine.h + pdfium/core/fpdfapi/page/cpdf_psfunc.h + pdfium/core/fpdfapi/page/cpdf_sampledfunc.h + pdfium/core/fpdfapi/page/cpdf_shadingobject.h + pdfium/core/fpdfapi/page/cpdf_shadingpattern.h + pdfium/core/fpdfapi/page/cpdf_stitchfunc.h + pdfium/core/fpdfapi/page/cpdf_streamcontentparser.h + pdfium/core/fpdfapi/page/cpdf_streamparser.h + pdfium/core/fpdfapi/page/cpdf_textobject.h + pdfium/core/fpdfapi/page/cpdf_textstate.h + pdfium/core/fpdfapi/page/cpdf_tilingpattern.h + pdfium/core/fpdfapi/page/cpdf_transferfunc.h + pdfium/core/fpdfapi/page/cpdf_transferfuncdib.h + pdfium/core/fpdfapi/page/cpdf_transparency.h + pdfium/core/fpdfapi/page/ipdf_page.h + pdfium/core/fpdfapi/parser/cfdf_document.h + pdfium/core/fpdfapi/parser/cpdf_array.h + pdfium/core/fpdfapi/parser/cpdf_boolean.h + pdfium/core/fpdfapi/parser/cpdf_cross_ref_avail.h + pdfium/core/fpdfapi/parser/cpdf_cross_ref_table.h + pdfium/core/fpdfapi/parser/cpdf_crypto_handler.h + pdfium/core/fpdfapi/parser/cpdf_data_avail.h + pdfium/core/fpdfapi/parser/cpdf_dictionary.h + pdfium/core/fpdfapi/parser/cpdf_document.h + pdfium/core/fpdfapi/parser/cpdf_encryptor.h + pdfium/core/fpdfapi/parser/cpdf_flateencoder.h + pdfium/core/fpdfapi/parser/cpdf_hint_tables.h + pdfium/core/fpdfapi/parser/cpdf_indirect_object_holder.h + pdfium/core/fpdfapi/parser/cpdf_linearized_header.h + pdfium/core/fpdfapi/parser/cpdf_name.h + pdfium/core/fpdfapi/parser/cpdf_null.h + pdfium/core/fpdfapi/parser/cpdf_number.h + pdfium/core/fpdfapi/parser/cpdf_object.h + pdfium/core/fpdfapi/parser/cpdf_object_avail.h + pdfium/core/fpdfapi/parser/cpdf_object_stream.h + pdfium/core/fpdfapi/parser/cpdf_object_walker.h + 
pdfium/core/fpdfapi/parser/cpdf_page_object_avail.h + pdfium/core/fpdfapi/parser/cpdf_parser.h + pdfium/core/fpdfapi/parser/cpdf_read_validator.h + pdfium/core/fpdfapi/parser/cpdf_reference.h + pdfium/core/fpdfapi/parser/cpdf_security_handler.h + pdfium/core/fpdfapi/parser/cpdf_simple_parser.h + pdfium/core/fpdfapi/parser/cpdf_stream.h + pdfium/core/fpdfapi/parser/cpdf_stream_acc.h + pdfium/core/fpdfapi/parser/cpdf_string.h + pdfium/core/fpdfapi/parser/cpdf_syntax_parser.h + pdfium/core/fpdfapi/parser/fpdf_parser_decode.h + pdfium/core/fpdfapi/parser/fpdf_parser_utility.h + pdfium/core/fpdfapi/render/charposlist.h + pdfium/core/fpdfapi/render/cpdf_devicebuffer.h + pdfium/core/fpdfapi/render/cpdf_docrenderdata.h + pdfium/core/fpdfapi/render/cpdf_imagecacheentry.h + pdfium/core/fpdfapi/render/cpdf_imageloader.h + pdfium/core/fpdfapi/render/cpdf_imagerenderer.h + pdfium/core/fpdfapi/render/cpdf_pagerendercache.h + pdfium/core/fpdfapi/render/cpdf_pagerendercontext.h + pdfium/core/fpdfapi/render/cpdf_progressiverenderer.h + pdfium/core/fpdfapi/render/cpdf_rendercontext.h + pdfium/core/fpdfapi/render/cpdf_renderoptions.h + pdfium/core/fpdfapi/render/cpdf_rendershading.h + pdfium/core/fpdfapi/render/cpdf_renderstatus.h + pdfium/core/fpdfapi/render/cpdf_rendertiling.h + pdfium/core/fpdfapi/render/cpdf_scaledrenderbuffer.h + pdfium/core/fpdfapi/render/cpdf_textrenderer.h + pdfium/core/fpdfapi/render/cpdf_type3cache.h + pdfium/core/fpdfapi/render/cpdf_type3glyphmap.h + pdfium/core/fpdfdoc/cba_fontmap.h + pdfium/core/fpdfdoc/cline.h + pdfium/core/fpdfdoc/cpdf_aaction.h + pdfium/core/fpdfdoc/cpdf_action.h + pdfium/core/fpdfdoc/cpdf_annot.h + pdfium/core/fpdfdoc/cpdf_annotlist.h + pdfium/core/fpdfdoc/cpdf_apsettings.h + pdfium/core/fpdfdoc/cpdf_bookmark.h + pdfium/core/fpdfdoc/cpdf_bookmarktree.h + pdfium/core/fpdfdoc/cpdf_color_utils.h + pdfium/core/fpdfdoc/cpdf_defaultappearance.h + pdfium/core/fpdfdoc/cpdf_dest.h + pdfium/core/fpdfdoc/cpdf_filespec.h + 
pdfium/core/fpdfdoc/cpdf_formcontrol.h + pdfium/core/fpdfdoc/cpdf_formfield.h + pdfium/core/fpdfdoc/cpdf_icon.h + pdfium/core/fpdfdoc/cpdf_iconfit.h + pdfium/core/fpdfdoc/cpdf_interactiveform.h + pdfium/core/fpdfdoc/cpdf_link.h + pdfium/core/fpdfdoc/cpdf_linklist.h + pdfium/core/fpdfdoc/cpdf_metadata.h + pdfium/core/fpdfdoc/cpdf_nametree.h + pdfium/core/fpdfdoc/cpdf_numbertree.h + pdfium/core/fpdfdoc/cpdf_pagelabel.h + pdfium/core/fpdfdoc/cpdf_structelement.h + pdfium/core/fpdfdoc/cpdf_structtree.h + pdfium/core/fpdfdoc/cpdf_variabletext.h + pdfium/core/fpdfdoc/cpdf_viewerpreferences.h + pdfium/core/fpdfdoc/cpvt_floatrect.h + pdfium/core/fpdfdoc/cpvt_fontmap.h + pdfium/core/fpdfdoc/cpvt_generateap.h + pdfium/core/fpdfdoc/cpvt_line.h + pdfium/core/fpdfdoc/cpvt_lineinfo.h + pdfium/core/fpdfdoc/cpvt_word.h + pdfium/core/fpdfdoc/cpvt_wordinfo.h + pdfium/core/fpdfdoc/cpvt_wordplace.h + pdfium/core/fpdfdoc/cpvt_wordrange.h + pdfium/core/fpdfdoc/csection.h + pdfium/core/fpdfdoc/ctypeset.h + pdfium/core/fpdfdoc/ipvt_fontmap.h + pdfium/core/fpdftext/cpdf_linkextract.h + pdfium/core/fpdftext/cpdf_textpage.h + pdfium/core/fpdftext/cpdf_textpagefind.h + pdfium/core/fpdftext/unicodenormalizationdata.h + pdfium/core/fxcodec/basic/basicmodule.h + pdfium/core/fxcodec/cfx_codec_memory.h + pdfium/core/fxcodec/fax/faxmodule.h + pdfium/core/fxcodec/flate/flatemodule.h + pdfium/core/fxcodec/fx_codec.h + pdfium/core/fxcodec/fx_codec_def.h + pdfium/core/fxcodec/icc/iccmodule.h + pdfium/core/fxcodec/jbig2/JBig2_ArithDecoder.h + pdfium/core/fxcodec/jbig2/JBig2_ArithIntDecoder.h + pdfium/core/fxcodec/jbig2/JBig2_BitStream.h + pdfium/core/fxcodec/jbig2/JBig2_Context.h + pdfium/core/fxcodec/jbig2/JBig2_Define.h + pdfium/core/fxcodec/jbig2/JBig2_DocumentContext.h + pdfium/core/fxcodec/jbig2/JBig2_GrdProc.h + pdfium/core/fxcodec/jbig2/JBig2_GrrdProc.h + pdfium/core/fxcodec/jbig2/JBig2_HtrdProc.h + pdfium/core/fxcodec/jbig2/JBig2_HuffmanDecoder.h + pdfium/core/fxcodec/jbig2/JBig2_HuffmanTable.h 
+ pdfium/core/fxcodec/jbig2/JBig2_Image.h + pdfium/core/fxcodec/jbig2/JBig2_Page.h + pdfium/core/fxcodec/jbig2/JBig2_PatternDict.h + pdfium/core/fxcodec/jbig2/JBig2_PddProc.h + pdfium/core/fxcodec/jbig2/JBig2_SddProc.h + pdfium/core/fxcodec/jbig2/JBig2_Segment.h + pdfium/core/fxcodec/jbig2/JBig2_SymbolDict.h + pdfium/core/fxcodec/jbig2/JBig2_TrdProc.h + pdfium/core/fxcodec/jbig2/jbig2_decoder.h + pdfium/core/fxcodec/jpeg/jpeg_common.h + pdfium/core/fxcodec/jpeg/jpegmodule.h + pdfium/core/fxcodec/jpx/cjpx_decoder.h + pdfium/core/fxcodec/jpx/jpx_decode_utils.h + pdfium/core/fxcodec/scanlinedecoder.h + pdfium/core/fxcrt/xml/cfx_xmlchardata.h + pdfium/core/fxcrt/xml/cfx_xmldocument.h + pdfium/core/fxcrt/xml/cfx_xmlelement.h + pdfium/core/fxcrt/xml/cfx_xmlinstruction.h + pdfium/core/fxcrt/xml/cfx_xmlnode.h + pdfium/core/fxcrt/xml/cfx_xmlparser.h + pdfium/core/fxcrt/xml/cfx_xmltext.h + pdfium/core/fxcrt/autonuller.h + pdfium/core/fxcrt/autorestorer.h + pdfium/core/fxcrt/byteorder.h + pdfium/core/fxcrt/bytestring.h + pdfium/core/fxcrt/cfx_binarybuf.h + pdfium/core/fxcrt/cfx_bitstream.h + pdfium/core/fxcrt/cfx_datetime.h + pdfium/core/fxcrt/cfx_fixedbufgrow.h + pdfium/core/fxcrt/cfx_readonlymemorystream.h + pdfium/core/fxcrt/cfx_seekablestreamproxy.h + pdfium/core/fxcrt/cfx_timer.h + pdfium/core/fxcrt/cfx_utf8decoder.h + pdfium/core/fxcrt/cfx_utf8encoder.h + pdfium/core/fxcrt/cfx_widetextbuf.h + pdfium/core/fxcrt/fileaccess_iface.h + pdfium/core/fxcrt/fx_bidi.h + pdfium/core/fxcrt/fx_codepage.h + pdfium/core/fxcrt/fx_coordinates.h + pdfium/core/fxcrt/fx_extension.h + pdfium/core/fxcrt/fx_memory.h + pdfium/core/fxcrt/fx_memory_wrappers.h + pdfium/core/fxcrt/fx_number.h + pdfium/core/fxcrt/fx_random.h + pdfium/core/fxcrt/fx_safe_types.h + pdfium/core/fxcrt/fx_stream.h + pdfium/core/fxcrt/fx_string.h + pdfium/core/fxcrt/fx_system.h + pdfium/core/fxcrt/fx_unicode.h + pdfium/core/fxcrt/maybe_owned.h + pdfium/core/fxcrt/observed_ptr.h + pdfium/core/fxcrt/pauseindicator_iface.h + 
pdfium/core/fxcrt/retain_ptr.h + pdfium/core/fxcrt/retained_tree_node.h + pdfium/core/fxcrt/shared_copy_on_write.h + pdfium/core/fxcrt/string_data_template.h + pdfium/core/fxcrt/string_pool_template.h + pdfium/core/fxcrt/string_view_template.h + pdfium/core/fxcrt/timerhandler_iface.h + pdfium/core/fxcrt/tree_node.h + pdfium/core/fxcrt/unowned_ptr.h + pdfium/core/fxcrt/weak_ptr.h + pdfium/core/fxcrt/widestring.h + pdfium/core/fxge/dib/cfx_bitmapcomposer.h + pdfium/core/fxge/dib/cfx_bitmapstorer.h + pdfium/core/fxge/dib/cfx_cmyk_to_srgb.h + pdfium/core/fxge/dib/cfx_dibbase.h + pdfium/core/fxge/dib/cfx_dibitmap.h + pdfium/core/fxge/dib/cfx_imagerenderer.h + pdfium/core/fxge/dib/cfx_imagestretcher.h + pdfium/core/fxge/dib/cfx_imagetransformer.h + pdfium/core/fxge/dib/cfx_scanlinecompositor.h + pdfium/core/fxge/dib/cstretchengine.h + pdfium/core/fxge/dib/scanlinecomposer_iface.h + pdfium/core/fxge/fontdata/chromefontdata/chromefontdata.h + pdfium/core/fxge/cfx_cliprgn.h + pdfium/core/fxge/cfx_color.h + pdfium/core/fxge/cfx_defaultrenderdevice.h + pdfium/core/fxge/cfx_drawutils.h + pdfium/core/fxge/cfx_face.h + pdfium/core/fxge/cfx_fillrenderoptions.h + pdfium/core/fxge/cfx_folderfontinfo.h + pdfium/core/fxge/cfx_font.h + pdfium/core/fxge/cfx_fontcache.h + pdfium/core/fxge/cfx_fontmapper.h + pdfium/core/fxge/cfx_fontmgr.h + pdfium/core/fxge/cfx_gemodule.h + pdfium/core/fxge/cfx_glyphbitmap.h + pdfium/core/fxge/cfx_glyphcache.h + pdfium/core/fxge/cfx_graphstate.h + pdfium/core/fxge/cfx_graphstatedata.h + pdfium/core/fxge/cfx_pathdata.h + pdfium/core/fxge/cfx_renderdevice.h + pdfium/core/fxge/cfx_substfont.h + pdfium/core/fxge/cfx_textrenderoptions.h + pdfium/core/fxge/cfx_unicodeencoding.h + pdfium/core/fxge/fx_dib.h + pdfium/core/fxge/fx_font.h + pdfium/core/fxge/fx_freetype.h + pdfium/core/fxge/render_defines.h + pdfium/core/fxge/renderdevicedriver_iface.h + pdfium/core/fxge/scoped_font_transform.h + pdfium/core/fxge/systemfontinfo_iface.h + 
pdfium/core/fxge/text_char_pos.h + pdfium/core/fxge/text_glyph_pos.h + pdfium/core/fxge/agg/fx_agg_driver.h + pdfium/core/fxcrt/cfx_fileaccess_posix.h + + # Source files + pdfium/core/fxcrt/cfx_fileaccess_posix.cpp + pdfium/core/fdrm/fx_crypt.cpp + pdfium/core/fdrm/fx_crypt_aes.cpp + pdfium/core/fdrm/fx_crypt_sha.cpp + pdfium/core/fpdfapi/cmaps/CNS1/Adobe-CNS1-UCS2_5.cpp + pdfium/core/fpdfapi/cmaps/CNS1/B5pc-H_0.cpp + pdfium/core/fpdfapi/cmaps/CNS1/B5pc-V_0.cpp + pdfium/core/fpdfapi/cmaps/CNS1/cmaps_cns1.cpp + pdfium/core/fpdfapi/cmaps/CNS1/CNS-EUC-H_0.cpp + pdfium/core/fpdfapi/cmaps/CNS1/CNS-EUC-V_0.cpp + pdfium/core/fpdfapi/cmaps/CNS1/ETen-B5-H_0.cpp + pdfium/core/fpdfapi/cmaps/CNS1/ETen-B5-V_0.cpp + pdfium/core/fpdfapi/cmaps/CNS1/ETenms-B5-H_0.cpp + pdfium/core/fpdfapi/cmaps/CNS1/ETenms-B5-V_0.cpp + pdfium/core/fpdfapi/cmaps/CNS1/HKscs-B5-H_5.cpp + pdfium/core/fpdfapi/cmaps/CNS1/HKscs-B5-V_5.cpp + pdfium/core/fpdfapi/cmaps/CNS1/UniCNS-UCS2-H_3.cpp + pdfium/core/fpdfapi/cmaps/CNS1/UniCNS-UCS2-V_3.cpp + pdfium/core/fpdfapi/cmaps/CNS1/UniCNS-UTF16-H_0.cpp + pdfium/core/fpdfapi/cmaps/GB1/Adobe-GB1-UCS2_5.cpp + pdfium/core/fpdfapi/cmaps/GB1/cmaps_gb1.cpp + pdfium/core/fpdfapi/cmaps/GB1/GB-EUC-H_0.cpp + pdfium/core/fpdfapi/cmaps/GB1/GB-EUC-V_0.cpp + pdfium/core/fpdfapi/cmaps/GB1/GBK-EUC-H_2.cpp + pdfium/core/fpdfapi/cmaps/GB1/GBK-EUC-V_2.cpp + pdfium/core/fpdfapi/cmaps/GB1/GBK2K-H_5.cpp + pdfium/core/fpdfapi/cmaps/GB1/GBK2K-V_5.cpp + pdfium/core/fpdfapi/cmaps/GB1/GBKp-EUC-H_2.cpp + pdfium/core/fpdfapi/cmaps/GB1/GBKp-EUC-V_2.cpp + pdfium/core/fpdfapi/cmaps/GB1/GBpc-EUC-H_0.cpp + pdfium/core/fpdfapi/cmaps/GB1/GBpc-EUC-V_0.cpp + pdfium/core/fpdfapi/cmaps/GB1/UniGB-UCS2-H_4.cpp + pdfium/core/fpdfapi/cmaps/GB1/UniGB-UCS2-V_4.cpp + pdfium/core/fpdfapi/cmaps/Japan1/83pv-RKSJ-H_1.cpp + pdfium/core/fpdfapi/cmaps/Japan1/90ms-RKSJ-H_2.cpp + pdfium/core/fpdfapi/cmaps/Japan1/90ms-RKSJ-V_2.cpp + pdfium/core/fpdfapi/cmaps/Japan1/90msp-RKSJ-H_2.cpp + 
pdfium/core/fpdfapi/cmaps/Japan1/90msp-RKSJ-V_2.cpp + pdfium/core/fpdfapi/cmaps/Japan1/90pv-RKSJ-H_1.cpp + pdfium/core/fpdfapi/cmaps/Japan1/Add-RKSJ-H_1.cpp + pdfium/core/fpdfapi/cmaps/Japan1/Add-RKSJ-V_1.cpp + pdfium/core/fpdfapi/cmaps/Japan1/Adobe-Japan1-UCS2_4.cpp + pdfium/core/fpdfapi/cmaps/Japan1/cmaps_japan1.cpp + pdfium/core/fpdfapi/cmaps/Japan1/EUC-H_1.cpp + pdfium/core/fpdfapi/cmaps/Japan1/EUC-V_1.cpp + pdfium/core/fpdfapi/cmaps/Japan1/Ext-RKSJ-H_2.cpp + pdfium/core/fpdfapi/cmaps/Japan1/Ext-RKSJ-V_2.cpp + pdfium/core/fpdfapi/cmaps/Japan1/H_1.cpp + pdfium/core/fpdfapi/cmaps/Japan1/UniJIS-UCS2-H_4.cpp + pdfium/core/fpdfapi/cmaps/Japan1/UniJIS-UCS2-HW-H_4.cpp + pdfium/core/fpdfapi/cmaps/Japan1/UniJIS-UCS2-HW-V_4.cpp + pdfium/core/fpdfapi/cmaps/Japan1/UniJIS-UCS2-V_4.cpp + pdfium/core/fpdfapi/cmaps/Japan1/V_1.cpp + pdfium/core/fpdfapi/cmaps/Korea1/Adobe-Korea1-UCS2_2.cpp + pdfium/core/fpdfapi/cmaps/Korea1/cmaps_korea1.cpp + pdfium/core/fpdfapi/cmaps/Korea1/KSC-EUC-H_0.cpp + pdfium/core/fpdfapi/cmaps/Korea1/KSC-EUC-V_0.cpp + pdfium/core/fpdfapi/cmaps/Korea1/KSCms-UHC-H_1.cpp + pdfium/core/fpdfapi/cmaps/Korea1/KSCms-UHC-HW-H_1.cpp + pdfium/core/fpdfapi/cmaps/Korea1/KSCms-UHC-HW-V_1.cpp + pdfium/core/fpdfapi/cmaps/Korea1/KSCms-UHC-V_1.cpp + pdfium/core/fpdfapi/cmaps/Korea1/KSCpc-EUC-H_0.cpp + pdfium/core/fpdfapi/cmaps/Korea1/UniKS-UCS2-H_1.cpp + pdfium/core/fpdfapi/cmaps/Korea1/UniKS-UCS2-V_1.cpp + pdfium/core/fpdfapi/cmaps/Korea1/UniKS-UTF16-H_0.cpp + pdfium/core/fpdfapi/cmaps/fpdf_cmaps.cpp + pdfium/core/fpdfapi/edit/cpdf_contentstream_write_utils.cpp + pdfium/core/fpdfapi/edit/cpdf_creator.cpp + pdfium/core/fpdfapi/edit/cpdf_pagecontentgenerator.cpp + pdfium/core/fpdfapi/edit/cpdf_pagecontentmanager.cpp + pdfium/core/fpdfapi/edit/cpdf_stringarchivestream.cpp + pdfium/core/fpdfapi/font/cfx_cttgsubtable.cpp + pdfium/core/fpdfapi/font/cfx_stockfontarray.cpp + pdfium/core/fpdfapi/font/cpdf_cid2unicodemap.cpp + pdfium/core/fpdfapi/font/cpdf_cidfont.cpp + 
pdfium/core/fpdfapi/font/cpdf_cmap.cpp + pdfium/core/fpdfapi/font/cpdf_cmapmanager.cpp + pdfium/core/fpdfapi/font/cpdf_cmapparser.cpp + pdfium/core/fpdfapi/font/cpdf_font.cpp + pdfium/core/fpdfapi/font/cpdf_fontencoding.cpp + pdfium/core/fpdfapi/font/cpdf_fontglobals.cpp + pdfium/core/fpdfapi/font/cpdf_simplefont.cpp + pdfium/core/fpdfapi/font/cpdf_tounicodemap.cpp + pdfium/core/fpdfapi/font/cpdf_truetypefont.cpp + pdfium/core/fpdfapi/font/cpdf_type1font.cpp + pdfium/core/fpdfapi/font/cpdf_type3char.cpp + pdfium/core/fpdfapi/font/cpdf_type3font.cpp + pdfium/core/fpdfapi/page/cpdf_allstates.cpp + pdfium/core/fpdfapi/page/cpdf_annotcontext.cpp + pdfium/core/fpdfapi/page/cpdf_clippath.cpp + pdfium/core/fpdfapi/page/cpdf_color.cpp + pdfium/core/fpdfapi/page/cpdf_colorspace.cpp + pdfium/core/fpdfapi/page/cpdf_colorstate.cpp + pdfium/core/fpdfapi/page/cpdf_contentmarkitem.cpp + pdfium/core/fpdfapi/page/cpdf_contentmarks.cpp + pdfium/core/fpdfapi/page/cpdf_contentparser.cpp + pdfium/core/fpdfapi/page/cpdf_devicecs.cpp + pdfium/core/fpdfapi/page/cpdf_dib.cpp + pdfium/core/fpdfapi/page/cpdf_docpagedata.cpp + pdfium/core/fpdfapi/page/cpdf_expintfunc.cpp + pdfium/core/fpdfapi/page/cpdf_form.cpp + pdfium/core/fpdfapi/page/cpdf_formobject.cpp + pdfium/core/fpdfapi/page/cpdf_function.cpp + pdfium/core/fpdfapi/page/cpdf_generalstate.cpp + pdfium/core/fpdfapi/page/cpdf_graphicstates.cpp + pdfium/core/fpdfapi/page/cpdf_iccprofile.cpp + pdfium/core/fpdfapi/page/cpdf_image.cpp + pdfium/core/fpdfapi/page/cpdf_imageobject.cpp + pdfium/core/fpdfapi/page/cpdf_meshstream.cpp + pdfium/core/fpdfapi/page/cpdf_occontext.cpp + pdfium/core/fpdfapi/page/cpdf_page.cpp + pdfium/core/fpdfapi/page/cpdf_pagemodule.cpp + pdfium/core/fpdfapi/page/cpdf_pageobject.cpp + pdfium/core/fpdfapi/page/cpdf_pageobjectholder.cpp + pdfium/core/fpdfapi/page/cpdf_path.cpp + pdfium/core/fpdfapi/page/cpdf_pathobject.cpp + pdfium/core/fpdfapi/page/cpdf_pattern.cpp + pdfium/core/fpdfapi/page/cpdf_patterncs.cpp + 
pdfium/core/fpdfapi/page/cpdf_psengine.cpp + pdfium/core/fpdfapi/page/cpdf_psfunc.cpp + pdfium/core/fpdfapi/page/cpdf_sampledfunc.cpp + pdfium/core/fpdfapi/page/cpdf_shadingobject.cpp + pdfium/core/fpdfapi/page/cpdf_shadingpattern.cpp + pdfium/core/fpdfapi/page/cpdf_stitchfunc.cpp + pdfium/core/fpdfapi/page/cpdf_streamcontentparser.cpp + pdfium/core/fpdfapi/page/cpdf_streamparser.cpp + pdfium/core/fpdfapi/page/cpdf_textobject.cpp + pdfium/core/fpdfapi/page/cpdf_textstate.cpp + pdfium/core/fpdfapi/page/cpdf_tilingpattern.cpp + pdfium/core/fpdfapi/page/cpdf_transferfunc.cpp + pdfium/core/fpdfapi/page/cpdf_transferfuncdib.cpp + pdfium/core/fpdfapi/page/cpdf_transparency.cpp + pdfium/core/fpdfapi/parser/cfdf_document.cpp + pdfium/core/fpdfapi/parser/cpdf_array.cpp + pdfium/core/fpdfapi/parser/cpdf_boolean.cpp + pdfium/core/fpdfapi/parser/cpdf_cross_ref_avail.cpp + pdfium/core/fpdfapi/parser/cpdf_cross_ref_table.cpp + pdfium/core/fpdfapi/parser/cpdf_crypto_handler.cpp + pdfium/core/fpdfapi/parser/cpdf_data_avail.cpp + pdfium/core/fpdfapi/parser/cpdf_dictionary.cpp + pdfium/core/fpdfapi/parser/cpdf_document.cpp + pdfium/core/fpdfapi/parser/cpdf_encryptor.cpp + pdfium/core/fpdfapi/parser/cpdf_flateencoder.cpp + pdfium/core/fpdfapi/parser/cpdf_hint_tables.cpp + pdfium/core/fpdfapi/parser/cpdf_indirect_object_holder.cpp + pdfium/core/fpdfapi/parser/cpdf_linearized_header.cpp + pdfium/core/fpdfapi/parser/cpdf_name.cpp + pdfium/core/fpdfapi/parser/cpdf_null.cpp + pdfium/core/fpdfapi/parser/cpdf_number.cpp + pdfium/core/fpdfapi/parser/cpdf_object.cpp + pdfium/core/fpdfapi/parser/cpdf_object_avail.cpp + pdfium/core/fpdfapi/parser/cpdf_object_stream.cpp + pdfium/core/fpdfapi/parser/cpdf_object_walker.cpp + pdfium/core/fpdfapi/parser/cpdf_page_object_avail.cpp + pdfium/core/fpdfapi/parser/cpdf_parser.cpp + pdfium/core/fpdfapi/parser/cpdf_read_validator.cpp + pdfium/core/fpdfapi/parser/cpdf_reference.cpp + pdfium/core/fpdfapi/parser/cpdf_security_handler.cpp + 
pdfium/core/fpdfapi/parser/cpdf_simple_parser.cpp + pdfium/core/fpdfapi/parser/cpdf_stream.cpp + pdfium/core/fpdfapi/parser/cpdf_stream_acc.cpp + pdfium/core/fpdfapi/parser/cpdf_string.cpp + pdfium/core/fpdfapi/parser/cpdf_syntax_parser.cpp + pdfium/core/fpdfapi/parser/fpdf_parser_decode.cpp + pdfium/core/fpdfapi/parser/fpdf_parser_utility.cpp + pdfium/core/fpdfapi/render/charposlist.cpp + pdfium/core/fpdfapi/render/cpdf_devicebuffer.cpp + pdfium/core/fpdfapi/render/cpdf_docrenderdata.cpp + pdfium/core/fpdfapi/render/cpdf_imagecacheentry.cpp + pdfium/core/fpdfapi/render/cpdf_imageloader.cpp + pdfium/core/fpdfapi/render/cpdf_imagerenderer.cpp + pdfium/core/fpdfapi/render/cpdf_pagerendercache.cpp + pdfium/core/fpdfapi/render/cpdf_pagerendercontext.cpp + pdfium/core/fpdfapi/render/cpdf_progressiverenderer.cpp + pdfium/core/fpdfapi/render/cpdf_rendercontext.cpp + pdfium/core/fpdfapi/render/cpdf_renderoptions.cpp + pdfium/core/fpdfapi/render/cpdf_rendershading.cpp + pdfium/core/fpdfapi/render/cpdf_renderstatus.cpp + pdfium/core/fpdfapi/render/cpdf_rendertiling.cpp + pdfium/core/fpdfapi/render/cpdf_scaledrenderbuffer.cpp + pdfium/core/fpdfapi/render/cpdf_textrenderer.cpp + pdfium/core/fpdfapi/render/cpdf_type3cache.cpp + pdfium/core/fpdfapi/render/cpdf_type3glyphmap.cpp + pdfium/core/fpdfdoc/cba_fontmap.cpp + pdfium/core/fpdfdoc/cline.cpp + pdfium/core/fpdfdoc/cpdf_aaction.cpp + pdfium/core/fpdfdoc/cpdf_action.cpp + pdfium/core/fpdfdoc/cpdf_annot.cpp + pdfium/core/fpdfdoc/cpdf_annotlist.cpp + pdfium/core/fpdfdoc/cpdf_apsettings.cpp + pdfium/core/fpdfdoc/cpdf_bookmark.cpp + pdfium/core/fpdfdoc/cpdf_bookmarktree.cpp + pdfium/core/fpdfdoc/cpdf_color_utils.cpp + pdfium/core/fpdfdoc/cpdf_defaultappearance.cpp + pdfium/core/fpdfdoc/cpdf_dest.cpp + pdfium/core/fpdfdoc/cpdf_filespec.cpp + pdfium/core/fpdfdoc/cpdf_formcontrol.cpp + pdfium/core/fpdfdoc/cpdf_formfield.cpp + pdfium/core/fpdfdoc/cpdf_icon.cpp + pdfium/core/fpdfdoc/cpdf_iconfit.cpp + 
pdfium/core/fpdfdoc/cpdf_interactiveform.cpp + pdfium/core/fpdfdoc/cpdf_link.cpp + pdfium/core/fpdfdoc/cpdf_linklist.cpp + pdfium/core/fpdfdoc/cpdf_metadata.cpp + pdfium/core/fpdfdoc/cpdf_nametree.cpp + pdfium/core/fpdfdoc/cpdf_numbertree.cpp + pdfium/core/fpdfdoc/cpdf_pagelabel.cpp + pdfium/core/fpdfdoc/cpdf_structelement.cpp + pdfium/core/fpdfdoc/cpdf_structtree.cpp + pdfium/core/fpdfdoc/cpdf_variabletext.cpp + pdfium/core/fpdfdoc/cpdf_viewerpreferences.cpp + pdfium/core/fpdfdoc/cpvt_fontmap.cpp + pdfium/core/fpdfdoc/cpvt_generateap.cpp + pdfium/core/fpdfdoc/cpvt_wordinfo.cpp + pdfium/core/fpdfdoc/csection.cpp + pdfium/core/fpdfdoc/ctypeset.cpp + pdfium/core/fpdftext/cpdf_linkextract.cpp + pdfium/core/fpdftext/cpdf_textpage.cpp + pdfium/core/fpdftext/cpdf_textpagefind.cpp + pdfium/core/fpdftext/unicodenormalizationdata.cpp + pdfium/core/fxcodec/basic/basicmodule.cpp + pdfium/core/fxcodec/cfx_codec_memory.cpp + pdfium/core/fxcodec/fax/faxmodule.cpp + pdfium/core/fxcodec/flate/flatemodule.cpp + pdfium/core/fxcodec/fx_codec.cpp + pdfium/core/fxcodec/icc/iccmodule.cpp + pdfium/core/fxcodec/jbig2/JBig2_ArithDecoder.cpp + pdfium/core/fxcodec/jbig2/JBig2_ArithIntDecoder.cpp + pdfium/core/fxcodec/jbig2/JBig2_BitStream.cpp + pdfium/core/fxcodec/jbig2/JBig2_Context.cpp + pdfium/core/fxcodec/jbig2/JBig2_DocumentContext.cpp + pdfium/core/fxcodec/jbig2/JBig2_GrdProc.cpp + pdfium/core/fxcodec/jbig2/JBig2_GrrdProc.cpp + pdfium/core/fxcodec/jbig2/JBig2_HtrdProc.cpp + pdfium/core/fxcodec/jbig2/JBig2_HuffmanDecoder.cpp + pdfium/core/fxcodec/jbig2/JBig2_HuffmanTable.cpp + pdfium/core/fxcodec/jbig2/JBig2_Image.cpp + pdfium/core/fxcodec/jbig2/JBig2_PatternDict.cpp + pdfium/core/fxcodec/jbig2/JBig2_PddProc.cpp + pdfium/core/fxcodec/jbig2/JBig2_SddProc.cpp + pdfium/core/fxcodec/jbig2/JBig2_Segment.cpp + pdfium/core/fxcodec/jbig2/JBig2_SymbolDict.cpp + pdfium/core/fxcodec/jbig2/JBig2_TrdProc.cpp + pdfium/core/fxcodec/jbig2/jbig2_decoder.cpp + pdfium/core/fxcodec/jpeg/jpeg_common.cpp + 
pdfium/core/fxcodec/jpeg/jpegmodule.cpp + pdfium/core/fxcodec/jpx/cjpx_decoder.cpp + pdfium/core/fxcodec/jpx/jpx_decode_utils.cpp + pdfium/core/fxcodec/scanlinedecoder.cpp + pdfium/core/fxcrt/xml/cfx_xmlchardata.cpp + pdfium/core/fxcrt/xml/cfx_xmldocument.cpp + pdfium/core/fxcrt/xml/cfx_xmlelement.cpp + pdfium/core/fxcrt/xml/cfx_xmlinstruction.cpp + pdfium/core/fxcrt/xml/cfx_xmlnode.cpp + pdfium/core/fxcrt/xml/cfx_xmlparser.cpp + pdfium/core/fxcrt/xml/cfx_xmltext.cpp + pdfium/core/fxcrt/bytestring.cpp + pdfium/core/fxcrt/cfx_binarybuf.cpp + pdfium/core/fxcrt/cfx_bitstream.cpp + pdfium/core/fxcrt/cfx_datetime.cpp + pdfium/core/fxcrt/cfx_readonlymemorystream.cpp + pdfium/core/fxcrt/cfx_seekablestreamproxy.cpp + pdfium/core/fxcrt/cfx_timer.cpp + pdfium/core/fxcrt/cfx_utf8decoder.cpp + pdfium/core/fxcrt/cfx_utf8encoder.cpp + pdfium/core/fxcrt/cfx_widetextbuf.cpp + pdfium/core/fxcrt/fx_bidi.cpp + pdfium/core/fxcrt/fx_codepage.cpp + pdfium/core/fxcrt/fx_coordinates.cpp + pdfium/core/fxcrt/fx_extension.cpp + pdfium/core/fxcrt/fx_memory.cpp + pdfium/core/fxcrt/fx_number.cpp + pdfium/core/fxcrt/fx_random.cpp + pdfium/core/fxcrt/fx_stream.cpp + pdfium/core/fxcrt/fx_string.cpp + pdfium/core/fxcrt/fx_system.cpp + pdfium/core/fxcrt/fx_unicode.cpp + pdfium/core/fxcrt/observed_ptr.cpp + pdfium/core/fxcrt/string_data_template.cpp + pdfium/core/fxcrt/widestring.cpp + pdfium/core/fxge/dib/cfx_bitmapcomposer.cpp + pdfium/core/fxge/dib/cfx_bitmapstorer.cpp + pdfium/core/fxge/dib/cfx_cmyk_to_srgb.cpp + pdfium/core/fxge/dib/cfx_dibbase.cpp + pdfium/core/fxge/dib/cfx_dibitmap.cpp + pdfium/core/fxge/dib/cfx_imagerenderer.cpp + pdfium/core/fxge/dib/cfx_imagestretcher.cpp + pdfium/core/fxge/dib/cfx_imagetransformer.cpp + pdfium/core/fxge/dib/cfx_scanlinecompositor.cpp + pdfium/core/fxge/dib/cstretchengine.cpp + pdfium/core/fxge/dib/fx_dib_main.cpp + pdfium/core/fxge/fontdata/chromefontdata/FoxitDingbats.cpp + pdfium/core/fxge/fontdata/chromefontdata/FoxitFixed.cpp + 
pdfium/core/fxge/fontdata/chromefontdata/FoxitFixedBold.cpp + pdfium/core/fxge/fontdata/chromefontdata/FoxitFixedBoldItalic.cpp + pdfium/core/fxge/fontdata/chromefontdata/FoxitFixedItalic.cpp + pdfium/core/fxge/fontdata/chromefontdata/FoxitSans.cpp + pdfium/core/fxge/fontdata/chromefontdata/FoxitSansBold.cpp + pdfium/core/fxge/fontdata/chromefontdata/FoxitSansBoldItalic.cpp + pdfium/core/fxge/fontdata/chromefontdata/FoxitSansItalic.cpp + pdfium/core/fxge/fontdata/chromefontdata/FoxitSansMM.cpp + pdfium/core/fxge/fontdata/chromefontdata/FoxitSerif.cpp + pdfium/core/fxge/fontdata/chromefontdata/FoxitSerifBold.cpp + pdfium/core/fxge/fontdata/chromefontdata/FoxitSerifBoldItalic.cpp + pdfium/core/fxge/fontdata/chromefontdata/FoxitSerifItalic.cpp + pdfium/core/fxge/fontdata/chromefontdata/FoxitSerifMM.cpp + pdfium/core/fxge/fontdata/chromefontdata/FoxitSymbol.cpp + pdfium/core/fxge/freetype/fx_freetype.cpp + pdfium/core/fxge/cfx_cliprgn.cpp + pdfium/core/fxge/cfx_color.cpp + pdfium/core/fxge/cfx_drawutils.cpp + pdfium/core/fxge/cfx_face.cpp + pdfium/core/fxge/cfx_folderfontinfo.cpp + pdfium/core/fxge/cfx_font.cpp + pdfium/core/fxge/cfx_fontcache.cpp + pdfium/core/fxge/cfx_fontmapper.cpp + pdfium/core/fxge/cfx_fontmgr.cpp + pdfium/core/fxge/cfx_gemodule.cpp + pdfium/core/fxge/cfx_glyphbitmap.cpp + pdfium/core/fxge/cfx_glyphcache.cpp + pdfium/core/fxge/cfx_graphstate.cpp + pdfium/core/fxge/cfx_graphstatedata.cpp + pdfium/core/fxge/cfx_pathdata.cpp + pdfium/core/fxge/cfx_renderdevice.cpp + pdfium/core/fxge/cfx_substfont.cpp + pdfium/core/fxge/cfx_unicodeencoding.cpp + pdfium/core/fxge/fx_font.cpp + pdfium/core/fxge/fx_ge_linux.cpp + pdfium/core/fxge/renderdevicedriver_iface.cpp + pdfium/core/fxge/scoped_font_transform.cpp + pdfium/core/fxge/text_char_pos.cpp + pdfium/core/fxge/text_glyph_pos.cpp + pdfium/core/fxge/agg/fx_agg_driver.cpp + pdfium/core/fxcrt/cfx_fileaccess_posix.cpp +) \ No newline at end of file diff --git 
a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/fpdfsdk.cmake b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/fpdfsdk.cmake new file mode 100644 index 000000000..0bfe6cb45 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/fpdfsdk.cmake @@ -0,0 +1,119 @@ +target_sources(pdfium + PRIVATE + # Headers + pdfium/fpdfsdk/formfiller/cffl_button.h + pdfium/fpdfsdk/formfiller/cffl_checkbox.h + pdfium/fpdfsdk/formfiller/cffl_combobox.h + pdfium/fpdfsdk/formfiller/cffl_formfiller.h + pdfium/fpdfsdk/formfiller/cffl_interactiveformfiller.h + pdfium/fpdfsdk/formfiller/cffl_listbox.h + pdfium/fpdfsdk/formfiller/cffl_pushbutton.h + pdfium/fpdfsdk/formfiller/cffl_radiobutton.h + pdfium/fpdfsdk/formfiller/cffl_textfield.h + pdfium/fpdfsdk/formfiller/cffl_textobject.h + pdfium/fpdfsdk/pwl/cpwl_button.h + pdfium/fpdfsdk/pwl/cpwl_caret.h + pdfium/fpdfsdk/pwl/cpwl_combo_box.h + pdfium/fpdfsdk/pwl/cpwl_edit.h + pdfium/fpdfsdk/pwl/cpwl_edit_ctrl.h + pdfium/fpdfsdk/pwl/cpwl_edit_impl.h + pdfium/fpdfsdk/pwl/cpwl_icon.h + pdfium/fpdfsdk/pwl/cpwl_list_box.h + pdfium/fpdfsdk/pwl/cpwl_list_impl.h + pdfium/fpdfsdk/pwl/cpwl_scroll_bar.h + pdfium/fpdfsdk/pwl/cpwl_special_button.h + pdfium/fpdfsdk/pwl/cpwl_wnd.h + pdfium/fpdfsdk/pwl/ipwl_systemhandler.h + pdfium/fpdfsdk/cpdfsdk_actionhandler.h + pdfium/fpdfsdk/cpdfsdk_annot.h + pdfium/fpdfsdk/cpdfsdk_annothandlermgr.h + pdfium/fpdfsdk/cpdfsdk_annotiteration.h + pdfium/fpdfsdk/cpdfsdk_annotiterator.h + pdfium/fpdfsdk/cpdfsdk_appstream.h + pdfium/fpdfsdk/cpdfsdk_baannot.h + pdfium/fpdfsdk/cpdfsdk_baannothandler.h + pdfium/fpdfsdk/cpdfsdk_customaccess.h + pdfium/fpdfsdk/cpdfsdk_fieldaction.h + pdfium/fpdfsdk/cpdfsdk_filewriteadapter.h + pdfium/fpdfsdk/cpdfsdk_formfillenvironment.h + pdfium/fpdfsdk/cpdfsdk_helpers.h + pdfium/fpdfsdk/cpdfsdk_interactiveform.h + pdfium/fpdfsdk/cpdfsdk_pageview.h + pdfium/fpdfsdk/cpdfsdk_pauseadapter.h + pdfium/fpdfsdk/cpdfsdk_renderpage.h + pdfium/fpdfsdk/cpdfsdk_widget.h + pdfium/fpdfsdk/cpdfsdk_widgethandler.h + 
pdfium/fpdfsdk/ipdfsdk_annothandler.h + + # Source files + pdfium/fpdfsdk/formfiller/cffl_button.cpp + pdfium/fpdfsdk/formfiller/cffl_checkbox.cpp + pdfium/fpdfsdk/formfiller/cffl_combobox.cpp + pdfium/fpdfsdk/formfiller/cffl_formfiller.cpp + pdfium/fpdfsdk/formfiller/cffl_interactiveformfiller.cpp + pdfium/fpdfsdk/formfiller/cffl_listbox.cpp + pdfium/fpdfsdk/formfiller/cffl_pushbutton.cpp + pdfium/fpdfsdk/formfiller/cffl_radiobutton.cpp + pdfium/fpdfsdk/formfiller/cffl_textfield.cpp + pdfium/fpdfsdk/formfiller/cffl_textobject.cpp + pdfium/fpdfsdk/pwl/cpwl_button.cpp + pdfium/fpdfsdk/pwl/cpwl_caret.cpp + pdfium/fpdfsdk/pwl/cpwl_combo_box.cpp + pdfium/fpdfsdk/pwl/cpwl_edit.cpp + pdfium/fpdfsdk/pwl/cpwl_edit_ctrl.cpp + pdfium/fpdfsdk/pwl/cpwl_edit_impl.cpp + pdfium/fpdfsdk/pwl/cpwl_icon.cpp + pdfium/fpdfsdk/pwl/cpwl_list_box.cpp + pdfium/fpdfsdk/pwl/cpwl_list_impl.cpp + pdfium/fpdfsdk/pwl/cpwl_scroll_bar.cpp + pdfium/fpdfsdk/pwl/cpwl_special_button.cpp + pdfium/fpdfsdk/pwl/cpwl_wnd.cpp + pdfium/fpdfsdk/cpdfsdk_actionhandler.cpp + pdfium/fpdfsdk/cpdfsdk_annot.cpp + pdfium/fpdfsdk/cpdfsdk_annothandlermgr.cpp + pdfium/fpdfsdk/cpdfsdk_annotiteration.cpp + pdfium/fpdfsdk/cpdfsdk_annotiterator.cpp + pdfium/fpdfsdk/cpdfsdk_appstream.cpp + pdfium/fpdfsdk/cpdfsdk_baannot.cpp + pdfium/fpdfsdk/cpdfsdk_baannothandler.cpp + pdfium/fpdfsdk/cpdfsdk_customaccess.cpp + pdfium/fpdfsdk/cpdfsdk_fieldaction.cpp + pdfium/fpdfsdk/cpdfsdk_filewriteadapter.cpp + pdfium/fpdfsdk/cpdfsdk_formfillenvironment.cpp + pdfium/fpdfsdk/cpdfsdk_helpers.cpp + pdfium/fpdfsdk/cpdfsdk_interactiveform.cpp + pdfium/fpdfsdk/cpdfsdk_pageview.cpp + pdfium/fpdfsdk/cpdfsdk_pauseadapter.cpp + pdfium/fpdfsdk/cpdfsdk_renderpage.cpp + pdfium/fpdfsdk/cpdfsdk_widget.cpp + pdfium/fpdfsdk/cpdfsdk_widgethandler.cpp + pdfium/fpdfsdk/fpdf_annot.cpp + pdfium/fpdfsdk/fpdf_attachment.cpp + pdfium/fpdfsdk/fpdf_catalog.cpp + pdfium/fpdfsdk/fpdf_dataavail.cpp + pdfium/fpdfsdk/fpdf_doc.cpp + pdfium/fpdfsdk/fpdf_editimg.cpp + 
pdfium/fpdfsdk/fpdf_editpage.cpp + pdfium/fpdfsdk/fpdf_editpath.cpp + pdfium/fpdfsdk/fpdf_edittext.cpp + pdfium/fpdfsdk/fpdf_ext.cpp + pdfium/fpdfsdk/fpdf_flatten.cpp + pdfium/fpdfsdk/fpdf_formfill.cpp + pdfium/fpdfsdk/fpdf_javascript.cpp + pdfium/fpdfsdk/fpdf_ppo.cpp + pdfium/fpdfsdk/fpdf_progressive.cpp + pdfium/fpdfsdk/fpdf_save.cpp + pdfium/fpdfsdk/fpdf_searchex.cpp + pdfium/fpdfsdk/fpdf_signature.cpp + pdfium/fpdfsdk/fpdf_structtree.cpp + pdfium/fpdfsdk/fpdf_sysfontinfo.cpp + pdfium/fpdfsdk/fpdf_text.cpp + pdfium/fpdfsdk/fpdf_thumbnail.cpp + pdfium/fpdfsdk/fpdf_transformpage.cpp + pdfium/fpdfsdk/fpdf_view.cpp +) + +# target_include_directories(pdfium +# PRIVATE +# pdfium/third_party/freetype/src/include +# ) \ No newline at end of file diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/fx_agg.cmake b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/fx_agg.cmake new file mode 100644 index 000000000..9d4c9aeea --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/fx_agg.cmake @@ -0,0 +1,33 @@ +target_sources(pdfium + PRIVATE + # Headers + pdfium/third_party/agg23/agg_array.h + pdfium/third_party/agg23/agg_basics.h + pdfium/third_party/agg23/agg_clip_liang_barsky.h + pdfium/third_party/agg23/agg_color_gray.h + pdfium/third_party/agg23/agg_conv_adaptor_vcgen.h + pdfium/third_party/agg23/agg_conv_dash.h + pdfium/third_party/agg23/agg_conv_stroke.h + pdfium/third_party/agg23/agg_curves.h + pdfium/third_party/agg23/agg_math.h + pdfium/third_party/agg23/agg_math_stroke.h + pdfium/third_party/agg23/agg_path_storage.h + pdfium/third_party/agg23/agg_pixfmt_gray.h + pdfium/third_party/agg23/agg_rasterizer_scanline_aa.h + pdfium/third_party/agg23/agg_render_scanlines.h + pdfium/third_party/agg23/agg_renderer_base.h + pdfium/third_party/agg23/agg_renderer_scanline.h + pdfium/third_party/agg23/agg_rendering_buffer.h + pdfium/third_party/agg23/agg_scanline_u.h + pdfium/third_party/agg23/agg_shorten_path.h + pdfium/third_party/agg23/agg_vcgen_dash.h + 
pdfium/third_party/agg23/agg_vcgen_stroke.h + pdfium/third_party/agg23/agg_vertex_sequence.h + + # Source files + pdfium/third_party/agg23/agg_curves.cpp + pdfium/third_party/agg23/agg_path_storage.cpp + pdfium/third_party/agg23/agg_rasterizer_scanline_aa.cpp + pdfium/third_party/agg23/agg_vcgen_dash.cpp + pdfium/third_party/agg23/agg_vcgen_stroke.cpp +) \ No newline at end of file diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/fx_base.cmake b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/fx_base.cmake new file mode 100644 index 000000000..db10c0230 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/fx_base.cmake @@ -0,0 +1,62 @@ +target_sources(pdfium + PRIVATE + # Headers + pdfium/base/allocator/partition_allocator/address_space_randomization.h + pdfium/base/allocator/partition_allocator/oom.h + pdfium/base/allocator/partition_allocator/oom_callback.h + pdfium/base/allocator/partition_allocator/page_allocator.h + pdfium/base/allocator/partition_allocator/page_allocator_constants.h + pdfium/base/allocator/partition_allocator/page_allocator_internal.h + pdfium/base/allocator/partition_allocator/page_allocator_internals_posix.h + pdfium/base/allocator/partition_allocator/page_allocator_internals_win.h + pdfium/base/allocator/partition_allocator/partition_alloc.h + pdfium/base/allocator/partition_allocator/partition_alloc_constants.h + pdfium/base/allocator/partition_allocator/partition_bucket.h + pdfium/base/allocator/partition_allocator/partition_cookie.h + pdfium/base/allocator/partition_allocator/partition_direct_map_extent.h + pdfium/base/allocator/partition_allocator/partition_freelist_entry.h + pdfium/base/allocator/partition_allocator/partition_oom.h + pdfium/base/allocator/partition_allocator/partition_page.h + pdfium/base/allocator/partition_allocator/partition_root_base.h + pdfium/base/allocator/partition_allocator/random.h + pdfium/base/allocator/partition_allocator/spin_lock.h + pdfium/base/containers/adapters.h + 
pdfium/base/debug/alias.h + pdfium/base/memory/aligned_memory.h + pdfium/base/numerics/checked_math.h + pdfium/base/numerics/checked_math_impl.h + pdfium/base/numerics/clamped_math.h + pdfium/base/numerics/clamped_math_impl.h + pdfium/base/numerics/safe_conversions.h + pdfium/base/numerics/safe_conversions_arm_impl.h + pdfium/base/numerics/safe_conversions_impl.h + pdfium/base/numerics/safe_math.h + pdfium/base/numerics/safe_math_arm_impl.h + pdfium/base/numerics/safe_math_clang_gcc_impl.h + pdfium/base/numerics/safe_math_shared_impl.h + pdfium/base/base_export.h + pdfium/base/bits.h + pdfium/base/compiler_specific.h + pdfium/base/immediate_crash.h + pdfium/base/logging.h + pdfium/base/no_destructor.h + pdfium/base/optional.h + pdfium/base/span.h + pdfium/base/stl_util.h + pdfium/base/sys_byteorder.h + pdfium/base/template_util.h + + # Source files + pdfium/base/allocator/partition_allocator/address_space_randomization.cc + pdfium/base/allocator/partition_allocator/oom_callback.cc + pdfium/base/allocator/partition_allocator/page_allocator.cc + pdfium/base/allocator/partition_allocator/partition_alloc.cc + pdfium/base/allocator/partition_allocator/partition_bucket.cc + pdfium/base/allocator/partition_allocator/partition_oom.cc + pdfium/base/allocator/partition_allocator/partition_page.cc + pdfium/base/allocator/partition_allocator/partition_root_base.cc + pdfium/base/allocator/partition_allocator/random.cc + pdfium/base/allocator/partition_allocator/spin_lock.cc + pdfium/base/debug/alias.cc + pdfium/base/memory/aligned_memory.cc +) \ No newline at end of file diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/fx_skia.cmake b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/fx_skia.cmake new file mode 100644 index 000000000..42e8603b8 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/fx_skia.cmake @@ -0,0 +1,8 @@ +target_sources(pdfium + PRIVATE + # Headers + pdfium/third_party/skia_shared/SkFloatToDecimal.h + + # Source files + 
pdfium/third_party/skia_shared/SkFloatToDecimal.cpp +) \ No newline at end of file diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/fxjs.cmake b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/fxjs.cmake new file mode 100644 index 000000000..626477611 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/fxjs.cmake @@ -0,0 +1,13 @@ +target_sources(pdfium + PRIVATE + # Headers + pdfium/fxjs/cjs_event_context_stub.h + pdfium/fxjs/cjs_runtimestub.h + pdfium/fxjs/cjs_event_context.h + pdfium/fxjs/ijs_runtime.h + + # Source files + pdfium/fxjs/cjs_event_context_stub.cpp + pdfium/fxjs/cjs_runtimestub.cpp + pdfium/fxjs/ijs_runtime.cpp +) diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/OWNERS b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/OWNERS new file mode 100644 index 000000000..b0a2a850f --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/OWNERS @@ -0,0 +1,8 @@ +ajwong@chromium.org +haraken@chromium.org +palmer@chromium.org +tsepez@chromium.org + +# TEAM: platform-architecture-dev@chromium.org +# Also: security-dev@chromium.org +# COMPONENT: Blink>MemoryAllocator>Partition diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/address_space_randomization.cc b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/address_space_randomization.cc new file mode 100644 index 000000000..c6f268fe6 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/address_space_randomization.cc @@ -0,0 +1,70 @@ +// Copyright 2014 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+
+#include "third_party/base/allocator/partition_allocator/address_space_randomization.h"
+
+#include "build/build_config.h"
+#include "third_party/base/allocator/partition_allocator/page_allocator.h"
+#include "third_party/base/allocator/partition_allocator/random.h"
+#include "third_party/base/allocator/partition_allocator/spin_lock.h"
+#include "third_party/base/logging.h"
+
+#if defined(OS_WIN)
+#include <windows.h>  // Must be in front of other Windows header files.
+
+#include <VersionHelpers.h>
+#endif
+
+namespace pdfium {
+namespace base {
+
+void* GetRandomPageBase() {
+  uintptr_t random = static_cast<uintptr_t>(RandomValue());
+
+#if defined(ARCH_CPU_64_BITS)
+  random <<= 32ULL;
+  random |= static_cast<uintptr_t>(RandomValue());
+
+// The kASLRMask and kASLROffset constants will be suitable for the
+// OS and build configuration.
+#if defined(OS_WIN) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+  // Windows >= 8.1 has the full 47 bits. Use them where available.
+  static bool windows_81 = false;
+  static bool windows_81_initialized = false;
+  if (!windows_81_initialized) {
+    windows_81 = IsWindows8Point1OrGreater();
+    windows_81_initialized = true;
+  }
+  if (!windows_81) {
+    random &= internal::kASLRMaskBefore8_10;
+  } else {
+    random &= internal::kASLRMask;
+  }
+  random += internal::kASLROffset;
+#else
+  random &= internal::kASLRMask;
+  random += internal::kASLROffset;
+#endif  // defined(OS_WIN) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+#else   // defined(ARCH_CPU_32_BITS)
+#if defined(OS_WIN)
+  // On win32 host systems the randomization plus huge alignment causes
+  // excessive fragmentation. Plus most of these systems lack ASLR, so the
+  // randomization isn't buying anything. In that case we just skip it.
+  // TODO(palmer): Just dump the randomization when HE-ASLR is present.
+  static BOOL is_wow64 = -1;
+  if (is_wow64 == -1 && !IsWow64Process(GetCurrentProcess(), &is_wow64))
+    is_wow64 = FALSE;
+  if (!is_wow64)
+    return nullptr;
+#endif  // defined(OS_WIN)
+  random &= internal::kASLRMask;
+  random += internal::kASLROffset;
+#endif  // defined(ARCH_CPU_32_BITS)
+
+  DCHECK_EQ(0ULL, (random & kPageAllocationGranularityOffsetMask));
+  return reinterpret_cast<void*>(random);
+}
+
+}  // namespace base
+}  // namespace pdfium
diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/address_space_randomization.h b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/address_space_randomization.h
new file mode 100644
index 000000000..c6d8ca97b
--- /dev/null
+++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/address_space_randomization.h
@@ -0,0 +1,203 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_
+#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_
+
+#include "build/build_config.h"
+#include "third_party/base/allocator/partition_allocator/page_allocator.h"
+#include "third_party/base/base_export.h"
+
+namespace pdfium {
+namespace base {
+
+// Calculates a random preferred mapping address. In calculating an address, we
+// balance good ASLR against not fragmenting the address space too badly.
+BASE_EXPORT void* GetRandomPageBase();
+
+namespace internal {
+
+constexpr uintptr_t AslrAddress(uintptr_t mask) {
+  return mask & kPageAllocationGranularityBaseMask;
+}
+constexpr uintptr_t AslrMask(uintptr_t bits) {
+  return AslrAddress((1ULL << bits) - 1ULL);
+}
+
+// Turn off formatting, because the thicket of nested ifdefs below is
+// incomprehensible without indentation.
It is also incomprehensible with +// indentation, but the only other option is a combinatorial explosion of +// *_{win,linux,mac,foo}_{32,64}.h files. +// +// clang-format off + +#if defined(ARCH_CPU_64_BITS) + + #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) + + // We shouldn't allocate system pages at all for sanitizer builds. However, + // we do, and if random hint addresses interfere with address ranges + // hard-coded in those tools, bad things happen. This address range is + // copied from TSAN source but works with all tools. See + // https://crbug.com/539863. + constexpr uintptr_t kASLRMask = AslrAddress(0x007fffffffffULL); + constexpr uintptr_t kASLROffset = AslrAddress(0x7e8000000000ULL); + + #elif defined(OS_WIN) + + // Windows 8.10 and newer support the full 48 bit address range. Older + // versions of Windows only support 44 bits. Since kASLROffset is non-zero + // and may cause a carry, use 47 and 43 bit masks. See + // http://www.alex-ionescu.com/?p=246 + constexpr uintptr_t kASLRMask = AslrMask(47); + constexpr uintptr_t kASLRMaskBefore8_10 = AslrMask(43); + // Try not to map pages into the range where Windows loads DLLs by default. + constexpr uintptr_t kASLROffset = 0x80000000ULL; + + #elif defined(OS_APPLE) + + // macOS as of 10.12.5 does not clean up entries in page map levels 3/4 + // [PDP/PML4] created from mmap or mach_vm_allocate, even after the region + // is destroyed. Using a virtual address space that is too large causes a + // leak of about 1 wired [can never be paged out] page per call to mmap. The + // page is only reclaimed when the process is killed. Confine the hint to a + // 39-bit section of the virtual address space. + // + // This implementation adapted from + // https://chromium-review.googlesource.com/c/v8/v8/+/557958. The difference + // is that here we clamp to 39 bits, not 32. + // + // TODO(crbug.com/738925): Remove this limitation if/when the macOS behavior + // changes. 
+ constexpr uintptr_t kASLRMask = AslrMask(38); + constexpr uintptr_t kASLROffset = AslrAddress(0x1000000000ULL); + + #elif defined(OS_POSIX) || defined(OS_FUCHSIA) + + #if defined(ARCH_CPU_X86_64) + + // Linux (and macOS) support the full 47-bit user space of x64 processors. + // Use only 46 to allow the kernel a chance to fulfill the request. + constexpr uintptr_t kASLRMask = AslrMask(46); + constexpr uintptr_t kASLROffset = AslrAddress(0); + + #elif defined(ARCH_CPU_ARM64) + + #if defined(OS_ANDROID) + + // Restrict the address range on Android to avoid a large performance + // regression in single-process WebViews. See https://crbug.com/837640. + constexpr uintptr_t kASLRMask = AslrMask(30); + constexpr uintptr_t kASLROffset = AslrAddress(0x20000000ULL); + + #else + + // ARM64 on Linux has 39-bit user space. Use 38 bits since kASLROffset + // could cause a carry. + constexpr uintptr_t kASLRMask = AslrMask(38); + constexpr uintptr_t kASLROffset = AslrAddress(0x1000000000ULL); + + #endif + + #elif defined(ARCH_CPU_PPC64) + + #if defined(OS_AIX) + + // AIX has 64 bits of virtual addressing, but we limit the address range + // to (a) minimize segment lookaside buffer (SLB) misses; and (b) use + // extra address space to isolate the mmap regions. + constexpr uintptr_t kASLRMask = AslrMask(30); + constexpr uintptr_t kASLROffset = AslrAddress(0x400000000000ULL); + + #elif defined(ARCH_CPU_BIG_ENDIAN) + + // Big-endian Linux PPC has 44 bits of virtual addressing. Use 42. + constexpr uintptr_t kASLRMask = AslrMask(42); + constexpr uintptr_t kASLROffset = AslrAddress(0); + + #else // !defined(OS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN) + + // Little-endian Linux PPC has 48 bits of virtual addressing. Use 46. 
+ constexpr uintptr_t kASLRMask = AslrMask(46); + constexpr uintptr_t kASLROffset = AslrAddress(0); + + #endif // !defined(OS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN) + + #elif defined(ARCH_CPU_S390X) + + // Linux on Z uses bits 22 - 32 for Region Indexing, which translates to + // 42 bits of virtual addressing. Truncate to 40 bits to allow kernel a + // chance to fulfill the request. + constexpr uintptr_t kASLRMask = AslrMask(40); + constexpr uintptr_t kASLROffset = AslrAddress(0); + + #elif defined(ARCH_CPU_S390) + + // 31 bits of virtual addressing. Truncate to 29 bits to allow the kernel + // a chance to fulfill the request. + constexpr uintptr_t kASLRMask = AslrMask(29); + constexpr uintptr_t kASLROffset = AslrAddress(0); + + #else // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) && + // !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390) + + // For all other POSIX variants, use 30 bits. + constexpr uintptr_t kASLRMask = AslrMask(30); + + #if defined(OS_SOLARIS) + + // For our Solaris/illumos mmap hint, we pick a random address in the + // bottom half of the top half of the address space (that is, the third + // quarter). Because we do not MAP_FIXED, this will be treated only as a + // hint -- the system will not fail to mmap because something else + // happens to already be mapped at our random address. We deliberately + // set the hint high enough to get well above the system's break (that + // is, the heap); Solaris and illumos will try the hint and if that + // fails allocate as if there were no hint at all. The high hint + // prevents the break from getting hemmed in at low values, ceding half + // of the address space to the system heap. + constexpr uintptr_t kASLROffset = AslrAddress(0x80000000ULL); + + #elif defined(OS_AIX) + + // The range 0x30000000 - 0xD0000000 is available on AIX; choose the + // upper range. 
+ constexpr uintptr_t kASLROffset = AslrAddress(0x90000000ULL); + + #else // !defined(OS_SOLARIS) && !defined(OS_AIX) + + // The range 0x20000000 - 0x60000000 is relatively unpopulated across a + // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macOS + // 10.6 and 10.7. + constexpr uintptr_t kASLROffset = AslrAddress(0x20000000ULL); + + #endif // !defined(OS_SOLARIS) && !defined(OS_AIX) + + #endif // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) && + // !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390) + + #endif // defined(OS_POSIX) + +#elif defined(ARCH_CPU_32_BITS) + + // This is a good range on 32-bit Windows and Android (the only platforms on + // which we support 32-bitness). Allocates in the 0.5 - 1.5 GiB region. There + // is no issue with carries here. + constexpr uintptr_t kASLRMask = AslrMask(30); + constexpr uintptr_t kASLROffset = AslrAddress(0x20000000ULL); + +#else + + #error Please tell us about your exotic hardware! Sounds interesting. + +#endif // defined(ARCH_CPU_32_BITS) + +// clang-format on + +} // namespace internal + +} // namespace base +} // namespace pdfium + +#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_ diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/oom.h b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/oom.h new file mode 100644 index 000000000..8f3e9c6a5 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/oom.h @@ -0,0 +1,40 @@ +// Copyright (c) 2016 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+
+#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_H_
+#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_H_
+
+#include "third_party/base/allocator/partition_allocator/oom_callback.h"
+#include "third_party/base/logging.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+
+// Do not want trivial entry points just calling OOM_CRASH() to be
+// commoned up by linker icf/comdat folding.
+#define OOM_CRASH_PREVENT_ICF()                  \
+  volatile int oom_crash_inhibit_icf = __LINE__; \
+  ALLOW_UNUSED_LOCAL(oom_crash_inhibit_icf)
+
+// OOM_CRASH() - Specialization of IMMEDIATE_CRASH which will raise a custom
+// exception on Windows to signal this is OOM and not a normal assert.
+#if defined(OS_WIN)
+#define OOM_CRASH(size)                                                 \
+  do {                                                                  \
+    OOM_CRASH_PREVENT_ICF();                                            \
+    base::internal::RunPartitionAllocOomCallback();                     \
+    ::RaiseException(0xE0000008, EXCEPTION_NONCONTINUABLE, 0, nullptr); \
+    IMMEDIATE_CRASH();                                                  \
+  } while (0)
+#else
+#define OOM_CRASH(size)                             \
+  do {                                              \
+    base::internal::RunPartitionAllocOomCallback(); \
+    OOM_CRASH_PREVENT_ICF();                        \
+    IMMEDIATE_CRASH();                              \
+  } while (0)
+#endif
+
+#endif  // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_H_
diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/oom_callback.cc b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/oom_callback.cc
new file mode 100644
index 000000000..914343829
--- /dev/null
+++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/oom_callback.cc
@@ -0,0 +1,29 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+ +#include "third_party/base/allocator/partition_allocator/oom_callback.h" + +#include "third_party/base/logging.h" + +namespace pdfium { +namespace base { + +namespace { +PartitionAllocOomCallback g_oom_callback; +} // namespace + +void SetPartitionAllocOomCallback(PartitionAllocOomCallback callback) { + DCHECK(!g_oom_callback); + g_oom_callback = callback; +} + +namespace internal { +void RunPartitionAllocOomCallback() { + if (g_oom_callback) + g_oom_callback(); +} +} // namespace internal + +} // namespace base +} // namespace pdfium diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/oom_callback.h b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/oom_callback.h new file mode 100644 index 000000000..044b167ff --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/oom_callback.h @@ -0,0 +1,26 @@ +// Copyright (c) 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_CALLBACK_H_ +#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_CALLBACK_H_ + +#include "third_party/base/base_export.h" + +namespace pdfium { +namespace base { +typedef void (*PartitionAllocOomCallback)(); +// Registers a callback to be invoked during an OOM_CRASH(). OOM_CRASH is +// invoked by users of PageAllocator (including PartitionAlloc) to signify an +// allocation failure from the platform. 
+BASE_EXPORT void SetPartitionAllocOomCallback(
+    PartitionAllocOomCallback callback);
+
+namespace internal {
+BASE_EXPORT void RunPartitionAllocOomCallback();
+}  // namespace internal
+
+}  // namespace base
+}  // namespace pdfium
+
+#endif  // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_CALLBACK_H_
diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/page_allocator.cc b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/page_allocator.cc
new file mode 100644
index 000000000..e158bd133
--- /dev/null
+++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/page_allocator.cc
@@ -0,0 +1,259 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/base/allocator/partition_allocator/page_allocator.h"
+
+#include <limits.h>
+
+#include <atomic>
+
+#include "build/build_config.h"
+#include "third_party/base/allocator/partition_allocator/address_space_randomization.h"
+#include "third_party/base/allocator/partition_allocator/page_allocator_internal.h"
+#include "third_party/base/allocator/partition_allocator/spin_lock.h"
+#include "third_party/base/bits.h"
+#include "third_party/base/logging.h"
+#include "third_party/base/numerics/safe_math.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+
+#if defined(OS_WIN)
+#include "third_party/base/allocator/partition_allocator/page_allocator_internals_win.h"
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include "third_party/base/allocator/partition_allocator/page_allocator_internals_posix.h"
+#else
+#error Platform not supported.
+#endif
+
+namespace pdfium {
+namespace base {
+
+namespace {
+
+// We may reserve/release address space on different threads.
+subtle::SpinLock* GetReserveLock() { + static subtle::SpinLock* s_reserveLock = nullptr; + if (!s_reserveLock) + s_reserveLock = new subtle::SpinLock(); + return s_reserveLock; +} + +// We only support a single block of reserved address space. +void* s_reservation_address = nullptr; +size_t s_reservation_size = 0; + +void* AllocPagesIncludingReserved(void* address, + size_t length, + PageAccessibilityConfiguration accessibility, + PageTag page_tag, + bool commit) { + void* ret = + SystemAllocPages(address, length, accessibility, page_tag, commit); + if (ret == nullptr) { + const bool cant_alloc_length = kHintIsAdvisory || address == nullptr; + if (cant_alloc_length) { + // The system cannot allocate |length| bytes. Release any reserved address + // space and try once more. + ReleaseReservation(); + ret = SystemAllocPages(address, length, accessibility, page_tag, commit); + } + } + return ret; +} + +// Trims |base| to given |trim_length| and |alignment|. +// +// On failure, on Windows, this function returns nullptr and frees |base|. 
+void* TrimMapping(void* base, + size_t base_length, + size_t trim_length, + uintptr_t alignment, + PageAccessibilityConfiguration accessibility, + bool commit) { + size_t pre_slack = reinterpret_cast(base) & (alignment - 1); + if (pre_slack) { + pre_slack = alignment - pre_slack; + } + size_t post_slack = base_length - pre_slack - trim_length; + DCHECK(base_length >= trim_length || pre_slack || post_slack); + DCHECK(pre_slack < base_length); + DCHECK(post_slack < base_length); + return TrimMappingInternal(base, base_length, trim_length, accessibility, + commit, pre_slack, post_slack); +} + +} // namespace + +void* SystemAllocPages(void* hint, + size_t length, + PageAccessibilityConfiguration accessibility, + PageTag page_tag, + bool commit) { + DCHECK(!(length & kPageAllocationGranularityOffsetMask)); + DCHECK(!(reinterpret_cast(hint) & + kPageAllocationGranularityOffsetMask)); + DCHECK(commit || accessibility == PageInaccessible); + return SystemAllocPagesInternal(hint, length, accessibility, page_tag, + commit); +} + +void* AllocPages(void* address, + size_t length, + size_t align, + PageAccessibilityConfiguration accessibility, + PageTag page_tag, + bool commit) { + DCHECK(length >= kPageAllocationGranularity); + DCHECK(!(length & kPageAllocationGranularityOffsetMask)); + DCHECK(align >= kPageAllocationGranularity); + // Alignment must be power of 2 for masking math to work. + DCHECK(pdfium::base::bits::IsPowerOfTwo(align)); + DCHECK(!(reinterpret_cast(address) & + kPageAllocationGranularityOffsetMask)); + uintptr_t align_offset_mask = align - 1; + uintptr_t align_base_mask = ~align_offset_mask; + DCHECK(!(reinterpret_cast(address) & align_offset_mask)); + + // If the client passed null as the address, choose a good one. + if (address == nullptr) { + address = GetRandomPageBase(); + address = reinterpret_cast(reinterpret_cast(address) & + align_base_mask); + } + + // First try to force an exact-size, aligned allocation from our random base. 
+#if defined(ARCH_CPU_32_BITS) + // On 32 bit systems, first try one random aligned address, and then try an + // aligned address derived from the value of |ret|. + constexpr int kExactSizeTries = 2; +#else + // On 64 bit systems, try 3 random aligned addresses. + constexpr int kExactSizeTries = 3; +#endif + + for (int i = 0; i < kExactSizeTries; ++i) { + void* ret = AllocPagesIncludingReserved(address, length, accessibility, + page_tag, commit); + if (ret != nullptr) { + // If the alignment is to our liking, we're done. + if (!(reinterpret_cast(ret) & align_offset_mask)) + return ret; + // Free the memory and try again. + FreePages(ret, length); + } else { + // |ret| is null; if this try was unhinted, we're OOM. + if (kHintIsAdvisory || address == nullptr) + return nullptr; + } + +#if defined(ARCH_CPU_32_BITS) + // For small address spaces, try the first aligned address >= |ret|. Note + // |ret| may be null, in which case |address| becomes null. + address = reinterpret_cast( + (reinterpret_cast(ret) + align_offset_mask) & + align_base_mask); +#else // defined(ARCH_CPU_64_BITS) + // Keep trying random addresses on systems that have a large address space. + address = GetRandomPageBase(); + address = reinterpret_cast(reinterpret_cast(address) & + align_base_mask); +#endif + } + + // Make a larger allocation so we can force alignment. + size_t try_length = length + (align - kPageAllocationGranularity); + CHECK(try_length >= length); + void* ret; + + do { + // Continue randomizing only on POSIX. + address = kHintIsAdvisory ? GetRandomPageBase() : nullptr; + ret = AllocPagesIncludingReserved(address, try_length, accessibility, + page_tag, commit); + // The retries are for Windows, where a race can steal our mapping on + // resize. 
+ } while (ret != nullptr && + (ret = TrimMapping(ret, try_length, length, align, accessibility, + commit)) == nullptr); + + return ret; +} + +void FreePages(void* address, size_t length) { + DCHECK(!(reinterpret_cast(address) & + kPageAllocationGranularityOffsetMask)); + DCHECK(!(length & kPageAllocationGranularityOffsetMask)); + FreePagesInternal(address, length); +} + +bool TrySetSystemPagesAccess(void* address, + size_t length, + PageAccessibilityConfiguration accessibility) { + DCHECK(!(length & kSystemPageOffsetMask)); + return TrySetSystemPagesAccessInternal(address, length, accessibility); +} + +void SetSystemPagesAccess(void* address, + size_t length, + PageAccessibilityConfiguration accessibility) { + DCHECK(!(length & kSystemPageOffsetMask)); + SetSystemPagesAccessInternal(address, length, accessibility); +} + +void DecommitSystemPages(void* address, size_t length) { + DCHECK_EQ(0UL, length & kSystemPageOffsetMask); + DecommitSystemPagesInternal(address, length); +} + +bool RecommitSystemPages(void* address, + size_t length, + PageAccessibilityConfiguration accessibility) { + DCHECK_EQ(0UL, length & kSystemPageOffsetMask); + DCHECK(PageInaccessible != accessibility); + return RecommitSystemPagesInternal(address, length, accessibility); +} + +void DiscardSystemPages(void* address, size_t length) { + DCHECK_EQ(0UL, length & kSystemPageOffsetMask); + DiscardSystemPagesInternal(address, length); +} + +bool ReserveAddressSpace(size_t size) { + // To avoid deadlock, call only SystemAllocPages. + subtle::SpinLock::Guard guard(*GetReserveLock()); + if (s_reservation_address == nullptr) { + void* mem = SystemAllocPages(nullptr, size, PageInaccessible, + PageTag::kChromium, false); + if (mem != nullptr) { + // We guarantee this alignment when reserving address space. 
+ DCHECK(!(reinterpret_cast(mem) & + kPageAllocationGranularityOffsetMask)); + s_reservation_address = mem; + s_reservation_size = size; + return true; + } + } + return false; +} + +bool ReleaseReservation() { + // To avoid deadlock, call only FreePages. + subtle::SpinLock::Guard guard(*GetReserveLock()); + if (!s_reservation_address) + return false; + + FreePages(s_reservation_address, s_reservation_size); + s_reservation_address = nullptr; + s_reservation_size = 0; + return true; +} + +uint32_t GetAllocPageErrorCode() { + return s_allocPageErrorCode; +} + +} // namespace base +} // namespace pdfium diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/page_allocator.h b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/page_allocator.h new file mode 100644 index 000000000..c50b908c7 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/page_allocator.h @@ -0,0 +1,197 @@ +// Copyright (c) 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H_ +#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H_ + +#include + +#include + +#include "build/build_config.h" +#include "third_party/base/allocator/partition_allocator/page_allocator_constants.h" +#include "third_party/base/base_export.h" +#include "third_party/base/compiler_specific.h" + +namespace pdfium { +namespace base { + +enum PageAccessibilityConfiguration { + PageInaccessible, + PageRead, + PageReadWrite, + PageReadExecute, + // This flag is deprecated and will go away soon. + // TODO(bbudge) Remove this as soon as V8 doesn't need RWX pages. + PageReadWriteExecute, +}; + +// macOS supports tagged memory regions, to help in debugging. 
On Android, +// these tags are used to name anonymous mappings. +enum class PageTag { + kFirst = 240, // Minimum tag value. + kBlinkGC = 252, // Blink GC pages. + kPartitionAlloc = 253, // PartitionAlloc, no matter the partition. + kChromium = 254, // Chromium page. + kV8 = 255, // V8 heap pages. + kLast = kV8 // Maximum tag value. +}; + +// Allocate one or more pages. +// +// The requested |address| is just a hint; the actual address returned may +// differ. The returned address will be aligned at least to |align| bytes. +// |length| is in bytes, and must be a multiple of |kPageAllocationGranularity|. +// |align| is in bytes, and must be a power-of-two multiple of +// |kPageAllocationGranularity|. +// +// If |address| is null, then a suitable and randomized address will be chosen +// automatically. +// +// |page_accessibility| controls the permission of the allocated pages. +// |page_tag| is used on some platforms to identify the source of the +// allocation. Use PageTag::kChromium as a catch-all category. +// +// This call will return null if the allocation cannot be satisfied. +BASE_EXPORT void* AllocPages(void* address, + size_t length, + size_t align, + PageAccessibilityConfiguration page_accessibility, + PageTag tag, + bool commit = true); + +// Free one or more pages starting at |address| and continuing for |length| +// bytes. +// +// |address| and |length| must match a previous call to |AllocPages|. Therefore, +// |address| must be aligned to |kPageAllocationGranularity| bytes, and |length| +// must be a multiple of |kPageAllocationGranularity|. +BASE_EXPORT void FreePages(void* address, size_t length); + +// Mark one or more system pages, starting at |address| with the given +// |page_accessibility|. |length| must be a multiple of |kSystemPageSize| bytes. +// +// Returns true if the permission change succeeded. In most cases you must +// |CHECK| the result. 
+BASE_EXPORT WARN_UNUSED_RESULT bool TrySetSystemPagesAccess( + void* address, + size_t length, + PageAccessibilityConfiguration page_accessibility); + +// Mark one or more system pages, starting at |address| with the given +// |page_accessibility|. |length| must be a multiple of |kSystemPageSize| bytes. +// +// Performs a CHECK that the operation succeeds. +BASE_EXPORT void SetSystemPagesAccess( + void* address, + size_t length, + PageAccessibilityConfiguration page_accessibility); + +// Decommit one or more system pages starting at |address| and continuing for +// |length| bytes. |length| must be a multiple of |kSystemPageSize|. +// +// Decommitted means that physical resources (RAM or swap) backing the allocated +// virtual address range are released back to the system, but the address space +// is still allocated to the process (possibly using up page table entries or +// other accounting resources). Any access to a decommitted region of memory +// is an error and will generate a fault. +// +// This operation is not atomic on all platforms. +// +// Note: "Committed memory" is a Windows Memory Subsystem concept that ensures +// processes will not fault when touching a committed memory region. There is +// no analogue in the POSIX memory API where virtual memory pages are +// best-effort allocated resources on the first touch. To create a +// platform-agnostic abstraction, this API simulates the Windows "decommit" +// state by both discarding the region (allowing the OS to avoid swap +// operations) and changing the page protections so accesses fault. +// +// TODO(ajwong): This currently does not change page protections on POSIX +// systems due to a perf regression. Tracked at http://crbug.com/766882. +BASE_EXPORT void DecommitSystemPages(void* address, size_t length); + +// Recommit one or more system pages, starting at |address| and continuing for +// |length| bytes with the given |page_accessibility|. |length| must be a +// multiple of |kSystemPageSize|. 
+// +// Decommitted system pages must be recommitted with their original permissions +// before they are used again. +// +// Returns true if the recommit change succeeded. In most cases you must |CHECK| +// the result. +BASE_EXPORT WARN_UNUSED_RESULT bool RecommitSystemPages( + void* address, + size_t length, + PageAccessibilityConfiguration page_accessibility); + +// Discard one or more system pages starting at |address| and continuing for +// |length| bytes. |length| must be a multiple of |kSystemPageSize|. +// +// Discarding is a hint to the system that the page is no longer required. The +// hint may: +// - Do nothing. +// - Discard the page immediately, freeing up physical pages. +// - Discard the page at some time in the future in response to memory +// pressure. +// +// Only committed pages should be discarded. Discarding a page does not decommit +// it, and it is valid to discard an already-discarded page. A read or write to +// a discarded page will not fault. +// +// Reading from a discarded page may return the original page content, or a page +// full of zeroes. +// +// Writing to a discarded page is the only guaranteed way to tell the system +// that the page is required again. Once written to, the content of the page is +// guaranteed stable once more. After being written to, the page content may be +// based on the original page content, or a page of zeroes. +BASE_EXPORT void DiscardSystemPages(void* address, size_t length); + +// Rounds up |address| to the next multiple of |kSystemPageSize|. Returns +// 0 for an |address| of 0. +constexpr ALWAYS_INLINE uintptr_t RoundUpToSystemPage(uintptr_t address) { + return (address + kSystemPageOffsetMask) & kSystemPageBaseMask; +} + +// Rounds down |address| to the previous multiple of |kSystemPageSize|. Returns +// 0 for an |address| of 0. 
+constexpr ALWAYS_INLINE uintptr_t RoundDownToSystemPage(uintptr_t address) { + return address & kSystemPageBaseMask; +} + +// Rounds up |address| to the next multiple of |kPageAllocationGranularity|. +// Returns 0 for an |address| of 0. +constexpr ALWAYS_INLINE uintptr_t +RoundUpToPageAllocationGranularity(uintptr_t address) { + return (address + kPageAllocationGranularityOffsetMask) & + kPageAllocationGranularityBaseMask; +} + +// Rounds down |address| to the previous multiple of +// |kPageAllocationGranularity|. Returns 0 for an |address| of 0. +constexpr ALWAYS_INLINE uintptr_t +RoundDownToPageAllocationGranularity(uintptr_t address) { + return address & kPageAllocationGranularityBaseMask; +} + +// Reserves (at least) |size| bytes of address space, aligned to +// |kPageAllocationGranularity|. This can be called early on to make it more +// likely that large allocations will succeed. Returns true if the reservation +// succeeded, false if the reservation failed or a reservation was already made. +BASE_EXPORT bool ReserveAddressSpace(size_t size); + +// Releases any reserved address space. |AllocPages| calls this automatically on +// an allocation failure. External allocators may also call this on failure. +// +// Returns true when an existing reservation was released. +BASE_EXPORT bool ReleaseReservation(); + +// Returns |errno| (POSIX) or the result of |GetLastError| (Windows) when |mmap| +// (POSIX) or |VirtualAlloc| (Windows) fails. 
+BASE_EXPORT uint32_t GetAllocPageErrorCode(); + +} // namespace base +} // namespace pdfium + +#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H_ diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/page_allocator_constants.h b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/page_allocator_constants.h new file mode 100644 index 000000000..17e29ecb9 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/page_allocator_constants.h @@ -0,0 +1,66 @@ +// Copyright (c) 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_ +#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_ + +#include + +#include "build/build_config.h" + +namespace pdfium { +namespace base { +#if defined(OS_WIN) || defined(ARCH_CPU_PPC64) +static constexpr size_t kPageAllocationGranularityShift = 16; // 64KB +#elif defined(_MIPS_ARCH_LOONGSON) || defined(ARCH_CPU_LOONGARCH64) +static constexpr size_t kPageAllocationGranularityShift = 14; // 16KB +#elif defined(ARCH_CPU_SW64) +static constexpr size_t kPageAllocationGranularityShift = 13; // 8KB +#elif defined(OS_APPLE) && defined(ARCH_CPU_ARM64) +static constexpr size_t kPageAllocationGranularityShift = 14; // 16KB +#else +static constexpr size_t kPageAllocationGranularityShift = 12; // 4KB +#endif +static constexpr size_t kPageAllocationGranularity = + 1 << kPageAllocationGranularityShift; +static constexpr size_t kPageAllocationGranularityOffsetMask = + kPageAllocationGranularity - 1; +static constexpr size_t kPageAllocationGranularityBaseMask = + ~kPageAllocationGranularityOffsetMask; + +#if defined(_MIPS_ARCH_LOONGSON) || defined(ARCH_CPU_LOONGARCH64) +static constexpr size_t 
kSystemPageSize = 16384; +#elif defined(ARCH_CPU_PPC64) +// Modern ppc64 systems support 4KB and 64KB page sizes. +// Since 64KB is the de-facto standard on the platform +// and binaries compiled for 64KB are likely to work on 4KB systems, +// 64KB is a good choice here. +static constexpr size_t kSystemPageSize = 65536; +#elif defined(OS_APPLE) && defined(ARCH_CPU_ARM64) +static constexpr size_t kSystemPageSize = 16384; +#elif defined(ARCH_CPU_SW64) +static constexpr size_t kSystemPageSize = 8192; +#else +//default page size changed to 64k. +//binaries compiled for 64KB are likely to work on 4KB systems, +//64KB is a good choice here. +//根据系统页大小分配 +#if defined(SYSTEMPAGESIZE) +static constexpr size_t kSystemPageSize = SYSTEMPAGESIZE; +#else +static constexpr size_t kSystemPageSize = 4096; +#endif +#endif +static constexpr size_t kSystemPageOffsetMask = kSystemPageSize - 1; +static_assert((kSystemPageSize & (kSystemPageSize - 1)) == 0, + "kSystemPageSize must be power of 2"); +static constexpr size_t kSystemPageBaseMask = ~kSystemPageOffsetMask; + +static constexpr size_t kPageMetadataShift = 5; // 32 bytes per partition page. +static constexpr size_t kPageMetadataSize = 1 << kPageMetadataShift; + +} // namespace base +} // namespace pdfium + +#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_ diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/page_allocator_internal.h b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/page_allocator_internal.h new file mode 100644 index 000000000..22843149e --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/page_allocator_internal.h @@ -0,0 +1,20 @@ +// Copyright (c) 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_ +#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_ + +namespace pdfium { +namespace base { + +void* SystemAllocPages(void* hint, + size_t length, + PageAccessibilityConfiguration accessibility, + PageTag page_tag, + bool commit); + +} // namespace base +} // namespace pdfium + +#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_ diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/page_allocator_internals_posix.h b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/page_allocator_internals_posix.h new file mode 100644 index 000000000..bfc5592d7 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/page_allocator_internals_posix.h @@ -0,0 +1,218 @@ +// Copyright (c) 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_POSIX_H_ +#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_POSIX_H_ + +#include +#include + +#include "build/build_config.h" + +#if defined(OS_APPLE) +#include +#endif +#if defined(OS_ANDROID) +#include +#endif +#if defined(OS_LINUX) || defined(OS_CHROMEOS) +#include + +#include +#endif + +#include "third_party/base/allocator/partition_allocator/page_allocator.h" +#include "third_party/base/logging.h" +#include + +#ifndef MAP_ANONYMOUS +#define MAP_ANONYMOUS MAP_ANON +#endif + +namespace pdfium { +namespace base { + +#if defined(OS_ANDROID) +namespace { +const char* PageTagToName(PageTag tag) { + // Important: All the names should be string literals. As per prctl.h in + // //third_party/android_ndk the kernel keeps a pointer to the name instead + // of copying it. 
+ // + // Having the name in .rodata ensures that the pointer remains valid as + // long as the mapping is alive. + switch (tag) { + case PageTag::kBlinkGC: + return "blink_gc"; + case PageTag::kPartitionAlloc: + return "partition_alloc"; + case PageTag::kChromium: + return "chromium"; + case PageTag::kV8: + return "v8"; + default: + DCHECK(false); + return ""; + } +} +} // namespace +#endif // defined(OS_ANDROID) + +// |mmap| uses a nearby address if the hint address is blocked. +constexpr bool kHintIsAdvisory = true; +std::atomic s_allocPageErrorCode{0}; + +int GetAccessFlags(PageAccessibilityConfiguration accessibility) { + switch (accessibility) { + case PageRead: + return PROT_READ; + case PageReadWrite: + return PROT_READ | PROT_WRITE; + case PageReadExecute: + return PROT_READ | PROT_EXEC; + case PageReadWriteExecute: + return PROT_READ | PROT_WRITE | PROT_EXEC; + default: + NOTREACHED(); + FALLTHROUGH; + case PageInaccessible: + return PROT_NONE; + } +} + +void* SystemAllocPagesInternal(void* hint, + size_t length, + PageAccessibilityConfiguration accessibility, + PageTag page_tag, + bool commit) { +#if defined(OS_APPLE) + // Use a custom tag to make it easier to distinguish Partition Alloc regions + // in vmmap(1). Tags between 240-255 are supported. + DCHECK(PageTag::kFirst <= page_tag); + DCHECK(PageTag::kLast >= page_tag); + int fd = VM_MAKE_TAG(static_cast(page_tag)); +#else + int fd = -1; +#endif + + int access_flag = GetAccessFlags(accessibility); + int map_flags = MAP_ANONYMOUS | MAP_PRIVATE; + + // TODO(https://crbug.com/927411): Remove once Fuchsia uses a native page + // allocator, rather than relying on POSIX compatibility. 
+#if defined(OS_FUCHSIA) + if (page_tag == PageTag::kV8) { + map_flags |= MAP_JIT; + } +#endif + + void* ret = mmap(hint, length, access_flag, map_flags, fd, 0); + if (ret == MAP_FAILED) { + s_allocPageErrorCode = errno; + ret = nullptr; + } + +#if defined(OS_ANDROID) + // On Android, anonymous mappings can have a name attached to them. This is + // useful for debugging, and double-checking memory attribution. + if (ret) { + // No error checking on purpose, testing only. + prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ret, length, + PageTagToName(page_tag)); + } +#endif + + return ret; +} + +void* TrimMappingInternal(void* base, + size_t base_length, + size_t trim_length, + PageAccessibilityConfiguration accessibility, + bool commit, + size_t pre_slack, + size_t post_slack) { + void* ret = base; + // We can resize the allocation run. Release unneeded memory before and after + // the aligned range. + if (pre_slack) { + int res = munmap(base, pre_slack); + CHECK(!res); + ret = reinterpret_cast(base) + pre_slack; + } + if (post_slack) { + int res = munmap(reinterpret_cast(ret) + trim_length, post_slack); + CHECK(!res); + } + return ret; +} + +bool TrySetSystemPagesAccessInternal( + void* address, + size_t length, + PageAccessibilityConfiguration accessibility) { + return 0 == mprotect(address, length, GetAccessFlags(accessibility)); +} + +void SetSystemPagesAccessInternal( + void* address, + size_t length, + PageAccessibilityConfiguration accessibility) { + CHECK_EQ(0, mprotect(address, length, GetAccessFlags(accessibility))); +} + +void FreePagesInternal(void* address, size_t length) { + CHECK(!munmap(address, length)); +} + +void DecommitSystemPagesInternal(void* address, size_t length) { + // In POSIX, there is no decommit concept. Discarding is an effective way of + // implementing the Windows semantics where the OS is allowed to not swap the + // pages in the region. 
+ // + // TODO(ajwong): Also explore setting PageInaccessible to make the protection + // semantics consistent between Windows and POSIX. This might have a perf cost + // though as both decommit and recommit would incur an extra syscall. + // http://crbug.com/766882 + DiscardSystemPages(address, length); +} + +bool RecommitSystemPagesInternal(void* address, + size_t length, + PageAccessibilityConfiguration accessibility) { +#if defined(OS_APPLE) + // On macOS, to update accounting, we need to make another syscall. For more + // details, see https://crbug.com/823915. + madvise(address, length, MADV_FREE_REUSE); +#endif + + // On POSIX systems, the caller need simply read the memory to recommit it. + // This has the correct behavior because the API requires the permissions to + // be the same as before decommitting and all configurations can read. + return true; +} + +void DiscardSystemPagesInternal(void* address, size_t length) { +#if defined(OS_APPLE) + int ret = madvise(address, length, MADV_FREE_REUSABLE); + if (ret) { + // MADV_FREE_REUSABLE sometimes fails, so fall back to MADV_DONTNEED. + ret = madvise(address, length, MADV_DONTNEED); + } + CHECK(0 == ret); +#else + // We have experimented with other flags, but with suboptimal results. + // + // MADV_FREE (Linux): Makes our memory measurements less predictable; + // performance benefits unclear. + // + // Therefore, we just do the simple thing: MADV_DONTNEED. 
+ CHECK(!madvise(address, length, MADV_DONTNEED)); +#endif +} + +} // namespace base +} // namespace pdfium + +#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_POSIX_H_ diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/page_allocator_internals_win.h b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/page_allocator_internals_win.h new file mode 100644 index 000000000..9174ee867 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/page_allocator_internals_win.h @@ -0,0 +1,145 @@ +// Copyright (c) 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_ +#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_ + +#include "third_party/base/allocator/partition_allocator/oom.h" +#include "third_party/base/allocator/partition_allocator/page_allocator_internal.h" + +namespace pdfium { +namespace base { + +// |VirtualAlloc| will fail if allocation at the hint address is blocked. +constexpr bool kHintIsAdvisory = false; +std::atomic s_allocPageErrorCode{ERROR_SUCCESS}; + +int GetAccessFlags(PageAccessibilityConfiguration accessibility) { + switch (accessibility) { + case PageRead: + return PAGE_READONLY; + case PageReadWrite: + return PAGE_READWRITE; + case PageReadExecute: + return PAGE_EXECUTE_READ; + case PageReadWriteExecute: + return PAGE_EXECUTE_READWRITE; + default: + NOTREACHED(); + FALLTHROUGH; + case PageInaccessible: + return PAGE_NOACCESS; + } +} + +void* SystemAllocPagesInternal(void* hint, + size_t length, + PageAccessibilityConfiguration accessibility, + PageTag page_tag, + bool commit) { + DWORD access_flag = GetAccessFlags(accessibility); + const DWORD type_flags = commit ? 
(MEM_RESERVE | MEM_COMMIT) : MEM_RESERVE; + void* ret = VirtualAlloc(hint, length, type_flags, access_flag); + if (ret == nullptr) { + s_allocPageErrorCode = GetLastError(); + } + return ret; +} + +void* TrimMappingInternal(void* base, + size_t base_length, + size_t trim_length, + PageAccessibilityConfiguration accessibility, + bool commit, + size_t pre_slack, + size_t post_slack) { + void* ret = base; + if (pre_slack || post_slack) { + // We cannot resize the allocation run. Free it and retry at the aligned + // address within the freed range. + ret = reinterpret_cast(base) + pre_slack; + FreePages(base, base_length); + ret = SystemAllocPages(ret, trim_length, accessibility, PageTag::kChromium, + commit); + } + return ret; +} + +bool TrySetSystemPagesAccessInternal( + void* address, + size_t length, + PageAccessibilityConfiguration accessibility) { + if (accessibility == PageInaccessible) + return VirtualFree(address, length, MEM_DECOMMIT) != 0; + return nullptr != VirtualAlloc(address, length, MEM_COMMIT, + GetAccessFlags(accessibility)); +} + +void SetSystemPagesAccessInternal( + void* address, + size_t length, + PageAccessibilityConfiguration accessibility) { + if (accessibility == PageInaccessible) { + if (!VirtualFree(address, length, MEM_DECOMMIT)) { + // We check `GetLastError` for `ERROR_SUCCESS` here so that in a crash + // report we get the error number. + CHECK_EQ(static_cast(ERROR_SUCCESS), GetLastError()); + } + } else { + if (!VirtualAlloc(address, length, MEM_COMMIT, + GetAccessFlags(accessibility))) { + int32_t error = GetLastError(); + if (error == ERROR_COMMITMENT_LIMIT) + OOM_CRASH(length); + // We check `GetLastError` for `ERROR_SUCCESS` here so that in a crash + // report we get the error number. 
+ CHECK_EQ(ERROR_SUCCESS, error); + } + } +} + +void FreePagesInternal(void* address, size_t length) { + CHECK(VirtualFree(address, 0, MEM_RELEASE)); +} + +void DecommitSystemPagesInternal(void* address, size_t length) { + SetSystemPagesAccess(address, length, PageInaccessible); +} + +bool RecommitSystemPagesInternal(void* address, + size_t length, + PageAccessibilityConfiguration accessibility) { + return TrySetSystemPagesAccess(address, length, accessibility); +} + +void DiscardSystemPagesInternal(void* address, size_t length) { + // On Windows, discarded pages are not returned to the system immediately and + // not guaranteed to be zeroed when returned to the application. + using DiscardVirtualMemoryFunction = + DWORD(WINAPI*)(PVOID virtualAddress, SIZE_T size); + static DiscardVirtualMemoryFunction discard_virtual_memory = + reinterpret_cast(-1); + if (discard_virtual_memory == + reinterpret_cast(-1)) + discard_virtual_memory = + reinterpret_cast(GetProcAddress( + GetModuleHandle(L"Kernel32.dll"), "DiscardVirtualMemory")); + // Use DiscardVirtualMemory when available because it releases faster than + // MEM_RESET. + DWORD ret = 1; + if (discard_virtual_memory) { + ret = discard_virtual_memory(address, length); + } + // DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on + // failure. 
+ if (ret) { + void* ptr = VirtualAlloc(address, length, MEM_RESET, PAGE_READWRITE); + CHECK(ptr); + } +} + +} // namespace base +} // namespace pdfium + +#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_ diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_alloc.cc b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_alloc.cc new file mode 100644 index 000000000..910891b38 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_alloc.cc @@ -0,0 +1,852 @@ +// Copyright (c) 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "third_party/base/allocator/partition_allocator/partition_alloc.h" + +#include + +#include +#include + +#include "third_party/base/allocator/partition_allocator/partition_direct_map_extent.h" +#include "third_party/base/allocator/partition_allocator/partition_oom.h" +#include "third_party/base/allocator/partition_allocator/partition_page.h" +#include "third_party/base/allocator/partition_allocator/spin_lock.h" + +namespace pdfium { +namespace base { + +namespace { + +bool InitializeOnce() { + // We mark the sentinel bucket/page as free to make sure it is skipped by our + // logic to find a new active page. + internal::PartitionBucket::get_sentinel_bucket()->active_pages_head = + internal::PartitionPage::get_sentinel_page(); + return true; +} + +} // namespace + +// Two partition pages are used as guard / metadata page so make sure the super +// page size is bigger. 
+static_assert(kPartitionPageSize * 4 <= kSuperPageSize, "ok super page size"); +static_assert(!(kSuperPageSize % kPartitionPageSize), "ok super page multiple"); +// Four system pages gives us room to hack out a still-guard-paged piece +// of metadata in the middle of a guard partition page. +static_assert(kSystemPageSize * 4 <= kPartitionPageSize, + "ok partition page size"); +static_assert(!(kPartitionPageSize % kSystemPageSize), + "ok partition page multiple"); +static_assert(sizeof(internal::PartitionPage) <= kPageMetadataSize, + "PartitionPage should not be too big"); +static_assert(sizeof(internal::PartitionBucket) <= kPageMetadataSize, + "PartitionBucket should not be too big"); +static_assert(sizeof(internal::PartitionSuperPageExtentEntry) <= + kPageMetadataSize, + "PartitionSuperPageExtentEntry should not be too big"); +static_assert(kPageMetadataSize * kNumPartitionPagesPerSuperPage <= + kSystemPageSize, + "page metadata fits in hole"); +// Limit to prevent callers accidentally overflowing an int size. +static_assert(kGenericMaxDirectMapped <= + (1UL << 31) + kPageAllocationGranularity, + "maximum direct mapped allocation"); +// Check that some of our zanier calculations worked out as expected. 
+static_assert(kGenericSmallestBucket == 8, "generic smallest bucket"); +static_assert(kGenericMaxBucketed == 983040, "generic max bucketed"); +static_assert(kMaxSystemPagesPerSlotSpan < (1 << 8), + "System pages per slot span must be less than 128."); + +internal::PartitionRootBase::PartitionRootBase() = default; +internal::PartitionRootBase::~PartitionRootBase() = default; +PartitionRoot::PartitionRoot() = default; +PartitionRoot::~PartitionRoot() = default; +PartitionRootGeneric::PartitionRootGeneric() = default; +PartitionRootGeneric::~PartitionRootGeneric() = default; +PartitionAllocatorGeneric::PartitionAllocatorGeneric() = default; +PartitionAllocatorGeneric::~PartitionAllocatorGeneric() = default; + +subtle::SpinLock* GetLock() { + static subtle::SpinLock* s_initialized_lock = nullptr; + if (!s_initialized_lock) + s_initialized_lock = new subtle::SpinLock(); + return s_initialized_lock; +} + +OomFunction internal::PartitionRootBase::g_oom_handling_function = nullptr; +std::atomic PartitionAllocHooks::hooks_enabled_(false); +subtle::SpinLock PartitionAllocHooks::set_hooks_lock_; +std::atomic + PartitionAllocHooks::allocation_observer_hook_(nullptr); +std::atomic + PartitionAllocHooks::free_observer_hook_(nullptr); +std::atomic + PartitionAllocHooks::allocation_override_hook_(nullptr); +std::atomic + PartitionAllocHooks::free_override_hook_(nullptr); +std::atomic + PartitionAllocHooks::realloc_override_hook_(nullptr); + +void PartitionAllocHooks::SetObserverHooks(AllocationObserverHook* alloc_hook, + FreeObserverHook* free_hook) { + subtle::SpinLock::Guard guard(set_hooks_lock_); + + // Chained hooks are not supported. Registering a non-null hook when a + // non-null hook is already registered indicates somebody is trying to + // overwrite a hook. 
+ CHECK((!allocation_observer_hook_ && !free_observer_hook_) || + (!alloc_hook && !free_hook)); + allocation_observer_hook_ = alloc_hook; + free_observer_hook_ = free_hook; + + hooks_enabled_ = allocation_observer_hook_ || allocation_override_hook_; +} + +void PartitionAllocHooks::SetOverrideHooks(AllocationOverrideHook* alloc_hook, + FreeOverrideHook* free_hook, + ReallocOverrideHook realloc_hook) { + subtle::SpinLock::Guard guard(set_hooks_lock_); + + CHECK((!allocation_override_hook_ && !free_override_hook_ && + !realloc_override_hook_) || + (!alloc_hook && !free_hook && !realloc_hook)); + allocation_override_hook_ = alloc_hook; + free_override_hook_ = free_hook; + realloc_override_hook_ = realloc_hook; + + hooks_enabled_ = allocation_observer_hook_ || allocation_override_hook_; +} + +void PartitionAllocHooks::AllocationObserverHookIfEnabled( + void* address, + size_t size, + const char* type_name) { + if (AllocationObserverHook* hook = + allocation_observer_hook_.load(std::memory_order_relaxed)) { + hook(address, size, type_name); + } +} + +bool PartitionAllocHooks::AllocationOverrideHookIfEnabled( + void** out, + int flags, + size_t size, + const char* type_name) { + if (AllocationOverrideHook* hook = + allocation_override_hook_.load(std::memory_order_relaxed)) { + return hook(out, flags, size, type_name); + } + return false; +} + +void PartitionAllocHooks::FreeObserverHookIfEnabled(void* address) { + if (FreeObserverHook* hook = + free_observer_hook_.load(std::memory_order_relaxed)) { + hook(address); + } +} + +bool PartitionAllocHooks::FreeOverrideHookIfEnabled(void* address) { + if (FreeOverrideHook* hook = + free_override_hook_.load(std::memory_order_relaxed)) { + return hook(address); + } + return false; +} + +void PartitionAllocHooks::ReallocObserverHookIfEnabled(void* old_address, + void* new_address, + size_t size, + const char* type_name) { + // Report a reallocation as a free followed by an allocation. 
+ AllocationObserverHook* allocation_hook = + allocation_observer_hook_.load(std::memory_order_relaxed); + FreeObserverHook* free_hook = + free_observer_hook_.load(std::memory_order_relaxed); + if (allocation_hook && free_hook) { + free_hook(old_address); + allocation_hook(new_address, size, type_name); + } +} +bool PartitionAllocHooks::ReallocOverrideHookIfEnabled(size_t* out, + void* address) { + if (ReallocOverrideHook* hook = + realloc_override_hook_.load(std::memory_order_relaxed)) { + return hook(out, address); + } + return false; +} + +static void PartitionAllocBaseInit(internal::PartitionRootBase* root) { + DCHECK(!root->initialized); + static bool initialized = InitializeOnce(); + static_cast(initialized); + + // This is a "magic" value so we can test if a root pointer is valid. + root->inverted_self = ~reinterpret_cast(root); + root->initialized = true; +} + +void PartitionAllocGlobalInit(OomFunction on_out_of_memory) { + DCHECK(on_out_of_memory); + internal::PartitionRootBase::g_oom_handling_function = on_out_of_memory; +} + +void PartitionRoot::Init(size_t bucket_count, size_t maximum_allocation) { + PartitionAllocBaseInit(this); + + num_buckets = bucket_count; + max_allocation = maximum_allocation; + for (size_t i = 0; i < num_buckets; ++i) { + internal::PartitionBucket& bucket = buckets()[i]; + bucket.Init(i == 0 ? kAllocationGranularity : (i << kBucketShift)); + } +} + +void PartitionRootGeneric::Init() { + subtle::SpinLock::Guard guard(lock); + + PartitionAllocBaseInit(this); + + // Precalculate some shift and mask constants used in the hot path. + // Example: malloc(41) == 101001 binary. + // Order is 6 (1 << 6-1) == 32 is highest bit set. + // order_index is the next three MSB == 010 == 2. + // sub_order_index_mask is a mask for the remaining bits == 11 (masking to 01 + // for + // the sub_order_index). 
+  size_t order;
+  for (order = 0; order <= kBitsPerSizeT; ++order) {
+    size_t order_index_shift;
+    if (order < kGenericNumBucketsPerOrderBits + 1)
+      order_index_shift = 0;
+    else
+      order_index_shift = order - (kGenericNumBucketsPerOrderBits + 1);
+    order_index_shifts[order] = order_index_shift;
+    size_t sub_order_index_mask;
+    if (order == kBitsPerSizeT) {
+      // This avoids invoking undefined behavior for an excessive shift.
+      sub_order_index_mask =
+          static_cast<size_t>(-1) >> (kGenericNumBucketsPerOrderBits + 1);
+    } else {
+      sub_order_index_mask = ((static_cast<size_t>(1) << order) - 1) >>
+                             (kGenericNumBucketsPerOrderBits + 1);
+    }
+    order_sub_index_masks[order] = sub_order_index_mask;
+  }
+
+  // Set up the actual usable buckets first.
+  // Note that typical values (i.e. min allocation size of 8) will result in
+  // pseudo buckets (size==9 etc. or more generally, size is not a multiple
+  // of the smallest allocation granularity).
+  // We avoid them in the bucket lookup map, but we tolerate them to keep the
+  // code simpler and the structures more generic.
+  size_t i, j;
+  size_t current_size = kGenericSmallestBucket;
+  size_t current_increment =
+      kGenericSmallestBucket >> kGenericNumBucketsPerOrderBits;
+  internal::PartitionBucket* bucket = &buckets[0];
+  for (i = 0; i < kGenericNumBucketedOrders; ++i) {
+    for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
+      bucket->Init(current_size);
+      // Disable pseudo buckets so that touching them faults.
+      if (current_size % kGenericSmallestBucket)
+        bucket->active_pages_head = nullptr;
+      current_size += current_increment;
+      ++bucket;
+    }
+    current_increment <<= 1;
+  }
+  DCHECK(current_size == 1 << kGenericMaxBucketedOrder);
+  DCHECK(bucket == &buckets[0] + kGenericNumBuckets);
+
+  // Then set up the fast size -> bucket lookup table. 
+  bucket = &buckets[0];
+  internal::PartitionBucket** bucket_ptr = &bucket_lookups[0];
+  for (order = 0; order <= kBitsPerSizeT; ++order) {
+    for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
+      if (order < kGenericMinBucketedOrder) {
+        // Use the bucket of the finest granularity for malloc(0) etc.
+        *bucket_ptr++ = &buckets[0];
+      } else if (order > kGenericMaxBucketedOrder) {
+        *bucket_ptr++ = internal::PartitionBucket::get_sentinel_bucket();
+      } else {
+        internal::PartitionBucket* valid_bucket = bucket;
+        // Skip over invalid buckets.
+        while (valid_bucket->slot_size % kGenericSmallestBucket)
+          valid_bucket++;
+        *bucket_ptr++ = valid_bucket;
+        bucket++;
+      }
+    }
+  }
+  DCHECK(bucket == &buckets[0] + kGenericNumBuckets);
+  DCHECK(bucket_ptr == &bucket_lookups[0] +
+                           ((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder));
+  // And there's one last bucket lookup that will be hit for e.g. malloc(-1),
+  // which tries to overflow to a non-existent order.
+  *bucket_ptr = internal::PartitionBucket::get_sentinel_bucket();
+}
+
+bool PartitionReallocDirectMappedInPlace(PartitionRootGeneric* root,
+                                         internal::PartitionPage* page,
+                                         size_t raw_size) {
+  DCHECK(page->bucket->is_direct_mapped());
+
+  raw_size = internal::PartitionCookieSizeAdjustAdd(raw_size);
+
+  // Note that the new size might be a bucketed size; this function is called
+  // whenever we're reallocating a direct mapped allocation.
+  size_t new_size = internal::PartitionBucket::get_direct_map_size(raw_size);
+  if (new_size < kGenericMinDirectMappedDownsize)
+    return false;
+
+  // bucket->slot_size is the current size of the allocation.
+  size_t current_size = page->bucket->slot_size;
+  char* char_ptr = static_cast<char*>(internal::PartitionPage::ToPointer(page));
+  if (new_size == current_size) {
+    // No need to move any memory around, but update size and cookie below. 
+ } else if (new_size < current_size) { + size_t map_size = + internal::PartitionDirectMapExtent::FromPage(page)->map_size; + + // Don't reallocate in-place if new size is less than 80 % of the full + // map size, to avoid holding on to too much unused address space. + if ((new_size / kSystemPageSize) * 5 < (map_size / kSystemPageSize) * 4) + return false; + + // Shrink by decommitting unneeded pages and making them inaccessible. + size_t decommit_size = current_size - new_size; + root->DecommitSystemPages(char_ptr + new_size, decommit_size); + SetSystemPagesAccess(char_ptr + new_size, decommit_size, PageInaccessible); + } else if (new_size <= + internal::PartitionDirectMapExtent::FromPage(page)->map_size) { + // Grow within the actually allocated memory. Just need to make the + // pages accessible again. + size_t recommit_size = new_size - current_size; + SetSystemPagesAccess(char_ptr + current_size, recommit_size, PageReadWrite); + root->RecommitSystemPages(char_ptr + current_size, recommit_size); + +#if DCHECK_IS_ON() + memset(char_ptr + current_size, kUninitializedByte, recommit_size); +#endif + } else { + // We can't perform the realloc in-place. + // TODO: support this too when possible. + return false; + } + +#if DCHECK_IS_ON() + // Write a new trailing cookie. 
+ internal::PartitionCookieWriteValue(char_ptr + raw_size - + internal::kCookieSize); +#endif + + page->set_raw_size(raw_size); + DCHECK(page->get_raw_size() == raw_size); + + page->bucket->slot_size = new_size; + return true; +} + +void* PartitionReallocGenericFlags(PartitionRootGeneric* root, + int flags, + void* ptr, + size_t new_size, + const char* type_name) { +#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) + CHECK_MAX_SIZE_OR_RETURN_NULLPTR(new_size, flags); + void* result = realloc(ptr, new_size); + CHECK(result || flags & PartitionAllocReturnNull); + return result; +#else + if (UNLIKELY(!ptr)) + return PartitionAllocGenericFlags(root, flags, new_size, type_name); + if (UNLIKELY(!new_size)) { + root->Free(ptr); + return nullptr; + } + + if (new_size > kGenericMaxDirectMapped) { + if (flags & PartitionAllocReturnNull) + return nullptr; + internal::PartitionExcessiveAllocationSize(new_size); + } + + const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled(); + bool overridden = false; + size_t actual_old_size; + if (UNLIKELY(hooks_enabled)) { + overridden = PartitionAllocHooks::ReallocOverrideHookIfEnabled( + &actual_old_size, ptr); + } + if (LIKELY(!overridden)) { + internal::PartitionPage* page = internal::PartitionPage::FromPointer( + internal::PartitionCookieFreePointerAdjust(ptr)); + bool success = false; + { + subtle::SpinLock::Guard guard{root->lock}; + // TODO(palmer): See if we can afford to make this a CHECK. + DCHECK(root->IsValidPage(page)); + + if (UNLIKELY(page->bucket->is_direct_mapped())) { + // We may be able to perform the realloc in place by changing the + // accessibility of memory pages and, if reducing the size, decommitting + // them. 
+ success = PartitionReallocDirectMappedInPlace(root, page, new_size); + } + } + if (success) { + if (UNLIKELY(hooks_enabled)) { + PartitionAllocHooks::ReallocObserverHookIfEnabled(ptr, ptr, new_size, + type_name); + } + return ptr; + } + + const size_t actual_new_size = root->ActualSize(new_size); + actual_old_size = PartitionAllocGetSize(ptr); + + // TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the + // new size is a significant percentage smaller. We could do the same if we + // determine it is a win. + if (actual_new_size == actual_old_size) { + // Trying to allocate a block of size |new_size| would give us a block of + // the same size as the one we've already got, so re-use the allocation + // after updating statistics (and cookies, if present). + page->set_raw_size(internal::PartitionCookieSizeAdjustAdd(new_size)); +#if DCHECK_IS_ON() + // Write a new trailing cookie when it is possible to keep track of + // |new_size| via the raw size pointer. + if (page->get_raw_size_ptr()) + internal::PartitionCookieWriteValue(static_cast(ptr) + new_size); +#endif + return ptr; + } + } + + // This realloc cannot be resized in-place. Sadness. 
+ void* ret = PartitionAllocGenericFlags(root, flags, new_size, type_name); + if (!ret) { + if (flags & PartitionAllocReturnNull) + return nullptr; + internal::PartitionExcessiveAllocationSize(new_size); + } + + size_t copy_size = actual_old_size; + if (new_size < copy_size) + copy_size = new_size; + + memcpy(ret, ptr, copy_size); + root->Free(ptr); + return ret; +#endif +} + +void* PartitionRootGeneric::Realloc(void* ptr, + size_t new_size, + const char* type_name) { + return PartitionReallocGenericFlags(this, 0, ptr, new_size, type_name); +} + +void* PartitionRootGeneric::TryRealloc(void* ptr, + size_t new_size, + const char* type_name) { + return PartitionReallocGenericFlags(this, PartitionAllocReturnNull, ptr, + new_size, type_name); +} + +static size_t PartitionPurgePage(internal::PartitionPage* page, bool discard) { + const internal::PartitionBucket* bucket = page->bucket; + size_t slot_size = bucket->slot_size; + if (slot_size < kSystemPageSize || !page->num_allocated_slots) + return 0; + + size_t bucket_num_slots = bucket->get_slots_per_span(); + size_t discardable_bytes = 0; + + size_t raw_size = page->get_raw_size(); + if (raw_size) { + uint32_t used_bytes = static_cast(RoundUpToSystemPage(raw_size)); + discardable_bytes = bucket->slot_size - used_bytes; + if (discardable_bytes && discard) { + char* ptr = + reinterpret_cast(internal::PartitionPage::ToPointer(page)); + ptr += used_bytes; + DiscardSystemPages(ptr, discardable_bytes); + } + return discardable_bytes; + } + + constexpr size_t kMaxSlotCount = + (kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) / kSystemPageSize; + DCHECK(bucket_num_slots <= kMaxSlotCount); + DCHECK(page->num_unprovisioned_slots < bucket_num_slots); + size_t num_slots = bucket_num_slots - page->num_unprovisioned_slots; + char slot_usage[kMaxSlotCount]; +#if !defined(OS_WIN) + // The last freelist entry should not be discarded when using OS_WIN. + // DiscardVirtualMemory makes the contents of discarded memory undefined. 
+  size_t last_slot = static_cast<size_t>(-1);
+#endif
+  memset(slot_usage, 1, num_slots);
+  char* ptr = reinterpret_cast<char*>(internal::PartitionPage::ToPointer(page));
+  // First, walk the freelist for this page and make a bitmap of which slots
+  // are not in use.
+  for (internal::PartitionFreelistEntry* entry = page->freelist_head; entry;
+       /**/) {
+    size_t slot_index = (reinterpret_cast<char*>(entry) - ptr) / slot_size;
+    DCHECK(slot_index < num_slots);
+    slot_usage[slot_index] = 0;
+    entry = internal::EncodedPartitionFreelistEntry::Decode(entry->next);
+#if !defined(OS_WIN)
+    // If we have a slot where the masked freelist entry is 0, we can actually
+    // discard that freelist entry because touching a discarded page is
+    // guaranteed to return original content or 0. (Note that this optimization
+    // won't fire on big-endian machines because the masking function is
+    // negation.)
+    if (!internal::PartitionFreelistEntry::Encode(entry))
+      last_slot = slot_index;
+#endif
+  }
+
+  // If the slot(s) at the end of the slot span are not in use, we can truncate
+  // them entirely and rewrite the freelist.
+  size_t truncated_slots = 0;
+  while (!slot_usage[num_slots - 1]) {
+    truncated_slots++;
+    num_slots--;
+    DCHECK(num_slots);
+  }
+  // First, do the work of calculating the discardable bytes. Don't actually
+  // discard anything unless the discard flag was passed in.
+  if (truncated_slots) {
+    size_t unprovisioned_bytes = 0;
+    char* begin_ptr = ptr + (num_slots * slot_size);
+    char* end_ptr = begin_ptr + (slot_size * truncated_slots);
+    begin_ptr = reinterpret_cast<char*>(
+        RoundUpToSystemPage(reinterpret_cast<size_t>(begin_ptr)));
+    // We round the end pointer here up and not down because we're at the end of
+    // a slot span, so we "own" all the way up the page boundary.
+    end_ptr = reinterpret_cast<char*>(
+        RoundUpToSystemPage(reinterpret_cast<size_t>(end_ptr)));
+    DCHECK(end_ptr <= ptr + bucket->get_bytes_per_span());
+    if (begin_ptr < end_ptr) {
+      unprovisioned_bytes = end_ptr - begin_ptr;
+      discardable_bytes += unprovisioned_bytes;
+    }
+    if (unprovisioned_bytes && discard) {
+      DCHECK(truncated_slots > 0);
+      size_t num_new_entries = 0;
+      page->num_unprovisioned_slots += static_cast<uint16_t>(truncated_slots);
+
+      // Rewrite the freelist.
+      internal::PartitionFreelistEntry* head = nullptr;
+      internal::PartitionFreelistEntry* back = head;
+      for (size_t slot_index = 0; slot_index < num_slots; ++slot_index) {
+        if (slot_usage[slot_index])
+          continue;
+
+        auto* entry = reinterpret_cast<internal::PartitionFreelistEntry*>(
+            ptr + (slot_size * slot_index));
+        if (!head) {
+          head = entry;
+          back = entry;
+        } else {
+          back->next = internal::PartitionFreelistEntry::Encode(entry);
+          back = entry;
+        }
+        num_new_entries++;
+#if !defined(OS_WIN)
+        last_slot = slot_index;
+#endif
+      }
+
+      page->freelist_head = head;
+      if (back)
+        back->next = internal::PartitionFreelistEntry::Encode(nullptr);
+
+      DCHECK(num_new_entries == num_slots - page->num_allocated_slots);
+      // Discard the memory.
+      DiscardSystemPages(begin_ptr, unprovisioned_bytes);
+    }
+  }
+
+  // Next, walk the slots and for any not in use, consider where the system page
+  // boundaries occur. We can release any system pages back to the system as
+  // long as we don't interfere with a freelist pointer or an adjacent slot.
+  for (size_t i = 0; i < num_slots; ++i) {
+    if (slot_usage[i])
+      continue;
+    // The first address we can safely discard is just after the freelist
+    // pointer. There's one quirk: if the freelist pointer is actually NULL, we
+    // can discard that pointer value too.
+    char* begin_ptr = ptr + (i * slot_size);
+    char* end_ptr = begin_ptr + slot_size;
+#if !defined(OS_WIN)
+    if (i != last_slot)
+      begin_ptr += sizeof(internal::PartitionFreelistEntry);
+#else
+    begin_ptr += sizeof(internal::PartitionFreelistEntry);
+#endif
+    begin_ptr = reinterpret_cast<char*>(
+        RoundUpToSystemPage(reinterpret_cast<size_t>(begin_ptr)));
+    end_ptr = reinterpret_cast<char*>(
+        RoundDownToSystemPage(reinterpret_cast<size_t>(end_ptr)));
+    if (begin_ptr < end_ptr) {
+      size_t partial_slot_bytes = end_ptr - begin_ptr;
+      discardable_bytes += partial_slot_bytes;
+      if (discard)
+        DiscardSystemPages(begin_ptr, partial_slot_bytes);
+    }
+  }
+  return discardable_bytes;
+}
+
+static void PartitionPurgeBucket(internal::PartitionBucket* bucket) {
+  if (bucket->active_pages_head !=
+      internal::PartitionPage::get_sentinel_page()) {
+    for (internal::PartitionPage* page = bucket->active_pages_head; page;
+         page = page->next_page) {
+      DCHECK(page != internal::PartitionPage::get_sentinel_page());
+      PartitionPurgePage(page, true);
+    }
+  }
+}
+
+void PartitionRoot::PurgeMemory(int flags) {
+  if (flags & PartitionPurgeDecommitEmptyPages)
+    DecommitEmptyPages();
+  // We don't currently do anything for PartitionPurgeDiscardUnusedSystemPages
+  // here because that flag is only useful for allocations >= system page size.
+  // We only have allocations that large inside generic partitions at the
+  // moment.
+}
+
+void PartitionRootGeneric::PurgeMemory(int flags) {
+  subtle::SpinLock::Guard guard(lock);
+  if (flags & PartitionPurgeDecommitEmptyPages)
+    DecommitEmptyPages();
+  if (flags & PartitionPurgeDiscardUnusedSystemPages) {
+    for (size_t i = 0; i < kGenericNumBuckets; ++i) {
+      internal::PartitionBucket* bucket = &buckets[i];
+      if (bucket->slot_size >= kSystemPageSize)
+        PartitionPurgeBucket(bucket);
+    }
+  }
+}
+
+static void PartitionDumpPageStats(PartitionBucketMemoryStats* stats_out,
+                                   internal::PartitionPage* page) {
+  uint16_t bucket_num_slots = page->bucket->get_slots_per_span();
+
+  if (page->is_decommitted()) {
+    ++stats_out->num_decommitted_pages;
+    return;
+  }
+
+  stats_out->discardable_bytes += PartitionPurgePage(page, false);
+
+  size_t raw_size = page->get_raw_size();
+  if (raw_size) {
+    stats_out->active_bytes += static_cast<uint32_t>(raw_size);
+  } else {
+    stats_out->active_bytes +=
+        (page->num_allocated_slots * stats_out->bucket_slot_size);
+  }
+
+  size_t page_bytes_resident =
+      RoundUpToSystemPage((bucket_num_slots - page->num_unprovisioned_slots) *
+                          stats_out->bucket_slot_size);
+  stats_out->resident_bytes += page_bytes_resident;
+  if (page->is_empty()) {
+    stats_out->decommittable_bytes += page_bytes_resident;
+    ++stats_out->num_empty_pages;
+  } else if (page->is_full()) {
+    ++stats_out->num_full_pages;
+  } else {
+    DCHECK(page->is_active());
+    ++stats_out->num_active_pages;
+  }
+}
+
+static void PartitionDumpBucketStats(PartitionBucketMemoryStats* stats_out,
+                                     const internal::PartitionBucket* bucket) {
+  DCHECK(!bucket->is_direct_mapped());
+  stats_out->is_valid = false;
+  // If the active page list is empty (==
+  // internal::PartitionPage::get_sentinel_page()), the bucket might still need
+  // to be reported if it has a list of empty, decommitted or full pages.
+  if (bucket->active_pages_head ==
+          internal::PartitionPage::get_sentinel_page() &&
+      !bucket->empty_pages_head && !bucket->decommitted_pages_head &&
+      !bucket->num_full_pages)
+    return;
+
+  memset(stats_out, '\0', sizeof(*stats_out));
+  stats_out->is_valid = true;
+  stats_out->is_direct_map = false;
+  stats_out->num_full_pages = static_cast<uint32_t>(bucket->num_full_pages);
+  stats_out->bucket_slot_size = bucket->slot_size;
+  uint16_t bucket_num_slots = bucket->get_slots_per_span();
+  size_t bucket_useful_storage = stats_out->bucket_slot_size * bucket_num_slots;
+  stats_out->allocated_page_size = bucket->get_bytes_per_span();
+  stats_out->active_bytes = bucket->num_full_pages * bucket_useful_storage;
+  stats_out->resident_bytes =
+      bucket->num_full_pages * stats_out->allocated_page_size;
+
+  for (internal::PartitionPage* page = bucket->empty_pages_head; page;
+       page = page->next_page) {
+    DCHECK(page->is_empty() || page->is_decommitted());
+    PartitionDumpPageStats(stats_out, page);
+  }
+  for (internal::PartitionPage* page = bucket->decommitted_pages_head; page;
+       page = page->next_page) {
+    DCHECK(page->is_decommitted());
+    PartitionDumpPageStats(stats_out, page);
+  }
+
+  if (bucket->active_pages_head !=
+      internal::PartitionPage::get_sentinel_page()) {
+    for (internal::PartitionPage* page = bucket->active_pages_head; page;
+         page = page->next_page) {
+      DCHECK(page != internal::PartitionPage::get_sentinel_page());
+      PartitionDumpPageStats(stats_out, page);
+    }
+  }
+}
+
+void PartitionRootGeneric::DumpStats(const char* partition_name,
+                                     bool is_light_dump,
+                                     PartitionStatsDumper* dumper) {
+  PartitionMemoryStats stats = {0};
+  stats.total_mmapped_bytes =
+      total_size_of_super_pages + total_size_of_direct_mapped_pages;
+  stats.total_committed_bytes = total_size_of_committed_pages;
+
+  size_t direct_mapped_allocations_total_size = 0;
+
+  static const size_t kMaxReportableDirectMaps = 4096;
+
+  // Allocate on the heap rather than on the stack to avoid stack overflow
+  // skirmishes (on Windows, in particular).
+  std::unique_ptr<uint32_t[]> direct_map_lengths = nullptr;
+  if (!is_light_dump) {
+    direct_map_lengths =
+        std::unique_ptr<uint32_t[]>(new uint32_t[kMaxReportableDirectMaps]);
+  }
+
+  PartitionBucketMemoryStats bucket_stats[kGenericNumBuckets];
+  size_t num_direct_mapped_allocations = 0;
+  {
+    subtle::SpinLock::Guard guard(lock);
+
+    for (size_t i = 0; i < kGenericNumBuckets; ++i) {
+      const internal::PartitionBucket* bucket = &buckets[i];
+      // Don't report the pseudo buckets that the generic allocator sets up in
+      // order to preserve a fast size->bucket map (see
+      // PartitionRootGeneric::Init() for details).
+      if (!bucket->active_pages_head)
+        bucket_stats[i].is_valid = false;
+      else
+        PartitionDumpBucketStats(&bucket_stats[i], bucket);
+      if (bucket_stats[i].is_valid) {
+        stats.total_resident_bytes += bucket_stats[i].resident_bytes;
+        stats.total_active_bytes += bucket_stats[i].active_bytes;
+        stats.total_decommittable_bytes += bucket_stats[i].decommittable_bytes;
+        stats.total_discardable_bytes += bucket_stats[i].discardable_bytes;
+      }
+    }
+
+    for (internal::PartitionDirectMapExtent* extent = direct_map_list;
+         extent && num_direct_mapped_allocations < kMaxReportableDirectMaps;
+         extent = extent->next_extent, ++num_direct_mapped_allocations) {
+      DCHECK(!extent->next_extent ||
+             extent->next_extent->prev_extent == extent);
+      size_t slot_size = extent->bucket->slot_size;
+      direct_mapped_allocations_total_size += slot_size;
+      if (is_light_dump)
+        continue;
+      direct_map_lengths[num_direct_mapped_allocations] = slot_size;
+    }
+  }
+
+  if (!is_light_dump) {
+    // Call |PartitionsDumpBucketStats| after collecting stats because it can
+    // try to allocate using |PartitionRootGeneric::Alloc()| and it can't
+    // obtain the lock.
+    for (size_t i = 0; i < kGenericNumBuckets; ++i) {
+      if (bucket_stats[i].is_valid)
+        dumper->PartitionsDumpBucketStats(partition_name, &bucket_stats[i]);
+    }
+
+    for (size_t i = 0; i < num_direct_mapped_allocations; ++i) {
+      uint32_t size = direct_map_lengths[i];
+
+      PartitionBucketMemoryStats mapped_stats = {};
+      mapped_stats.is_valid = true;
+      mapped_stats.is_direct_map = true;
+      mapped_stats.num_full_pages = 1;
+      mapped_stats.allocated_page_size = size;
+      mapped_stats.bucket_slot_size = size;
+      mapped_stats.active_bytes = size;
+      mapped_stats.resident_bytes = size;
+      dumper->PartitionsDumpBucketStats(partition_name, &mapped_stats);
+    }
+  }
+
+  stats.total_resident_bytes += direct_mapped_allocations_total_size;
+  stats.total_active_bytes += direct_mapped_allocations_total_size;
+  dumper->PartitionDumpTotals(partition_name, &stats);
+}
+
+void PartitionRoot::DumpStats(const char* partition_name,
+                              bool is_light_dump,
+                              PartitionStatsDumper* dumper) {
+  PartitionMemoryStats stats = {0};
+  stats.total_mmapped_bytes = total_size_of_super_pages;
+  stats.total_committed_bytes = total_size_of_committed_pages;
+  DCHECK(!total_size_of_direct_mapped_pages);
+
+  static constexpr size_t kMaxReportableBuckets = 4096 / sizeof(void*);
+  std::unique_ptr<PartitionBucketMemoryStats[]> memory_stats;
+  if (!is_light_dump) {
+    memory_stats = std::unique_ptr<PartitionBucketMemoryStats[]>(
+        new PartitionBucketMemoryStats[kMaxReportableBuckets]);
+  }
+
+  const size_t partition_num_buckets = num_buckets;
+  DCHECK(partition_num_buckets <= kMaxReportableBuckets);
+
+  for (size_t i = 0; i < partition_num_buckets; ++i) {
+    PartitionBucketMemoryStats bucket_stats = {0};
+    PartitionDumpBucketStats(&bucket_stats, &buckets()[i]);
+    if (bucket_stats.is_valid) {
+      stats.total_resident_bytes += bucket_stats.resident_bytes;
+      stats.total_active_bytes += bucket_stats.active_bytes;
+      stats.total_decommittable_bytes += bucket_stats.decommittable_bytes;
+      stats.total_discardable_bytes += bucket_stats.discardable_bytes;
+    }
+    if (!is_light_dump) {
+      if (bucket_stats.is_valid)
+        memory_stats[i] = bucket_stats;
+      else
+        memory_stats[i].is_valid = false;
+    }
+  }
+  if (!is_light_dump) {
+    // PartitionsDumpBucketStats is called after collecting stats because it
+    // can use PartitionRoot::Alloc() to allocate and this can affect the
+    // statistics.
+    for (size_t i = 0; i < partition_num_buckets; ++i) {
+      if (memory_stats[i].is_valid)
+        dumper->PartitionsDumpBucketStats(partition_name, &memory_stats[i]);
+    }
+  }
+  dumper->PartitionDumpTotals(partition_name, &stats);
+}
+
+}  // namespace base
+}  // namespace pdfium
diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_alloc.h b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_alloc.h
new file mode 100644
index 000000000..2dc62b655
--- /dev/null
+++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_alloc.h
@@ -0,0 +1,532 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
+#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
+
+// DESCRIPTION
+// PartitionRoot::Alloc() / PartitionRootGeneric::Alloc() and PartitionFree() /
+// PartitionRootGeneric::Free() are approximately analogous to malloc() and
+// free().
+//
+// The main difference is that a PartitionRoot / PartitionRootGeneric object
+// must be supplied to these functions, representing a specific "heap partition"
+// that will be used to satisfy the allocation. Different partitions are
+// guaranteed to exist in separate address spaces, including being separate from
+// the main system heap. If the contained objects are all freed, physical memory
+// is returned to the system but the address space remains reserved.
+// See PartitionAlloc.md for other security properties PartitionAlloc provides. +// +// THE ONLY LEGITIMATE WAY TO OBTAIN A PartitionRoot IS THROUGH THE +// SizeSpecificPartitionAllocator / PartitionAllocatorGeneric classes. To +// minimize the instruction count to the fullest extent possible, the +// PartitionRoot is really just a header adjacent to other data areas provided +// by the allocator class. +// +// The PartitionRoot::Alloc() variant of the API has the following caveats: +// - Allocations and frees against a single partition must be single threaded. +// - Allocations must not exceed a max size, chosen at compile-time via a +// templated parameter to PartitionAllocator. +// - Allocation sizes must be aligned to the system pointer size. +// - Allocations are bucketed exactly according to size. +// +// And for PartitionRootGeneric::Alloc(): +// - Multi-threaded use against a single partition is ok; locking is handled. +// - Allocations of any arbitrary size can be handled (subject to a limit of +// INT_MAX bytes for security reasons). +// - Bucketing is by approximate size, for example an allocation of 4000 bytes +// might be placed into a 4096-byte bucket. Bucket sizes are chosen to try and +// keep worst-case waste to ~10%. +// +// The allocators are designed to be extremely fast, thanks to the following +// properties and design: +// - Just two single (reasonably predicatable) branches in the hot / fast path +// for both allocating and (significantly) freeing. +// - A minimal number of operations in the hot / fast path, with the slow paths +// in separate functions, leading to the possibility of inlining. +// - Each partition page (which is usually multiple physical pages) has a +// metadata structure which allows fast mapping of free() address to an +// underlying bucket. +// - Supports a lock-free API for fast performance in single-threaded cases. 
+// - The freelist for a given bucket is split across a number of partition
+//   pages, enabling various simple tricks to try and minimize fragmentation.
+// - Fine-grained bucket sizes leading to less waste and better packing.
+//
+// The following security properties could be investigated in the future:
+// - Per-object bucketing (instead of per-size) is mostly available at the API,
+//   but not used yet.
+// - No randomness of freelist entries or bucket position.
+// - Better checking for wild pointers in free().
+// - Better freelist masking function to guarantee fault on 32-bit.
+
+#include <limits.h>
+#include <string.h>
+
+#include "build/build_config.h"
+#include "third_party/base/allocator/partition_allocator/page_allocator.h"
+#include "third_party/base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "third_party/base/allocator/partition_allocator/partition_bucket.h"
+#include "third_party/base/allocator/partition_allocator/partition_cookie.h"
+#include "third_party/base/allocator/partition_allocator/partition_page.h"
+#include "third_party/base/allocator/partition_allocator/partition_root_base.h"
+#include "third_party/base/allocator/partition_allocator/spin_lock.h"
+#include "third_party/base/base_export.h"
+#include "third_party/base/bits.h"
+#include "third_party/base/compiler_specific.h"
+#include "third_party/base/logging.h"
+#include "third_party/base/stl_util.h"
+#include "third_party/base/sys_byteorder.h"
+
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+#include <stdlib.h>
+#endif
+
+// We use this to make MEMORY_TOOL_REPLACES_ALLOCATOR behave the same for max
+// size as other alloc code.
+#define CHECK_MAX_SIZE_OR_RETURN_NULLPTR(size, flags) \
+  if (size > kGenericMaxDirectMapped) {               \
+    if (flags & PartitionAllocReturnNull) {           \
+      return nullptr;                                 \
+    }                                                 \
+    CHECK(false);                                     \
+  }
+
+namespace pdfium {
+namespace base {
+
+class PartitionStatsDumper;
+
+enum PartitionPurgeFlags {
+  // Decommitting the ring list of empty pages is reasonably fast.
+  PartitionPurgeDecommitEmptyPages = 1 << 0,
+  // Discarding unused system pages is slower, because it involves walking all
+  // freelists in all active partition pages of all buckets >= system page
+  // size. It often frees a similar amount of memory to decommitting the empty
+  // pages, though.
+  PartitionPurgeDiscardUnusedSystemPages = 1 << 1,
+};
+
+// Never instantiate a PartitionRoot directly, instead use PartitionAlloc.
+struct BASE_EXPORT PartitionRoot : public internal::PartitionRootBase {
+  PartitionRoot();
+  ~PartitionRoot() override;
+  // This references the buckets OFF the edge of this struct. All uses of
+  // PartitionRoot must have the bucket array come right after.
+  //
+  // The PartitionAlloc templated class ensures the following is correct.
+  ALWAYS_INLINE internal::PartitionBucket* buckets() {
+    return reinterpret_cast<internal::PartitionBucket*>(this + 1);
+  }
+  ALWAYS_INLINE const internal::PartitionBucket* buckets() const {
+    return reinterpret_cast<const internal::PartitionBucket*>(this + 1);
+  }
+
+  void Init(size_t bucket_count, size_t maximum_allocation);
+
+  ALWAYS_INLINE void* Alloc(size_t size, const char* type_name);
+  ALWAYS_INLINE void* AllocFlags(int flags, size_t size, const char* type_name);
+
+  void PurgeMemory(int flags) override;
+
+  void DumpStats(const char* partition_name,
+                 bool is_light_dump,
+                 PartitionStatsDumper* dumper);
+};
+
+// Never instantiate a PartitionRootGeneric directly, instead use
+// PartitionAllocatorGeneric.
+struct BASE_EXPORT PartitionRootGeneric : public internal::PartitionRootBase {
+  PartitionRootGeneric();
+  ~PartitionRootGeneric() override;
+  subtle::SpinLock lock;
+  // Some pre-computed constants.
+  size_t order_index_shifts[kBitsPerSizeT + 1] = {};
+  size_t order_sub_index_masks[kBitsPerSizeT + 1] = {};
+  // The bucket lookup table lets us map a size_t to a bucket quickly.
+  // The trailing +1 caters for the overflow case for very large allocation
+  // sizes. It is one flat array instead of a 2D array because in the 2D
+  // world, we'd need to index array[blah][max+1] which risks undefined
+  // behavior.
+  internal::PartitionBucket*
+      bucket_lookups[((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder) + 1] =
+          {};
+  internal::PartitionBucket buckets[kGenericNumBuckets] = {};
+
+  // Public API.
+  void Init();
+
+  ALWAYS_INLINE void* Alloc(size_t size, const char* type_name);
+  ALWAYS_INLINE void* AllocFlags(int flags, size_t size, const char* type_name);
+  ALWAYS_INLINE void Free(void* ptr);
+
+  NOINLINE void* Realloc(void* ptr, size_t new_size, const char* type_name);
+  // Overload that may return nullptr if reallocation isn't possible. In this
+  // case, |ptr| remains valid.
+  NOINLINE void* TryRealloc(void* ptr, size_t new_size, const char* type_name);
+
+  ALWAYS_INLINE size_t ActualSize(size_t size);
+
+  void PurgeMemory(int flags) override;
+
+  void DumpStats(const char* partition_name,
+                 bool is_light_dump,
+                 PartitionStatsDumper* partition_stats_dumper);
+};
+
+// Struct used to retrieve total memory usage of a partition. Used by
+// PartitionStatsDumper implementation.
+struct PartitionMemoryStats {
+  size_t total_mmapped_bytes;    // Total bytes mmaped from the system.
+  size_t total_committed_bytes;  // Total size of commmitted pages.
+  size_t total_resident_bytes;   // Total bytes provisioned by the partition.
+  size_t total_active_bytes;     // Total active bytes in the partition.
+  size_t total_decommittable_bytes;  // Total bytes that could be decommitted.
+  size_t total_discardable_bytes;    // Total bytes that could be discarded.
+};
+
+// Struct used to retrieve memory statistics about a partition bucket. Used by
+// PartitionStatsDumper implementation.
+struct PartitionBucketMemoryStats {
+  bool is_valid;       // Used to check if the stats is valid.
+  bool is_direct_map;  // True if this is a direct mapping; size will not be
+                       // unique.
+  uint32_t bucket_slot_size;  // The size of the slot in bytes.
+ uint32_t allocated_page_size; // Total size the partition page allocated from + // the system. + uint32_t active_bytes; // Total active bytes used in the bucket. + uint32_t resident_bytes; // Total bytes provisioned in the bucket. + uint32_t decommittable_bytes; // Total bytes that could be decommitted. + uint32_t discardable_bytes; // Total bytes that could be discarded. + uint32_t num_full_pages; // Number of pages with all slots allocated. + uint32_t num_active_pages; // Number of pages that have at least one + // provisioned slot. + uint32_t num_empty_pages; // Number of pages that are empty + // but not decommitted. + uint32_t num_decommitted_pages; // Number of pages that are empty + // and decommitted. +}; + +// Interface that is passed to PartitionDumpStats and +// PartitionDumpStatsGeneric for using the memory statistics. +class BASE_EXPORT PartitionStatsDumper { + public: + // Called to dump total memory used by partition, once per partition. + virtual void PartitionDumpTotals(const char* partition_name, + const PartitionMemoryStats*) = 0; + + // Called to dump stats about buckets, for each bucket. + virtual void PartitionsDumpBucketStats(const char* partition_name, + const PartitionBucketMemoryStats*) = 0; +}; + +BASE_EXPORT void PartitionAllocGlobalInit(OomFunction on_out_of_memory); + +// PartitionAlloc supports setting hooks to observe allocations/frees as they +// occur as well as 'override' hooks that allow overriding those operations. +class BASE_EXPORT PartitionAllocHooks { + public: + // Log allocation and free events. + typedef void AllocationObserverHook(void* address, + size_t size, + const char* type_name); + typedef void FreeObserverHook(void* address); + + // If it returns true, the allocation has been overridden with the pointer in + // *out. + typedef bool AllocationOverrideHook(void** out, + int flags, + size_t size, + const char* type_name); + // If it returns true, then the allocation was overridden and has been freed. 
+  typedef bool FreeOverrideHook(void* address);
+  // If it returns true, the underlying allocation is overridden and *out holds
+  // the size of the underlying allocation.
+  typedef bool ReallocOverrideHook(size_t* out, void* address);
+
+  // To unhook, call Set*Hooks with nullptrs.
+  static void SetObserverHooks(AllocationObserverHook* alloc_hook,
+                               FreeObserverHook* free_hook);
+  static void SetOverrideHooks(AllocationOverrideHook* alloc_hook,
+                               FreeOverrideHook* free_hook,
+                               ReallocOverrideHook realloc_hook);
+
+  // Helper method to check whether hooks are enabled. This is an optimization
+  // so that if a function needs to call observer and override hooks in two
+  // different places this value can be cached and only loaded once.
+  static bool AreHooksEnabled() {
+    return hooks_enabled_.load(std::memory_order_relaxed);
+  }
+
+  static void AllocationObserverHookIfEnabled(void* address,
+                                              size_t size,
+                                              const char* type_name);
+  static bool AllocationOverrideHookIfEnabled(void** out,
+                                              int flags,
+                                              size_t size,
+                                              const char* type_name);
+
+  static void FreeObserverHookIfEnabled(void* address);
+  static bool FreeOverrideHookIfEnabled(void* address);
+
+  static void ReallocObserverHookIfEnabled(void* old_address,
+                                           void* new_address,
+                                           size_t size,
+                                           const char* type_name);
+  static bool ReallocOverrideHookIfEnabled(size_t* out, void* address);
+
+ private:
+  // Single bool that is used to indicate whether observer or allocation hooks
+  // are set to reduce the numbers of loads required to check whether hooking is
+  // enabled.
+  static std::atomic<bool> hooks_enabled_;
+
+  // Lock used to synchronize Set*Hooks calls.
+  static subtle::SpinLock set_hooks_lock_;
+
+  static std::atomic<AllocationObserverHook*> allocation_observer_hook_;
+  static std::atomic<FreeObserverHook*> free_observer_hook_;
+
+  static std::atomic<AllocationOverrideHook*> allocation_override_hook_;
+  static std::atomic<FreeOverrideHook*> free_override_hook_;
+  static std::atomic<ReallocOverrideHook*> realloc_override_hook_;
+};
+
+ALWAYS_INLINE void* PartitionRoot::Alloc(size_t size, const char* type_name) {
+  return AllocFlags(0, size, type_name);
+}
+
+ALWAYS_INLINE void* PartitionRoot::AllocFlags(int flags,
+                                              size_t size,
+                                              const char* type_name) {
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+  CHECK_MAX_SIZE_OR_RETURN_NULLPTR(size, flags);
+  void* result = malloc(size);
+  CHECK(result);
+  return result;
+#else
+  DCHECK(max_allocation == 0 || size <= max_allocation);
+  void* result;
+  const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
+  if (UNLIKELY(hooks_enabled)) {
+    if (PartitionAllocHooks::AllocationOverrideHookIfEnabled(&result, flags,
+                                                             size, type_name)) {
+      PartitionAllocHooks::AllocationObserverHookIfEnabled(result, size,
+                                                           type_name);
+      return result;
+    }
+  }
+  size_t requested_size = size;
+  size = internal::PartitionCookieSizeAdjustAdd(size);
+  DCHECK(initialized);
+  size_t index = size >> kBucketShift;
+  DCHECK(index < num_buckets);
+  DCHECK(size == index << kBucketShift);
+  internal::PartitionBucket* bucket = &buckets()[index];
+  result = AllocFromBucket(bucket, flags, size);
+  if (UNLIKELY(hooks_enabled)) {
+    PartitionAllocHooks::AllocationObserverHookIfEnabled(result, requested_size,
+                                                         type_name);
+  }
+  return result;
+#endif  // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+}
+
+ALWAYS_INLINE bool PartitionAllocSupportsGetSize() {
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+  return false;
+#else
+  return true;
+#endif
+}
+
+ALWAYS_INLINE size_t PartitionAllocGetSize(void* ptr) {
+  // No need to lock here. Only |ptr| being freed by another thread could
+  // cause trouble, and the caller is responsible for that not happening.
+ DCHECK(PartitionAllocSupportsGetSize()); + ptr = internal::PartitionCookieFreePointerAdjust(ptr); + internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr); + // TODO(palmer): See if we can afford to make this a CHECK. + DCHECK(internal::PartitionRootBase::IsValidPage(page)); + size_t size = page->bucket->slot_size; + return internal::PartitionCookieSizeAdjustSubtract(size); +} + +ALWAYS_INLINE void PartitionFree(void* ptr) { +#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) + free(ptr); +#else + // TODO(palmer): Check ptr alignment before continuing. Shall we do the check + // inside PartitionCookieFreePointerAdjust? + if (PartitionAllocHooks::AreHooksEnabled()) { + PartitionAllocHooks::FreeObserverHookIfEnabled(ptr); + if (PartitionAllocHooks::FreeOverrideHookIfEnabled(ptr)) + return; + } + + ptr = internal::PartitionCookieFreePointerAdjust(ptr); + internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr); + // TODO(palmer): See if we can afford to make this a CHECK. + DCHECK(internal::PartitionRootBase::IsValidPage(page)); + internal::DeferredUnmap deferred_unmap = page->Free(ptr); + deferred_unmap.Run(); +#endif +} + +ALWAYS_INLINE internal::PartitionBucket* PartitionGenericSizeToBucket( + PartitionRootGeneric* root, + size_t size) { + size_t order = kBitsPerSizeT - bits::CountLeadingZeroBitsSizeT(size); + // The order index is simply the next few bits after the most significant bit. + size_t order_index = (size >> root->order_index_shifts[order]) & + (kGenericNumBucketsPerOrder - 1); + // And if the remaining bits are non-zero we must bump the bucket up. 
+ size_t sub_order_index = size & root->order_sub_index_masks[order]; + internal::PartitionBucket* bucket = + root->bucket_lookups[(order << kGenericNumBucketsPerOrderBits) + + order_index + !!sub_order_index]; + CHECK(bucket); + DCHECK(!bucket->slot_size || bucket->slot_size >= size); + DCHECK(!(bucket->slot_size % kGenericSmallestBucket)); + return bucket; +} + +ALWAYS_INLINE void* PartitionAllocGenericFlags(PartitionRootGeneric* root, + int flags, + size_t size, + const char* type_name) { + DCHECK(flags < PartitionAllocLastFlag << 1); + +#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) + CHECK_MAX_SIZE_OR_RETURN_NULLPTR(size, flags); + const bool zero_fill = flags & PartitionAllocZeroFill; + void* result = zero_fill ? calloc(1, size) : malloc(size); + CHECK(result || flags & PartitionAllocReturnNull); + return result; +#else + DCHECK(root->initialized); + // Only SizeSpecificPartitionAllocator should use max_allocation. + DCHECK(root->max_allocation == 0); + void* result; + const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled(); + if (UNLIKELY(hooks_enabled)) { + if (PartitionAllocHooks::AllocationOverrideHookIfEnabled(&result, flags, + size, type_name)) { + PartitionAllocHooks::AllocationObserverHookIfEnabled(result, size, + type_name); + return result; + } + } + size_t requested_size = size; + size = internal::PartitionCookieSizeAdjustAdd(size); + internal::PartitionBucket* bucket = PartitionGenericSizeToBucket(root, size); + { + subtle::SpinLock::Guard guard(root->lock); + result = root->AllocFromBucket(bucket, flags, size); + } + if (UNLIKELY(hooks_enabled)) { + PartitionAllocHooks::AllocationObserverHookIfEnabled(result, requested_size, + type_name); + } + + return result; +#endif +} + +ALWAYS_INLINE void* PartitionRootGeneric::Alloc(size_t size, + const char* type_name) { + return PartitionAllocGenericFlags(this, 0, size, type_name); +} + +ALWAYS_INLINE void* PartitionRootGeneric::AllocFlags(int flags, + size_t size, + const char* type_name) { + 
return PartitionAllocGenericFlags(this, flags, size, type_name); +} + +ALWAYS_INLINE void PartitionRootGeneric::Free(void* ptr) { +#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) + free(ptr); +#else + DCHECK(initialized); + + if (UNLIKELY(!ptr)) + return; + + if (PartitionAllocHooks::AreHooksEnabled()) { + PartitionAllocHooks::FreeObserverHookIfEnabled(ptr); + if (PartitionAllocHooks::FreeOverrideHookIfEnabled(ptr)) + return; + } + + ptr = internal::PartitionCookieFreePointerAdjust(ptr); + internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr); + // TODO(palmer): See if we can afford to make this a CHECK. + DCHECK(IsValidPage(page)); + internal::DeferredUnmap deferred_unmap; + { + subtle::SpinLock::Guard guard(lock); + deferred_unmap = page->Free(ptr); + } + deferred_unmap.Run(); +#endif +} + +BASE_EXPORT void* PartitionReallocGenericFlags(PartitionRootGeneric* root, + int flags, + void* ptr, + size_t new_size, + const char* type_name); + +ALWAYS_INLINE size_t PartitionRootGeneric::ActualSize(size_t size) { +#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) + return size; +#else + DCHECK(initialized); + size = internal::PartitionCookieSizeAdjustAdd(size); + internal::PartitionBucket* bucket = PartitionGenericSizeToBucket(this, size); + if (LIKELY(!bucket->is_direct_mapped())) { + size = bucket->slot_size; + } else if (size > kGenericMaxDirectMapped) { + // Too large to allocate => return the size unchanged. 
+ } else {
+ size = internal::PartitionBucket::get_direct_map_size(size);
+ }
+ return internal::PartitionCookieSizeAdjustSubtract(size);
+#endif
+}
+
+template <size_t N>
+class SizeSpecificPartitionAllocator {
+ public:
+ SizeSpecificPartitionAllocator() {
+ memset(actual_buckets_, 0,
+ sizeof(internal::PartitionBucket) * pdfium::size(actual_buckets_));
+ }
+ ~SizeSpecificPartitionAllocator() = default;
+ static const size_t kMaxAllocation = N - kAllocationGranularity;
+ static const size_t kNumBuckets = N / kAllocationGranularity;
+ void init() { partition_root_.Init(kNumBuckets, kMaxAllocation); }
+ ALWAYS_INLINE PartitionRoot* root() { return &partition_root_; }
+
+ private:
+ PartitionRoot partition_root_;
+ internal::PartitionBucket actual_buckets_[kNumBuckets];
+};
+
+class BASE_EXPORT PartitionAllocatorGeneric {
+ public:
+ PartitionAllocatorGeneric();
+ ~PartitionAllocatorGeneric();
+
+ void init() { partition_root_.Init(); }
+ ALWAYS_INLINE PartitionRootGeneric* root() { return &partition_root_; }
+
+ private:
+ PartitionRootGeneric partition_root_;
+};
+
+} // namespace base
+} // namespace pdfium
+
+#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_alloc_constants.h b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_alloc_constants.h
new file mode 100644
index 000000000..da9fb66a6
--- /dev/null
+++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_alloc_constants.h
@@ -0,0 +1,200 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_
+#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_
+
+#include <limits.h>
+#include <stddef.h>
+
+#include "build/build_config.h"
+#include "third_party/base/allocator/partition_allocator/page_allocator_constants.h"
+#include "third_party/base/logging.h"
+
+namespace pdfium {
+namespace base {
+
+// Allocation granularity of sizeof(void*) bytes.
+static const size_t kAllocationGranularity = sizeof(void *);
+static const size_t kAllocationGranularityMask = kAllocationGranularity - 1;
+static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2;
+
+// Underlying partition storage pages (`PartitionPage`s) are a power-of-2 size.
+// It is typical for a `PartitionPage` to be based on multiple system pages.
+// Most references to "page" refer to `PartitionPage`s.
+//
+// *Super pages* are the underlying system allocations we make. Super pages
+// contain multiple partition pages and include space for a small amount of
+// metadata per partition page.
+//
+// Inside super pages, we store *slot spans*. A slot span is a contiguous range
+// of one or more `PartitionPage`s that stores allocations of the same size.
+// Slot span sizes are adjusted depending on the allocation size, to make sure
+// the packing does not lead to unused (wasted) space at the end of the last
+// system page of the span. For our current maximum slot span size of 64 KiB and
+// other constant values, we pack _all_ `PartitionRootGeneric::Alloc` sizes
+// perfectly up against the end of a system page.
+ +#if defined(_MIPS_ARCH_LOONGSON) || defined(ARCH_CPU_LOONGARCH64) +static const size_t kPartitionPageShift = 16; // 64 KiB +#elif defined(ARCH_CPU_PPC64) +static const size_t kPartitionPageShift = 18; // 256 KiB +#elif defined(ARCH_CPU_SW64) +static const size_t kPartitionPageShift = 15; // 32 KiB +#elif defined(OS_APPLE) && defined(ARCH_CPU_ARM64) +static const size_t kPartitionPageShift = 16; // 64 KiB +#else +//default page size changed to 64k +//static const size_t kPartitionPageShift = 18; // 256 KiB +#if defined(SYSTEMPAGESIZE) +static const size_t kPartitionPageShift = log2(SYSTEMPAGESIZE) + 2; +#else +static const size_t kPartitionPageShift = 14; // 16 KiB +#endif +#endif +static const size_t kPartitionPageSize = 1 << kPartitionPageShift; +static const size_t kPartitionPageOffsetMask = kPartitionPageSize - 1; +static const size_t kPartitionPageBaseMask = ~kPartitionPageOffsetMask; +// TODO: Should this be 1 if defined(_MIPS_ARCH_LOONGSON)? +static const size_t kMaxPartitionPagesPerSlotSpan = 4; + +// To avoid fragmentation via never-used freelist entries, we hand out partition +// freelist sections gradually, in units of the dominant system page size. What +// we're actually doing is avoiding filling the full `PartitionPage` (16 KiB) +// with freelist pointers right away. Writing freelist pointers will fault and +// dirty a private page, which is very wasteful if we never actually store +// objects there. + +static const size_t kNumSystemPagesPerPartitionPage = + kPartitionPageSize / kSystemPageSize; +static const size_t kMaxSystemPagesPerSlotSpan = + kNumSystemPagesPerPartitionPage * kMaxPartitionPagesPerSlotSpan; + +// We reserve virtual address space in 2 MiB chunks (aligned to 2 MiB as well). +// These chunks are called *super pages*. We do this so that we can store +// metadata in the first few pages of each 2 MiB-aligned section. This makes +// freeing memory very fast. 
We specifically choose 2 MiB because this virtual +// address block represents a full but single PTE allocation on ARM, ia32 and +// x64. +// +// The layout of the super page is as follows. The sizes below are the same for +// 32- and 64-bit platforms. +// +// +-----------------------+ +// | Guard page (4 KiB) | +// | Metadata page (4 KiB) | +// | Guard pages (8 KiB) | +// | Slot span | +// | Slot span | +// | ... | +// | Slot span | +// | Guard page (4 KiB) | +// +-----------------------+ +// +// Each slot span is a contiguous range of one or more `PartitionPage`s. +// +// The metadata page has the following format. Note that the `PartitionPage` +// that is not at the head of a slot span is "unused". In other words, the +// metadata for the slot span is stored only in the first `PartitionPage` of the +// slot span. Metadata accesses to other `PartitionPage`s are redirected to the +// first `PartitionPage`. +// +// +---------------------------------------------+ +// | SuperPageExtentEntry (32 B) | +// | PartitionPage of slot span 1 (32 B, used) | +// | PartitionPage of slot span 1 (32 B, unused) | +// | PartitionPage of slot span 1 (32 B, unused) | +// | PartitionPage of slot span 2 (32 B, used) | +// | PartitionPage of slot span 3 (32 B, used) | +// | ... 
 |
+// | PartitionPage of slot span N (32 B, unused) |
+// +---------------------------------------------+
+//
+// A direct-mapped page has a similar layout to fake it looking like a super
+// page:
+//
+// +-----------------------+
+// | Guard page (4 KiB) |
+// | Metadata page (4 KiB) |
+// | Guard pages (8 KiB) |
+// | Direct mapped object |
+// | Guard page (4 KiB) |
+// +-----------------------+
+//
+// A direct-mapped page's metadata page has the following layout:
+//
+// +--------------------------------+
+// | SuperPageExtentEntry (32 B) |
+// | PartitionPage (32 B) |
+// | PartitionBucket (32 B) |
+// | PartitionDirectMapExtent (8 B) |
+// +--------------------------------+
+
+static const size_t kSuperPageShift = 21; // 2 MiB
+static const size_t kSuperPageSize = 1 << kSuperPageShift;
+static const size_t kSuperPageOffsetMask = kSuperPageSize - 1;
+static const size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
+static const size_t kNumPartitionPagesPerSuperPage =
+ kSuperPageSize / kPartitionPageSize;
+
+// The following kGeneric* constants apply to the generic variants of the API.
+// The "order" of an allocation is closely related to the power-of-2 size of the
+// allocation. More precisely, the order is the bit index of the
+// most-significant-bit in the allocation size, where the bit number starts at
+// index 1 for the least-significant-bit.
+//
+// In terms of allocation sizes, order 0 covers 0, order 1 covers 1, order 2
+// covers 2->3, order 3 covers 4->7, order 4 covers 8->15.
+
+static const size_t kGenericMinBucketedOrder = 4; // 8 bytes.
+// The largest bucketed order is 1 << (20 - 1), storing [512 KiB, 1 MiB):
+static const size_t kGenericMaxBucketedOrder = 20;
+static const size_t kGenericNumBucketedOrders =
+ (kGenericMaxBucketedOrder - kGenericMinBucketedOrder) + 1;
+// Eight buckets per order (for the higher orders), e.g.
order 8 is 128, 144, +// 160, ..., 240: +static const size_t kGenericNumBucketsPerOrderBits = 3; +static const size_t kGenericNumBucketsPerOrder = + 1 << kGenericNumBucketsPerOrderBits; +static const size_t kGenericNumBuckets = + kGenericNumBucketedOrders * kGenericNumBucketsPerOrder; +static const size_t kGenericSmallestBucket = 1 + << (kGenericMinBucketedOrder - 1); +static const size_t kGenericMaxBucketSpacing = + 1 << ((kGenericMaxBucketedOrder - 1) - kGenericNumBucketsPerOrderBits); +static const size_t kGenericMaxBucketed = + (1 << (kGenericMaxBucketedOrder - 1)) + + ((kGenericNumBucketsPerOrder - 1) * kGenericMaxBucketSpacing); +// Limit when downsizing a direct mapping using `realloc`: +static const size_t kGenericMinDirectMappedDownsize = kGenericMaxBucketed + 1; +static const size_t kGenericMaxDirectMapped = + (1UL << 31) + kPageAllocationGranularity; // 2 GiB plus 1 more page. +static const size_t kBitsPerSizeT = sizeof(void *) *CHAR_BIT; + +// Constant for the memory reclaim logic. +static const size_t kMaxFreeableSpans = 16; + +// If the total size in bytes of allocated but not committed pages exceeds this +// value (probably it is a "out of virtual address space" crash), a special +// crash stack trace is generated at +// `PartitionOutOfMemoryWithLotsOfUncommitedPages`. This is to distinguish "out +// of virtual address space" from "out of physical memory" in crash reports. +static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024; // 1 GiB + +// These byte values match tcmalloc. +static const unsigned char kUninitializedByte = 0xAB; +static const unsigned char kFreedByte = 0xCD; + +// Flags for `PartitionAllocGenericFlags`. 
+enum PartitionAllocFlags { + PartitionAllocReturnNull = 1 << 0, + PartitionAllocZeroFill = 1 << 1, + + PartitionAllocLastFlag = PartitionAllocZeroFill +}; + +} // namespace base +} // namespace pdfium + +#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_ diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_bucket.cc b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_bucket.cc new file mode 100644 index 000000000..066f40ca8 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_bucket.cc @@ -0,0 +1,566 @@ +// Copyright (c) 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "third_party/base/allocator/partition_allocator/partition_bucket.h" + +#include "build/build_config.h" +#include "third_party/base/allocator/partition_allocator/oom.h" +#include "third_party/base/allocator/partition_allocator/page_allocator.h" +#include "third_party/base/allocator/partition_allocator/partition_alloc_constants.h" +#include "third_party/base/allocator/partition_allocator/partition_direct_map_extent.h" +#include "third_party/base/allocator/partition_allocator/partition_oom.h" +#include "third_party/base/allocator/partition_allocator/partition_page.h" +#include "third_party/base/allocator/partition_allocator/partition_root_base.h" + +namespace pdfium { +namespace base { +namespace internal { + +namespace { + +ALWAYS_INLINE PartitionPage* PartitionDirectMap(PartitionRootBase* root, + int flags, + size_t raw_size) { + size_t size = PartitionBucket::get_direct_map_size(raw_size); + + // Because we need to fake looking like a super page, we need to allocate + // a bunch of system pages more than "size": + // - The first few system pages are the partition page in which the super + // 
page metadata is stored. We fault just one system page out of a partition + // page sized clump. + // - We add a trailing guard page on 32-bit (on 64-bit we rely on the + // massive address space plus randomization instead). + size_t map_size = size + kPartitionPageSize; +#if !defined(ARCH_CPU_64_BITS) + map_size += kSystemPageSize; +#endif + // Round up to the allocation granularity. + map_size += kPageAllocationGranularityOffsetMask; + map_size &= kPageAllocationGranularityBaseMask; + + char* ptr = reinterpret_cast(AllocPages(nullptr, map_size, + kSuperPageSize, PageReadWrite, + PageTag::kPartitionAlloc)); + if (UNLIKELY(!ptr)) + return nullptr; + + size_t committed_page_size = size + kSystemPageSize; + root->total_size_of_direct_mapped_pages += committed_page_size; + root->IncreaseCommittedPages(committed_page_size); + + char* slot = ptr + kPartitionPageSize; + SetSystemPagesAccess(ptr + (kSystemPageSize * 2), + kPartitionPageSize - (kSystemPageSize * 2), + PageInaccessible); +#if !defined(ARCH_CPU_64_BITS) + SetSystemPagesAccess(ptr, kSystemPageSize, PageInaccessible); + SetSystemPagesAccess(slot + size, kSystemPageSize, PageInaccessible); +#endif + + PartitionSuperPageExtentEntry* extent = + reinterpret_cast( + PartitionSuperPageToMetadataArea(ptr)); + extent->root = root; + // The new structures are all located inside a fresh system page so they + // will all be zeroed out. These DCHECKs are for documentation. 
+ DCHECK(!extent->super_page_base); + DCHECK(!extent->super_pages_end); + DCHECK(!extent->next); + PartitionPage* page = PartitionPage::FromPointerNoAlignmentCheck(slot); + PartitionBucket* bucket = reinterpret_cast( + reinterpret_cast(page) + (kPageMetadataSize * 2)); + DCHECK(!page->next_page); + DCHECK(!page->num_allocated_slots); + DCHECK(!page->num_unprovisioned_slots); + DCHECK(!page->page_offset); + DCHECK(!page->empty_cache_index); + page->bucket = bucket; + page->freelist_head = reinterpret_cast(slot); + PartitionFreelistEntry* next_entry = + reinterpret_cast(slot); + next_entry->next = PartitionFreelistEntry::Encode(nullptr); + + DCHECK(!bucket->active_pages_head); + DCHECK(!bucket->empty_pages_head); + DCHECK(!bucket->decommitted_pages_head); + DCHECK(!bucket->num_system_pages_per_slot_span); + DCHECK(!bucket->num_full_pages); + bucket->slot_size = size; + + PartitionDirectMapExtent* map_extent = + PartitionDirectMapExtent::FromPage(page); + map_extent->map_size = map_size - kPartitionPageSize - kSystemPageSize; + map_extent->bucket = bucket; + + // Maintain the doubly-linked list of all direct mappings. + map_extent->next_extent = root->direct_map_list; + if (map_extent->next_extent) + map_extent->next_extent->prev_extent = map_extent; + map_extent->prev_extent = nullptr; + root->direct_map_list = map_extent; + + return page; +} + +} // namespace + +// static +PartitionBucket PartitionBucket::sentinel_bucket_; + +PartitionBucket* PartitionBucket::get_sentinel_bucket() { + return &sentinel_bucket_; +} + +// TODO(ajwong): This seems to interact badly with +// get_pages_per_slot_span() which rounds the value from this up to a +// multiple of kNumSystemPagesPerPartitionPage (aka 4) anyways. +// http://crbug.com/776537 +// +// TODO(ajwong): The waste calculation seems wrong. The PTE usage should cover +// both used and unsed pages. 
+// http://crbug.com/776537 +uint8_t PartitionBucket::get_system_pages_per_slot_span() { + // This works out reasonably for the current bucket sizes of the generic + // allocator, and the current values of partition page size and constants. + // Specifically, we have enough room to always pack the slots perfectly into + // some number of system pages. The only waste is the waste associated with + // unfaulted pages (i.e. wasted address space). + // TODO: we end up using a lot of system pages for very small sizes. For + // example, we'll use 12 system pages for slot size 24. The slot size is + // so small that the waste would be tiny with just 4, or 1, system pages. + // Later, we can investigate whether there are anti-fragmentation benefits + // to using fewer system pages. + double best_waste_ratio = 1.0f; + uint16_t best_pages = 0; + if (slot_size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) { + // TODO(ajwong): Why is there a DCHECK here for this? + // http://crbug.com/776537 + DCHECK(!(slot_size % kSystemPageSize)); + best_pages = static_cast(slot_size / kSystemPageSize); + // TODO(ajwong): Should this be checking against + // kMaxSystemPagesPerSlotSpan or numeric_limits::max? + // http://crbug.com/776537 + CHECK(best_pages < (1 << 8)); + return static_cast(best_pages); + } + DCHECK(slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize); + for (uint16_t i = kNumSystemPagesPerPartitionPage - 1; + i <= kMaxSystemPagesPerSlotSpan; ++i) { + size_t page_size = kSystemPageSize * i; + size_t num_slots = page_size / slot_size; + size_t waste = page_size - (num_slots * slot_size); + // Leaving a page unfaulted is not free; the page will occupy an empty page + // table entry. Make a simple attempt to account for that. + // + // TODO(ajwong): This looks wrong. PTEs are allocated for all pages + // regardless of whether or not they are wasted. Should it just + // be waste += i * sizeof(void*)? 
+ // http://crbug.com/776537 + size_t num_remainder_pages = i & (kNumSystemPagesPerPartitionPage - 1); + size_t num_unfaulted_pages = + num_remainder_pages + ? (kNumSystemPagesPerPartitionPage - num_remainder_pages) + : 0; + waste += sizeof(void*) * num_unfaulted_pages; + double waste_ratio = + static_cast(waste) / static_cast(page_size); + if (waste_ratio < best_waste_ratio) { + best_waste_ratio = waste_ratio; + best_pages = i; + } + } + DCHECK(best_pages > 0); + CHECK(best_pages <= kMaxSystemPagesPerSlotSpan); + return static_cast(best_pages); +} + +void PartitionBucket::Init(uint32_t new_slot_size) { + slot_size = new_slot_size; + active_pages_head = PartitionPage::get_sentinel_page(); + empty_pages_head = nullptr; + decommitted_pages_head = nullptr; + num_full_pages = 0; + num_system_pages_per_slot_span = get_system_pages_per_slot_span(); +} + +NOINLINE void PartitionBucket::OnFull() { + OOM_CRASH(0); +} + +ALWAYS_INLINE void* PartitionBucket::AllocNewSlotSpan( + PartitionRootBase* root, + int flags, + uint16_t num_partition_pages) { + DCHECK(!(reinterpret_cast(root->next_partition_page) % + kPartitionPageSize)); + DCHECK(!(reinterpret_cast(root->next_partition_page_end) % + kPartitionPageSize)); + DCHECK(num_partition_pages <= kNumPartitionPagesPerSuperPage); + size_t total_size = kPartitionPageSize * num_partition_pages; + size_t num_partition_pages_left = + (root->next_partition_page_end - root->next_partition_page) >> + kPartitionPageShift; + if (LIKELY(num_partition_pages_left >= num_partition_pages)) { + // In this case, we can still hand out pages from the current super page + // allocation. + char* ret = root->next_partition_page; + + // Fresh System Pages in the SuperPages are decommited. Commit them + // before vending them back. + SetSystemPagesAccess(ret, total_size, PageReadWrite); + + root->next_partition_page += total_size; + root->IncreaseCommittedPages(total_size); + return ret; + } + + // Need a new super page. 
We want to allocate super pages in a continguous + // address region as much as possible. This is important for not causing + // page table bloat and not fragmenting address spaces in 32 bit + // architectures. + char* requested_address = root->next_super_page; + char* super_page = reinterpret_cast( + AllocPages(requested_address, kSuperPageSize, kSuperPageSize, + PageReadWrite, PageTag::kPartitionAlloc)); + if (UNLIKELY(!super_page)) + return nullptr; + + root->total_size_of_super_pages += kSuperPageSize; + root->IncreaseCommittedPages(total_size); + + // |total_size| MUST be less than kSuperPageSize - (kPartitionPageSize*2). + // This is a trustworthy value because num_partition_pages is not user + // controlled. + // + // TODO(ajwong): Introduce a DCHECK. + root->next_super_page = super_page + kSuperPageSize; + char* ret = super_page + kPartitionPageSize; + root->next_partition_page = ret + total_size; + root->next_partition_page_end = root->next_super_page - kPartitionPageSize; + // Make the first partition page in the super page a guard page, but leave a + // hole in the middle. + // This is where we put page metadata and also a tiny amount of extent + // metadata. + SetSystemPagesAccess(super_page, kSystemPageSize, PageInaccessible); + SetSystemPagesAccess(super_page + (kSystemPageSize * 2), + kPartitionPageSize - (kSystemPageSize * 2), + PageInaccessible); + // SetSystemPagesAccess(super_page + (kSuperPageSize - + // kPartitionPageSize), + // kPartitionPageSize, PageInaccessible); + // All remaining slotspans for the unallocated PartitionPages inside the + // SuperPage are conceptually decommitted. Correctly set the state here + // so they do not occupy resources. + // + // TODO(ajwong): Refactor Page Allocator API so the SuperPage comes in + // decommited initially. 
+ SetSystemPagesAccess(super_page + kPartitionPageSize + total_size, + (kSuperPageSize - kPartitionPageSize - total_size), + PageInaccessible); + + // If we were after a specific address, but didn't get it, assume that + // the system chose a lousy address. Here most OS'es have a default + // algorithm that isn't randomized. For example, most Linux + // distributions will allocate the mapping directly before the last + // successful mapping, which is far from random. So we just get fresh + // randomness for the next mapping attempt. + if (requested_address && requested_address != super_page) + root->next_super_page = nullptr; + + // We allocated a new super page so update super page metadata. + // First check if this is a new extent or not. + PartitionSuperPageExtentEntry* latest_extent = + reinterpret_cast( + PartitionSuperPageToMetadataArea(super_page)); + // By storing the root in every extent metadata object, we have a fast way + // to go from a pointer within the partition to the root object. + latest_extent->root = root; + // Most new extents will be part of a larger extent, and these three fields + // are unused, but we initialize them to 0 so that we get a clear signal + // in case they are accidentally used. + latest_extent->super_page_base = nullptr; + latest_extent->super_pages_end = nullptr; + latest_extent->next = nullptr; + + PartitionSuperPageExtentEntry* current_extent = root->current_extent; + bool is_new_extent = (super_page != requested_address); + if (UNLIKELY(is_new_extent)) { + if (UNLIKELY(!current_extent)) { + DCHECK(!root->first_extent); + root->first_extent = latest_extent; + } else { + DCHECK(current_extent->super_page_base); + current_extent->next = latest_extent; + } + root->current_extent = latest_extent; + latest_extent->super_page_base = super_page; + latest_extent->super_pages_end = super_page + kSuperPageSize; + } else { + // We allocated next to an existing extent so just nudge the size up a + // little. 
+ DCHECK(current_extent->super_pages_end); + current_extent->super_pages_end += kSuperPageSize; + DCHECK(ret >= current_extent->super_page_base && + ret < current_extent->super_pages_end); + } + return ret; +} + +ALWAYS_INLINE uint16_t PartitionBucket::get_pages_per_slot_span() { + // Rounds up to nearest multiple of kNumSystemPagesPerPartitionPage. + return (num_system_pages_per_slot_span + + (kNumSystemPagesPerPartitionPage - 1)) / + kNumSystemPagesPerPartitionPage; +} + +ALWAYS_INLINE void PartitionBucket::InitializeSlotSpan(PartitionPage* page) { + // The bucket never changes. We set it up once. + page->bucket = this; + page->empty_cache_index = -1; + + page->Reset(); + + // If this page has just a single slot, do not set up page offsets for any + // page metadata other than the first one. This ensures that attempts to + // touch invalid page metadata fail. + if (page->num_unprovisioned_slots == 1) + return; + + uint16_t num_partition_pages = get_pages_per_slot_span(); + char* page_char_ptr = reinterpret_cast(page); + for (uint16_t i = 1; i < num_partition_pages; ++i) { + page_char_ptr += kPageMetadataSize; + PartitionPage* secondary_page = + reinterpret_cast(page_char_ptr); + secondary_page->page_offset = i; + } +} + +ALWAYS_INLINE char* PartitionBucket::AllocAndFillFreelist(PartitionPage* page) { + DCHECK(page != PartitionPage::get_sentinel_page()); + uint16_t num_slots = page->num_unprovisioned_slots; + DCHECK(num_slots); + // We should only get here when _every_ slot is either used or unprovisioned. + // (The third state is "on the freelist". If we have a non-empty freelist, we + // should not get here.) + DCHECK(num_slots + page->num_allocated_slots == get_slots_per_span()); + // Similarly, make explicitly sure that the freelist is empty. 
+ DCHECK(!page->freelist_head); + DCHECK(page->num_allocated_slots >= 0); + + size_t size = slot_size; + char* base = reinterpret_cast(PartitionPage::ToPointer(page)); + char* return_object = base + (size * page->num_allocated_slots); + char* first_freelist_pointer = return_object + size; + char* first_freelist_pointer_extent = + first_freelist_pointer + sizeof(PartitionFreelistEntry*); + // Our goal is to fault as few system pages as possible. We calculate the + // page containing the "end" of the returned slot, and then allow freelist + // pointers to be written up to the end of that page. + char* sub_page_limit = reinterpret_cast( + RoundUpToSystemPage(reinterpret_cast(first_freelist_pointer))); + char* slots_limit = return_object + (size * num_slots); + char* freelist_limit = sub_page_limit; + if (UNLIKELY(slots_limit < freelist_limit)) + freelist_limit = slots_limit; + + uint16_t num_new_freelist_entries = 0; + if (LIKELY(first_freelist_pointer_extent <= freelist_limit)) { + // Only consider used space in the slot span. If we consider wasted + // space, we may get an off-by-one when a freelist pointer fits in the + // wasted space, but a slot does not. + // We know we can fit at least one freelist pointer. + num_new_freelist_entries = 1; + // Any further entries require space for the whole slot span. + num_new_freelist_entries += static_cast( + (freelist_limit - first_freelist_pointer_extent) / size); + } + + // We always return an object slot -- that's the +1 below. + // We do not neccessarily create any new freelist entries, because we cross + // sub page boundaries frequently for large bucket sizes. 
+ DCHECK(num_new_freelist_entries + 1 <= num_slots); + num_slots -= (num_new_freelist_entries + 1); + page->num_unprovisioned_slots = num_slots; + page->num_allocated_slots++; + + if (LIKELY(num_new_freelist_entries)) { + char* freelist_pointer = first_freelist_pointer; + PartitionFreelistEntry* entry = + reinterpret_cast(freelist_pointer); + page->freelist_head = entry; + while (--num_new_freelist_entries) { + freelist_pointer += size; + PartitionFreelistEntry* next_entry = + reinterpret_cast(freelist_pointer); + entry->next = PartitionFreelistEntry::Encode(next_entry); + entry = next_entry; + } + entry->next = PartitionFreelistEntry::Encode(nullptr); + } else { + page->freelist_head = nullptr; + } + return return_object; +} + +bool PartitionBucket::SetNewActivePage() { + PartitionPage* page = active_pages_head; + if (page == PartitionPage::get_sentinel_page()) + return false; + + PartitionPage* next_page; + + for (; page; page = next_page) { + next_page = page->next_page; + DCHECK(page->bucket == this); + DCHECK(page != empty_pages_head); + DCHECK(page != decommitted_pages_head); + + if (LIKELY(page->is_active())) { + // This page is usable because it has freelist entries, or has + // unprovisioned slots we can create freelist entries from. + active_pages_head = page; + return true; + } + + // Deal with empty and decommitted pages. + if (LIKELY(page->is_empty())) { + page->next_page = empty_pages_head; + empty_pages_head = page; + } else if (LIKELY(page->is_decommitted())) { + page->next_page = decommitted_pages_head; + decommitted_pages_head = page; + } else { + DCHECK(page->is_full()); + // If we get here, we found a full page. Skip over it too, and also + // tag it as full (via a negative value). We need it tagged so that + // free'ing can tell, and move it back into the active page list. 
+ page->num_allocated_slots = -page->num_allocated_slots; + ++num_full_pages; + // num_full_pages is a uint16_t for efficient packing so guard against + // overflow to be safe. + if (UNLIKELY(!num_full_pages)) + OnFull(); + // Not necessary but might help stop accidents. + page->next_page = nullptr; + } + } + + active_pages_head = PartitionPage::get_sentinel_page(); + return false; +} + +void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root, + int flags, + size_t size, + bool* is_already_zeroed) { + // The slow path is called when the freelist is empty. + DCHECK(!active_pages_head->freelist_head); + + PartitionPage* new_page = nullptr; + *is_already_zeroed = false; + + // For the PartitionRootGeneric::Alloc() API, we have a bunch of buckets + // marked as special cases. We bounce them through to the slow path so that + // we can still have a blazing fast hot path due to lack of corner-case + // branches. + // + // Note: The ordering of the conditionals matter! In particular, + // SetNewActivePage() has a side-effect even when returning + // false where it sweeps the active page list and may move things into + // the empty or decommitted lists which affects the subsequent conditional. + bool return_null = flags & PartitionAllocReturnNull; + if (UNLIKELY(is_direct_mapped())) { + DCHECK(size > kGenericMaxBucketed); + DCHECK(this == get_sentinel_bucket()); + DCHECK(active_pages_head == PartitionPage::get_sentinel_page()); + if (size > kGenericMaxDirectMapped) { + if (return_null) + return nullptr; + PartitionExcessiveAllocationSize(size); + } + new_page = PartitionDirectMap(root, flags, size); + *is_already_zeroed = true; + } else if (LIKELY(SetNewActivePage())) { + // First, did we find an active page in the active pages list? + new_page = active_pages_head; + DCHECK(new_page->is_active()); + } else if (LIKELY(empty_pages_head != nullptr) || + LIKELY(decommitted_pages_head != nullptr)) { + // Second, look in our lists of empty and decommitted pages. 
+ // Check empty pages first, which are preferred, but beware that an + // empty page might have been decommitted. + while (LIKELY((new_page = empty_pages_head) != nullptr)) { + DCHECK(new_page->bucket == this); + DCHECK(new_page->is_empty() || new_page->is_decommitted()); + empty_pages_head = new_page->next_page; + // Accept the empty page unless it got decommitted. + if (new_page->freelist_head) { + new_page->next_page = nullptr; + break; + } + DCHECK(new_page->is_decommitted()); + new_page->next_page = decommitted_pages_head; + decommitted_pages_head = new_page; + } + if (UNLIKELY(!new_page) && LIKELY(decommitted_pages_head != nullptr)) { + new_page = decommitted_pages_head; + DCHECK(new_page->bucket == this); + DCHECK(new_page->is_decommitted()); + decommitted_pages_head = new_page->next_page; + void* addr = PartitionPage::ToPointer(new_page); + root->RecommitSystemPages(addr, new_page->bucket->get_bytes_per_span()); + new_page->Reset(); + // TODO(https://crbug.com/890752): Optimizing here might cause pages to + // not be zeroed. + // *is_already_zeroed = true; + } + DCHECK(new_page); + } else { + // Third. If we get here, we need a brand new page. + uint16_t num_partition_pages = get_pages_per_slot_span(); + void* raw_pages = AllocNewSlotSpan(root, flags, num_partition_pages); + if (LIKELY(raw_pages != nullptr)) { + new_page = PartitionPage::FromPointerNoAlignmentCheck(raw_pages); + InitializeSlotSpan(new_page); + // TODO(https://crbug.com/890752): Optimizing here causes pages to not be + // zeroed on at least macOS. + // *is_already_zeroed = true; + } + } + + // Bail if we had a memory allocation failure. + if (UNLIKELY(!new_page)) { + DCHECK(active_pages_head == PartitionPage::get_sentinel_page()); + if (return_null) + return nullptr; + root->OutOfMemory(size); + } + + // TODO(ajwong): Is there a way to avoid the reading of bucket here? + // It seems like in many of the conditional branches above, |this| == + // |new_page->bucket|. 
Maybe pull this into another function? + PartitionBucket* bucket = new_page->bucket; + DCHECK(bucket != get_sentinel_bucket()); + bucket->active_pages_head = new_page; + new_page->set_raw_size(size); + + // If we found an active page with free slots, or an empty page, we have a + // usable freelist head. + if (LIKELY(new_page->freelist_head != nullptr)) { + PartitionFreelistEntry* entry = new_page->freelist_head; + PartitionFreelistEntry* new_head = + EncodedPartitionFreelistEntry::Decode(entry->next); + new_page->freelist_head = new_head; + new_page->num_allocated_slots++; + return entry; + } + // Otherwise, we need to build the freelist. + DCHECK(new_page->num_unprovisioned_slots); + return AllocAndFillFreelist(new_page); +} + +} // namespace internal +} // namespace base +} // namespace pdfium diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_bucket.h b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_bucket.h new file mode 100644 index 000000000..a89099b8e --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_bucket.h @@ -0,0 +1,130 @@ +// Copyright (c) 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_H_ +#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_H_ + +#include +#include + +#include "third_party/base/allocator/partition_allocator/partition_alloc_constants.h" +#include "third_party/base/base_export.h" +#include "third_party/base/compiler_specific.h" + +namespace pdfium { +namespace base { +namespace internal { + +struct PartitionPage; +struct PartitionRootBase; + +struct PartitionBucket { + // Accessed most in hot path => goes first. 
+ PartitionPage* active_pages_head; + + PartitionPage* empty_pages_head; + PartitionPage* decommitted_pages_head; + uint32_t slot_size; + uint32_t num_system_pages_per_slot_span : 8; + uint32_t num_full_pages : 24; + + // Public API. + void Init(uint32_t new_slot_size); + + // Sets |is_already_zeroed| to true if the allocation was satisfied by + // requesting (a) new page(s) from the operating system, or false otherwise. + // This enables an optimization for when callers use |PartitionAllocZeroFill|: + // there is no need to call memset on fresh pages; the OS has already zeroed + // them. (See |PartitionRootBase::AllocFromBucket|.) + // + // Note the matching Free() functions are in PartitionPage. + BASE_EXPORT NOINLINE void* SlowPathAlloc(PartitionRootBase* root, + int flags, + size_t size, + bool* is_already_zeroed); + + ALWAYS_INLINE bool is_direct_mapped() const { + return !num_system_pages_per_slot_span; + } + ALWAYS_INLINE size_t get_bytes_per_span() const { + // TODO(ajwong): Change to CheckedMul. https://crbug.com/787153 + // https://crbug.com/680657 + return num_system_pages_per_slot_span * kSystemPageSize; + } + ALWAYS_INLINE uint16_t get_slots_per_span() const { + // TODO(ajwong): Change to CheckedMul. https://crbug.com/787153 + // https://crbug.com/680657 + return static_cast(get_bytes_per_span() / slot_size); + } + + static ALWAYS_INLINE size_t get_direct_map_size(size_t size) { + // Caller must check that the size is not above the kGenericMaxDirectMapped + // limit before calling. This also guards against integer overflow in the + // calculation here. + DCHECK(size <= kGenericMaxDirectMapped); + return (size + kSystemPageOffsetMask) & kSystemPageBaseMask; + } + + // TODO(ajwong): Can this be made private? https://crbug.com/787153 + static PartitionBucket* get_sentinel_bucket(); + + // This helper function scans a bucket's active page list for a suitable new + // active page. 
When it finds a suitable new active page (one that has + // free slots and is not empty), it is set as the new active page. If there + // is no suitable new active page, the current active page is set to + // PartitionPage::get_sentinel_page(). As potential pages are scanned, they + // are tidied up according to their state. Empty pages are swept on to the + // empty page list, decommitted pages on to the decommitted page list and full + // pages are unlinked from any list. + // + // This is where the guts of the bucket maintenance is done! + bool SetNewActivePage(); + + private: + static void OutOfMemory(const PartitionRootBase* root); + static void OutOfMemoryWithLotsOfUncommitedPages(); + + static NOINLINE void OnFull(); + + // Returns a natural number of PartitionPages (calculated by + // get_system_pages_per_slot_span()) to allocate from the current + // SuperPage when the bucket runs out of slots. + ALWAYS_INLINE uint16_t get_pages_per_slot_span(); + + // Returns the number of system pages in a slot span. + // + // The calculation attemps to find the best number of System Pages to + // allocate for the given slot_size to minimize wasted space. It uses a + // heuristic that looks at number of bytes wasted after the last slot and + // attempts to account for the PTE usage of each System Page. + uint8_t get_system_pages_per_slot_span(); + + // Allocates a new slot span with size |num_partition_pages| from the + // current extent. Metadata within this slot span will be uninitialized. + // Returns nullptr on error. + ALWAYS_INLINE void* AllocNewSlotSpan(PartitionRootBase* root, + int flags, + uint16_t num_partition_pages); + + // Each bucket allocates a slot span when it runs out of slots. + // A slot span's size is equal to get_pages_per_slot_span() number of + // PartitionPages. 
This function initializes all PartitionPage within the + // span to point to the first PartitionPage which holds all the metadata + // for the span and registers this bucket as the owner of the span. It does + // NOT put the slots into the bucket's freelist. + ALWAYS_INLINE void InitializeSlotSpan(PartitionPage* page); + + // Allocates one slot from the given |page| and then adds the remainder to + // the current bucket. If the |page| was freshly allocated, it must have been + // passed through InitializeSlotSpan() first. + ALWAYS_INLINE char* AllocAndFillFreelist(PartitionPage* page); + + static PartitionBucket sentinel_bucket_; +}; + +} // namespace internal +} // namespace base +} // namespace pdfium + +#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_H_ diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_cookie.h b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_cookie.h new file mode 100644 index 000000000..7cf4e84e0 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_cookie.h @@ -0,0 +1,72 @@ +// Copyright (c) 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_COOKIE_H_ +#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_COOKIE_H_ + +#include "third_party/base/compiler_specific.h" +#include "third_party/base/logging.h" + +namespace pdfium { +namespace base { +namespace internal { + +#if DCHECK_IS_ON() +// Handles alignment up to XMM instructions on Intel. 
+static constexpr size_t kCookieSize = 16; + +static constexpr unsigned char kCookieValue[kCookieSize] = { + 0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D, + 0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E}; +#endif + +ALWAYS_INLINE void PartitionCookieCheckValue(void* ptr) { +#if DCHECK_IS_ON() + unsigned char* cookie_ptr = reinterpret_cast(ptr); + for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr) + DCHECK(*cookie_ptr == kCookieValue[i]); +#endif +} + +ALWAYS_INLINE size_t PartitionCookieSizeAdjustAdd(size_t size) { +#if DCHECK_IS_ON() + // Add space for cookies, checking for integer overflow. TODO(palmer): + // Investigate the performance and code size implications of using + // CheckedNumeric throughout PA. + DCHECK(size + (2 * kCookieSize) > size); + size += 2 * kCookieSize; +#endif + return size; +} + +ALWAYS_INLINE void* PartitionCookieFreePointerAdjust(void* ptr) { +#if DCHECK_IS_ON() + // The value given to the application is actually just after the cookie. + ptr = static_cast(ptr) - kCookieSize; +#endif + return ptr; +} + +ALWAYS_INLINE size_t PartitionCookieSizeAdjustSubtract(size_t size) { +#if DCHECK_IS_ON() + // Remove space for cookies. 
+ DCHECK(size >= 2 * kCookieSize); + size -= 2 * kCookieSize; +#endif + return size; +} + +ALWAYS_INLINE void PartitionCookieWriteValue(void* ptr) { +#if DCHECK_IS_ON() + unsigned char* cookie_ptr = reinterpret_cast(ptr); + for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr) + *cookie_ptr = kCookieValue[i]; +#endif +} + +} // namespace internal +} // namespace base +} // namespace pdfium + +#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_COOKIE_H_ diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_direct_map_extent.h b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_direct_map_extent.h new file mode 100644 index 000000000..192c5b4b3 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_direct_map_extent.h @@ -0,0 +1,35 @@ +// Copyright (c) 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_H_ +#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_H_ + +#include "third_party/base/allocator/partition_allocator/partition_bucket.h" +#include "third_party/base/allocator/partition_allocator/partition_page.h" + +namespace pdfium { +namespace base { +namespace internal { + +struct PartitionDirectMapExtent { + PartitionDirectMapExtent* next_extent; + PartitionDirectMapExtent* prev_extent; + PartitionBucket* bucket; + size_t map_size; // Mapped size, not including guard pages and meta-data. 
+ + ALWAYS_INLINE static PartitionDirectMapExtent* FromPage(PartitionPage* page); +}; + +ALWAYS_INLINE PartitionDirectMapExtent* PartitionDirectMapExtent::FromPage( + PartitionPage* page) { + DCHECK(page->bucket->is_direct_mapped()); + return reinterpret_cast( + reinterpret_cast(page) + 3 * kPageMetadataSize); +} + +} // namespace internal +} // namespace base +} // namespace pdfium + +#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_H_ diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_freelist_entry.h b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_freelist_entry.h new file mode 100644 index 000000000..5d46f0f92 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_freelist_entry.h @@ -0,0 +1,73 @@ +// Copyright (c) 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_FREELIST_ENTRY_H_ +#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_FREELIST_ENTRY_H_ + +#include + +#include "build/build_config.h" +#include "third_party/base/allocator/partition_allocator/partition_alloc_constants.h" +#include "third_party/base/compiler_specific.h" +#include "third_party/base/sys_byteorder.h" + +namespace pdfium { +namespace base { +namespace internal { + +struct EncodedPartitionFreelistEntry; + +struct PartitionFreelistEntry { + EncodedPartitionFreelistEntry* next; + + PartitionFreelistEntry() = delete; + ~PartitionFreelistEntry() = delete; + + ALWAYS_INLINE static EncodedPartitionFreelistEntry* Encode( + PartitionFreelistEntry* ptr) { + return reinterpret_cast(Transform(ptr)); + } + + private: + friend struct EncodedPartitionFreelistEntry; + static ALWAYS_INLINE void* Transform(void* ptr) { + // We use bswap on little endian as a fast mask for two reasons: + // 1) If an object is freed and its vtable used where the attacker doesn't + // get the chance to run allocations between the free and use, the vtable + // dereference is likely to fault. + // 2) If the attacker has a linear buffer overflow and elects to try and + // corrupt a freelist pointer, partial pointer overwrite attacks are + // thwarted. + // For big endian, similar guarantees are arrived at with a negation. 
+#if defined(ARCH_CPU_BIG_ENDIAN) + uintptr_t masked = ~reinterpret_cast(ptr); +#else + uintptr_t masked = ByteSwapUintPtrT(reinterpret_cast(ptr)); +#endif + return reinterpret_cast(masked); + } +}; + +struct EncodedPartitionFreelistEntry { + char scrambled[sizeof(PartitionFreelistEntry*)]; + + EncodedPartitionFreelistEntry() = delete; + ~EncodedPartitionFreelistEntry() = delete; + + ALWAYS_INLINE static PartitionFreelistEntry* Decode( + EncodedPartitionFreelistEntry* ptr) { + return reinterpret_cast( + PartitionFreelistEntry::Transform(ptr)); + } +}; + +static_assert(sizeof(PartitionFreelistEntry) == + sizeof(EncodedPartitionFreelistEntry), + "Should not have padding"); + +} // namespace internal +} // namespace base +} // namespace pdfium + +#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_FREELIST_ENTRY_H_ diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_oom.cc b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_oom.cc new file mode 100644 index 000000000..f93dae702 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_oom.cc @@ -0,0 +1,26 @@ +// Copyright (c) 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "third_party/base/allocator/partition_allocator/partition_oom.h" + +#include "build/build_config.h" +#include "third_party/base/allocator/partition_allocator/oom.h" + +namespace pdfium { +namespace base { +namespace internal { + +void NOINLINE PartitionExcessiveAllocationSize(size_t size) { + OOM_CRASH(size); +} + +#if !defined(ARCH_CPU_64_BITS) +NOINLINE void PartitionOutOfMemoryWithLotsOfUncommitedPages(size_t size) { + OOM_CRASH(size); +} +#endif + +} // namespace internal +} // namespace base +} // namespace pdfium diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_oom.h b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_oom.h new file mode 100644 index 000000000..683358e08 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_oom.h @@ -0,0 +1,30 @@ +// Copyright (c) 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Holds functions for generating OOM errors from PartitionAlloc. This is +// distinct from oom.h in that it is meant only for use in PartitionAlloc. 
+ +#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_OOM_H_ +#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_OOM_H_ + +#include + +#include "build/build_config.h" +#include "third_party/base/compiler_specific.h" + +namespace pdfium { +namespace base { +namespace internal { + +NOINLINE void PartitionExcessiveAllocationSize(size_t size); + +#if !defined(ARCH_CPU_64_BITS) +NOINLINE void PartitionOutOfMemoryWithLotsOfUncommitedPages(size_t size); +#endif + +} // namespace internal +} // namespace base +} // namespace pdfium + +#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_OOM_H_ diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_page.cc b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_page.cc new file mode 100644 index 000000000..0ddfe1242 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_page.cc @@ -0,0 +1,168 @@ +// Copyright (c) 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "third_party/base/allocator/partition_allocator/partition_page.h" + +#include "third_party/base/allocator/partition_allocator/partition_direct_map_extent.h" +#include "third_party/base/allocator/partition_allocator/partition_root_base.h" + +namespace pdfium { +namespace base { +namespace internal { + +namespace { + +ALWAYS_INLINE DeferredUnmap PartitionDirectUnmap(PartitionPage* page) { + PartitionRootBase* root = PartitionRootBase::FromPage(page); + const PartitionDirectMapExtent* extent = + PartitionDirectMapExtent::FromPage(page); + size_t unmap_size = extent->map_size; + + // Maintain the doubly-linked list of all direct mappings. 
+ if (extent->prev_extent) { + DCHECK(extent->prev_extent->next_extent == extent); + extent->prev_extent->next_extent = extent->next_extent; + } else { + root->direct_map_list = extent->next_extent; + } + if (extent->next_extent) { + DCHECK(extent->next_extent->prev_extent == extent); + extent->next_extent->prev_extent = extent->prev_extent; + } + + // Add on the size of the trailing guard page and preceeding partition + // page. + unmap_size += kPartitionPageSize + kSystemPageSize; + + size_t uncommitted_page_size = page->bucket->slot_size + kSystemPageSize; + root->DecreaseCommittedPages(uncommitted_page_size); + DCHECK(root->total_size_of_direct_mapped_pages >= uncommitted_page_size); + root->total_size_of_direct_mapped_pages -= uncommitted_page_size; + + DCHECK(!(unmap_size & kPageAllocationGranularityOffsetMask)); + + char* ptr = reinterpret_cast(PartitionPage::ToPointer(page)); + // Account for the mapping starting a partition page before the actual + // allocation address. + ptr -= kPartitionPageSize; + return {ptr, unmap_size}; +} + +ALWAYS_INLINE void PartitionRegisterEmptyPage(PartitionPage* page) { + DCHECK(page->is_empty()); + PartitionRootBase* root = PartitionRootBase::FromPage(page); + + // If the page is already registered as empty, give it another life. + if (page->empty_cache_index != -1) { + DCHECK(page->empty_cache_index >= 0); + DCHECK(static_cast(page->empty_cache_index) < kMaxFreeableSpans); + DCHECK(root->global_empty_page_ring[page->empty_cache_index] == page); + root->global_empty_page_ring[page->empty_cache_index] = nullptr; + } + + int16_t current_index = root->global_empty_page_ring_index; + PartitionPage* page_to_decommit = root->global_empty_page_ring[current_index]; + // The page might well have been re-activated, filled up, etc. before we get + // around to looking at it here. 
+ if (page_to_decommit) + page_to_decommit->DecommitIfPossible(root); + + // We put the empty slot span on our global list of "pages that were once + // empty". thus providing it a bit of breathing room to get re-used before + // we really free it. This improves performance, particularly on Mac OS X + // which has subpar memory management performance. + root->global_empty_page_ring[current_index] = page; + page->empty_cache_index = current_index; + ++current_index; + if (current_index == kMaxFreeableSpans) + current_index = 0; + root->global_empty_page_ring_index = current_index; +} + +} // namespace + +// static +PartitionPage PartitionPage::sentinel_page_; + +PartitionPage* PartitionPage::get_sentinel_page() { + return &sentinel_page_; +} + +DeferredUnmap PartitionPage::FreeSlowPath() { + DCHECK(this != get_sentinel_page()); + if (LIKELY(num_allocated_slots == 0)) { + // Page became fully unused. + if (UNLIKELY(bucket->is_direct_mapped())) { + return PartitionDirectUnmap(this); + } + // If it's the current active page, change it. We bounce the page to + // the empty list as a force towards defragmentation. + if (LIKELY(this == bucket->active_pages_head)) + bucket->SetNewActivePage(); + DCHECK(bucket->active_pages_head != this); + + set_raw_size(0); + DCHECK(!get_raw_size()); + + PartitionRegisterEmptyPage(this); + } else { + DCHECK(!bucket->is_direct_mapped()); + // Ensure that the page is full. That's the only valid case if we + // arrive here. + DCHECK(num_allocated_slots < 0); + // A transition of num_allocated_slots from 0 to -1 is not legal, and + // likely indicates a double-free. + CHECK(num_allocated_slots != -1); + num_allocated_slots = -num_allocated_slots - 2; + DCHECK(num_allocated_slots == bucket->get_slots_per_span() - 1); + // Fully used page became partially used. It must be put back on the + // non-full page list. Also make it the current page to increase the + // chances of it being filled up again. 
The old current page will be + // the next page. + DCHECK(!next_page); + if (LIKELY(bucket->active_pages_head != get_sentinel_page())) + next_page = bucket->active_pages_head; + bucket->active_pages_head = this; + --bucket->num_full_pages; + // Special case: for a partition page with just a single slot, it may + // now be empty and we want to run it through the empty logic. + if (UNLIKELY(num_allocated_slots == 0)) + return FreeSlowPath(); + } + return {}; +} + +void PartitionPage::Decommit(PartitionRootBase* root) { + DCHECK(is_empty()); + DCHECK(!bucket->is_direct_mapped()); + void* addr = PartitionPage::ToPointer(this); + root->DecommitSystemPages(addr, bucket->get_bytes_per_span()); + + // We actually leave the decommitted page in the active list. We'll sweep + // it on to the decommitted page list when we next walk the active page + // list. + // Pulling this trick enables us to use a singly-linked page list for all + // cases, which is critical in keeping the page metadata structure down to + // 32 bytes in size. 
+ freelist_head = nullptr; + num_unprovisioned_slots = 0; + DCHECK(is_decommitted()); +} + +void PartitionPage::DecommitIfPossible(PartitionRootBase* root) { + DCHECK(empty_cache_index >= 0); + DCHECK(static_cast(empty_cache_index) < kMaxFreeableSpans); + DCHECK(this == root->global_empty_page_ring[empty_cache_index]); + empty_cache_index = -1; + if (is_empty()) + Decommit(root); +} + +void DeferredUnmap::Unmap() { + FreePages(ptr, size); +} + +} // namespace internal +} // namespace base +} // namespace pdfium diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_page.h b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_page.h new file mode 100644 index 000000000..049ff26ae --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_page.h @@ -0,0 +1,316 @@ +// Copyright (c) 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_ +#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_ + +#include + +#include "third_party/base/allocator/partition_allocator/partition_alloc_constants.h" +#include "third_party/base/allocator/partition_allocator/partition_bucket.h" +#include "third_party/base/allocator/partition_allocator/partition_cookie.h" +#include "third_party/base/allocator/partition_allocator/partition_freelist_entry.h" +#include "third_party/base/allocator/partition_allocator/random.h" + +namespace pdfium { +namespace base { +namespace internal { + +struct PartitionRootBase; + +// PartitionPage::Free() defers unmapping a large page until the lock is +// released. Callers of PartitionPage::Free() must invoke Run(). +// TODO(1061437): Reconsider once the new locking mechanism is implemented. 
+struct DeferredUnmap { + void* ptr = nullptr; + size_t size = 0; + // In most cases there is no page to unmap and ptr == nullptr. This function + // is inlined to avoid the overhead of a function call in the common case. + ALWAYS_INLINE void Run(); + + private: + BASE_EXPORT NOINLINE void Unmap(); +}; + +// Some notes on page states. A page can be in one of four major states: +// 1) Active. +// 2) Full. +// 3) Empty. +// 4) Decommitted. +// An active page has available free slots. A full page has no free slots. An +// empty page has no free slots, and a decommitted page is an empty page that +// had its backing memory released back to the system. +// There are two linked lists tracking the pages. The "active page" list is an +// approximation of a list of active pages. It is an approximation because +// full, empty and decommitted pages may briefly be present in the list until +// we next do a scan over it. +// The "empty page" list is an accurate list of pages which are either empty +// or decommitted. +// +// The significant page transitions are: +// - free() will detect when a full page has a slot free()'d and immediately +// return the page to the head of the active list. +// - free() will detect when a page is fully emptied. It _may_ add it to the +// empty list or it _may_ leave it on the active list until a future list scan. +// - malloc() _may_ scan the active page list in order to fulfil the request. +// If it does this, full, empty and decommitted pages encountered will be +// booted out of the active list. If there are no suitable active pages found, +// an empty or decommitted page (if one exists) will be pulled from the empty +// list on to the active list. +// +// TODO(ajwong): Evaluate if this should be named PartitionSlotSpanMetadata or +// similar. If so, all uses of the term "page" in comments, member variables, +// local variables, and documentation that refer to this concept should be +// updated. 
+struct PartitionPage { + PartitionFreelistEntry* freelist_head; + PartitionPage* next_page; + PartitionBucket* bucket; + // Deliberately signed, 0 for empty or decommitted page, -n for full pages: + int16_t num_allocated_slots; + uint16_t num_unprovisioned_slots; + uint16_t page_offset; + int16_t empty_cache_index; // -1 if not in the empty cache. + + // Public API + + // Note the matching Alloc() functions are in PartitionPage. + // Callers must invoke DeferredUnmap::Run() after releasing the lock. + BASE_EXPORT NOINLINE DeferredUnmap FreeSlowPath() WARN_UNUSED_RESULT; + ALWAYS_INLINE DeferredUnmap Free(void* ptr) WARN_UNUSED_RESULT; + + void Decommit(PartitionRootBase* root); + void DecommitIfPossible(PartitionRootBase* root); + + // Pointer manipulation functions. These must be static as the input |page| + // pointer may be the result of an offset calculation and therefore cannot + // be trusted. The objective of these functions is to sanitize this input. + ALWAYS_INLINE static void* ToPointer(const PartitionPage* page); + ALWAYS_INLINE static PartitionPage* FromPointerNoAlignmentCheck(void* ptr); + ALWAYS_INLINE static PartitionPage* FromPointer(void* ptr); + + ALWAYS_INLINE const size_t* get_raw_size_ptr() const; + ALWAYS_INLINE size_t* get_raw_size_ptr() { + return const_cast( + const_cast(this)->get_raw_size_ptr()); + } + + ALWAYS_INLINE size_t get_raw_size() const; + ALWAYS_INLINE void set_raw_size(size_t size); + + ALWAYS_INLINE void Reset(); + + // TODO(ajwong): Can this be made private? https://crbug.com/787153 + BASE_EXPORT static PartitionPage* get_sentinel_page(); + + // Page State accessors. + // Note that it's only valid to call these functions on pages found on one of + // the page lists. Specifically, you can't call these functions on full pages + // that were detached from the active list. + // + // This restriction provides the flexibity for some of the status fields to + // be repurposed when a page is taken off a list. 
See the negation of + // |num_allocated_slots| when a full page is removed from the active list + // for an example of such repurposing. + ALWAYS_INLINE bool is_active() const; + ALWAYS_INLINE bool is_full() const; + ALWAYS_INLINE bool is_empty() const; + ALWAYS_INLINE bool is_decommitted() const; + + private: + // g_sentinel_page is used as a sentinel to indicate that there is no page + // in the active page list. We can use nullptr, but in that case we need + // to add a null-check branch to the hot allocation path. We want to avoid + // that. + // + // Note, this declaration is kept in the header as opposed to an anonymous + // namespace so the getter can be fully inlined. + static PartitionPage sentinel_page_; +}; +static_assert(sizeof(PartitionPage) <= kPageMetadataSize, + "PartitionPage must be able to fit in a metadata slot"); + +ALWAYS_INLINE char* PartitionSuperPageToMetadataArea(char* ptr) { + uintptr_t pointer_as_uint = reinterpret_cast(ptr); + DCHECK(!(pointer_as_uint & kSuperPageOffsetMask)); + // The metadata area is exactly one system page (the guard page) into the + // super page. + return reinterpret_cast(pointer_as_uint + kSystemPageSize); +} + +ALWAYS_INLINE PartitionPage* PartitionPage::FromPointerNoAlignmentCheck( + void* ptr) { + uintptr_t pointer_as_uint = reinterpret_cast(ptr); + char* super_page_ptr = + reinterpret_cast(pointer_as_uint & kSuperPageBaseMask); + uintptr_t partition_page_index = + (pointer_as_uint & kSuperPageOffsetMask) >> kPartitionPageShift; + // Index 0 is invalid because it is the metadata and guard area and + // the last index is invalid because it is a guard page. + DCHECK(partition_page_index); + DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1); + PartitionPage* page = reinterpret_cast( + PartitionSuperPageToMetadataArea(super_page_ptr) + + (partition_page_index << kPageMetadataShift)); + // Partition pages in the same slot span can share the same page object. + // Adjust for that. 
+ size_t delta = page->page_offset << kPageMetadataShift; + page = + reinterpret_cast(reinterpret_cast(page) - delta); + return page; +} + +// Resturns start of the slot span for the PartitionPage. +ALWAYS_INLINE void* PartitionPage::ToPointer(const PartitionPage* page) { + uintptr_t pointer_as_uint = reinterpret_cast(page); + + uintptr_t super_page_offset = (pointer_as_uint & kSuperPageOffsetMask); + + // A valid |page| must be past the first guard System page and within + // the following metadata region. + DCHECK(super_page_offset > kSystemPageSize); + // Must be less than total metadata region. + DCHECK(super_page_offset < kSystemPageSize + (kNumPartitionPagesPerSuperPage * + kPageMetadataSize)); + uintptr_t partition_page_index = + (super_page_offset - kSystemPageSize) >> kPageMetadataShift; + // Index 0 is invalid because it is the superpage extent metadata and the + // last index is invalid because the whole PartitionPage is set as guard + // pages for the metadata region. + DCHECK(partition_page_index); + DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1); + uintptr_t super_page_base = (pointer_as_uint & kSuperPageBaseMask); + void* ret = reinterpret_cast( + super_page_base + (partition_page_index << kPartitionPageShift)); + return ret; +} + +ALWAYS_INLINE PartitionPage* PartitionPage::FromPointer(void* ptr) { + PartitionPage* page = PartitionPage::FromPointerNoAlignmentCheck(ptr); + // Checks that the pointer is a multiple of bucket size. + DCHECK(!((reinterpret_cast(ptr) - + reinterpret_cast(PartitionPage::ToPointer(page))) % + page->bucket->slot_size)); + return page; +} + +ALWAYS_INLINE const size_t* PartitionPage::get_raw_size_ptr() const { + // For single-slot buckets which span more than one partition page, we + // have some spare metadata space to store the raw allocation size. We + // can use this to report better statistics. 
+ if (bucket->slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize) + return nullptr; + + DCHECK((bucket->slot_size % kSystemPageSize) == 0); + DCHECK(bucket->is_direct_mapped() || bucket->get_slots_per_span() == 1); + + const PartitionPage* the_next_page = this + 1; + return reinterpret_cast(&the_next_page->freelist_head); +} + +ALWAYS_INLINE size_t PartitionPage::get_raw_size() const { + const size_t* ptr = get_raw_size_ptr(); + if (UNLIKELY(ptr != nullptr)) + return *ptr; + return 0; +} + +ALWAYS_INLINE DeferredUnmap PartitionPage::Free(void* ptr) { +#if DCHECK_IS_ON() + size_t slot_size = bucket->slot_size; + const size_t raw_size = get_raw_size(); + if (raw_size) { + slot_size = raw_size; + } + + // If these asserts fire, you probably corrupted memory. + PartitionCookieCheckValue(ptr); + PartitionCookieCheckValue(reinterpret_cast(ptr) + slot_size - + kCookieSize); + + memset(ptr, kFreedByte, slot_size); +#endif + + DCHECK(num_allocated_slots); + // Catches an immediate double free. + CHECK(ptr != freelist_head); + // Look for double free one level deeper in debug. + DCHECK(!freelist_head || + ptr != EncodedPartitionFreelistEntry::Decode(freelist_head->next)); + internal::PartitionFreelistEntry* entry = + static_cast(ptr); + entry->next = internal::PartitionFreelistEntry::Encode(freelist_head); + freelist_head = entry; + --num_allocated_slots; + if (UNLIKELY(num_allocated_slots <= 0)) { + return FreeSlowPath(); + } else { + // All single-slot allocations must go through the slow path to + // correctly update the size metadata. 
+ DCHECK(get_raw_size() == 0); + } + return {}; +} + +ALWAYS_INLINE bool PartitionPage::is_active() const { + DCHECK(this != get_sentinel_page()); + DCHECK(!page_offset); + return (num_allocated_slots > 0 && + (freelist_head || num_unprovisioned_slots)); +} + +ALWAYS_INLINE bool PartitionPage::is_full() const { + DCHECK(this != get_sentinel_page()); + DCHECK(!page_offset); + bool ret = (num_allocated_slots == bucket->get_slots_per_span()); + if (ret) { + DCHECK(!freelist_head); + DCHECK(!num_unprovisioned_slots); + } + return ret; +} + +ALWAYS_INLINE bool PartitionPage::is_empty() const { + DCHECK(this != get_sentinel_page()); + DCHECK(!page_offset); + return (!num_allocated_slots && freelist_head); +} + +ALWAYS_INLINE bool PartitionPage::is_decommitted() const { + DCHECK(this != get_sentinel_page()); + DCHECK(!page_offset); + bool ret = (!num_allocated_slots && !freelist_head); + if (ret) { + DCHECK(!num_unprovisioned_slots); + DCHECK(empty_cache_index == -1); + } + return ret; +} + +ALWAYS_INLINE void PartitionPage::set_raw_size(size_t size) { + size_t* raw_size_ptr = get_raw_size_ptr(); + if (UNLIKELY(raw_size_ptr != nullptr)) + *raw_size_ptr = size; +} + +ALWAYS_INLINE void PartitionPage::Reset() { + DCHECK(is_decommitted()); + + num_unprovisioned_slots = bucket->get_slots_per_span(); + DCHECK(num_unprovisioned_slots); + + next_page = nullptr; +} + +ALWAYS_INLINE void DeferredUnmap::Run() { + if (UNLIKELY(ptr)) { + Unmap(); + } +} + +} // namespace internal +} // namespace base +} // namespace pdfium + +#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_ diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_root_base.cc b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_root_base.cc new file mode 100644 index 000000000..543434718 --- /dev/null +++ 
b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_root_base.cc @@ -0,0 +1,42 @@ +// Copyright (c) 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "third_party/base/allocator/partition_allocator/partition_root_base.h" + +#include "build/build_config.h" +#include "third_party/base/allocator/partition_allocator/oom.h" +#include "third_party/base/allocator/partition_allocator/partition_oom.h" +#include "third_party/base/allocator/partition_allocator/partition_page.h" + +namespace pdfium { +namespace base { +namespace internal { + +NOINLINE void PartitionRootBase::OutOfMemory(size_t size) { +#if !defined(ARCH_CPU_64_BITS) + // Check whether this OOM is due to a lot of super pages that are allocated + // but not committed, probably due to http://crbug.com/421387. + if (total_size_of_super_pages + total_size_of_direct_mapped_pages - + total_size_of_committed_pages > + kReasonableSizeOfUnusedPages) { + PartitionOutOfMemoryWithLotsOfUncommitedPages(size); + } +#endif + if (PartitionRootBase::g_oom_handling_function) + (*PartitionRootBase::g_oom_handling_function)(size); + OOM_CRASH(size); +} + +void PartitionRootBase::DecommitEmptyPages() { + for (size_t i = 0; i < kMaxFreeableSpans; ++i) { + internal::PartitionPage* page = global_empty_page_ring[i]; + if (page) + page->DecommitIfPossible(this); + global_empty_page_ring[i] = nullptr; + } +} + +} // namespace internal +} // namespace base +} // namespace pdfium diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_root_base.h b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_root_base.h new file mode 100644 index 000000000..5d692b2ab --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/partition_root_base.h @@ 
-0,0 +1,201 @@ +// Copyright (c) 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_BASE_H_ +#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_BASE_H_ + +#include "build/build_config.h" +#include "third_party/base/allocator/partition_allocator/page_allocator.h" +#include "third_party/base/allocator/partition_allocator/partition_alloc_constants.h" +#include "third_party/base/allocator/partition_allocator/partition_bucket.h" +#include "third_party/base/allocator/partition_allocator/partition_direct_map_extent.h" +#include "third_party/base/allocator/partition_allocator/partition_page.h" + +namespace pdfium { +namespace base { + +typedef void (*OomFunction)(size_t); + +namespace internal { + +struct PartitionPage; +struct PartitionRootBase; + +// An "extent" is a span of consecutive superpages. We link to the partition's +// next extent (if there is one) to the very start of a superpage's metadata +// area. +struct PartitionSuperPageExtentEntry { + PartitionRootBase* root; + char* super_page_base; + char* super_pages_end; + PartitionSuperPageExtentEntry* next; +}; +static_assert( + sizeof(PartitionSuperPageExtentEntry) <= kPageMetadataSize, + "PartitionSuperPageExtentEntry must be able to fit in a metadata slot"); + +struct BASE_EXPORT PartitionRootBase { + PartitionRootBase(); + virtual ~PartitionRootBase(); + size_t total_size_of_committed_pages = 0; + size_t total_size_of_super_pages = 0; + size_t total_size_of_direct_mapped_pages = 0; + // Invariant: total_size_of_committed_pages <= + // total_size_of_super_pages + + // total_size_of_direct_mapped_pages. 
+ unsigned num_buckets = 0; + unsigned max_allocation = 0; + bool initialized = false; + char* next_super_page = nullptr; + char* next_partition_page = nullptr; + char* next_partition_page_end = nullptr; + PartitionSuperPageExtentEntry* current_extent = nullptr; + PartitionSuperPageExtentEntry* first_extent = nullptr; + PartitionDirectMapExtent* direct_map_list = nullptr; + PartitionPage* global_empty_page_ring[kMaxFreeableSpans] = {}; + int16_t global_empty_page_ring_index = 0; + uintptr_t inverted_self = 0; + + // Public API + + // Allocates out of the given bucket. Properly, this function should probably + // be in PartitionBucket, but because the implementation needs to be inlined + // for performance, and because it needs to inspect PartitionPage, + // it becomes impossible to have it in PartitionBucket as this causes a + // cyclical dependency on PartitionPage function implementations. + // + // Moving it a layer lower couples PartitionRootBase and PartitionBucket, but + // preserves the layering of the includes. + // + // Note the matching Free() functions are in PartitionPage. + ALWAYS_INLINE void* AllocFromBucket(PartitionBucket* bucket, + int flags, + size_t size); + + ALWAYS_INLINE static bool IsValidPage(PartitionPage* page); + ALWAYS_INLINE static PartitionRootBase* FromPage(PartitionPage* page); + + // g_oom_handling_function is invoked when PartitionAlloc hits OutOfMemory. + static OomFunction g_oom_handling_function; + NOINLINE void OutOfMemory(size_t size); + + ALWAYS_INLINE void IncreaseCommittedPages(size_t len); + ALWAYS_INLINE void DecreaseCommittedPages(size_t len); + ALWAYS_INLINE void DecommitSystemPages(void* address, size_t length); + ALWAYS_INLINE void RecommitSystemPages(void* address, size_t length); + + // Frees memory from this partition, if possible, by decommitting pages. + // |flags| is an OR of base::PartitionPurgeFlags. 
+ virtual void PurgeMemory(int flags) = 0; + void DecommitEmptyPages(); +}; + +ALWAYS_INLINE void* PartitionRootBase::AllocFromBucket(PartitionBucket* bucket, + int flags, + size_t size) { + bool zero_fill = flags & PartitionAllocZeroFill; + bool is_already_zeroed = false; + + PartitionPage* page = bucket->active_pages_head; + // Check that this page is neither full nor freed. + DCHECK(page->num_allocated_slots >= 0); + void* ret = page->freelist_head; + if (LIKELY(ret != 0)) { + // If these DCHECKs fire, you probably corrupted memory. TODO(palmer): See + // if we can afford to make these CHECKs. + DCHECK(PartitionRootBase::IsValidPage(page)); + + // All large allocations must go through the slow path to correctly update + // the size metadata. + DCHECK(page->get_raw_size() == 0); + internal::PartitionFreelistEntry* new_head = + internal::EncodedPartitionFreelistEntry::Decode( + page->freelist_head->next); + page->freelist_head = new_head; + page->num_allocated_slots++; + } else { + ret = bucket->SlowPathAlloc(this, flags, size, &is_already_zeroed); + // TODO(palmer): See if we can afford to make this a CHECK. + DCHECK(!ret || + PartitionRootBase::IsValidPage(PartitionPage::FromPointer(ret))); + } + +#if DCHECK_IS_ON() + if (!ret) { + return nullptr; + } + + page = PartitionPage::FromPointer(ret); + // TODO(ajwong): Can |page->bucket| ever not be |this|? If not, can this just + // be bucket->slot_size? + size_t new_slot_size = page->bucket->slot_size; + size_t raw_size = page->get_raw_size(); + if (raw_size) { + DCHECK(raw_size == size); + new_slot_size = raw_size; + } + size_t no_cookie_size = PartitionCookieSizeAdjustSubtract(new_slot_size); + char* char_ret = static_cast(ret); + // The value given to the application is actually just after the cookie. + ret = char_ret + kCookieSize; + + // Fill the region kUninitializedByte or 0, and surround it with 2 cookies. 
+ PartitionCookieWriteValue(char_ret); + if (!zero_fill) { + memset(ret, kUninitializedByte, no_cookie_size); + } else if (!is_already_zeroed) { + memset(ret, 0, no_cookie_size); + } + PartitionCookieWriteValue(char_ret + kCookieSize + no_cookie_size); +#else + if (ret && zero_fill && !is_already_zeroed) { + memset(ret, 0, size); + } +#endif + + return ret; +} + +ALWAYS_INLINE bool PartitionRootBase::IsValidPage(PartitionPage* page) { + PartitionRootBase* root = PartitionRootBase::FromPage(page); + return root->inverted_self == ~reinterpret_cast(root); +} + +ALWAYS_INLINE PartitionRootBase* PartitionRootBase::FromPage( + PartitionPage* page) { + PartitionSuperPageExtentEntry* extent_entry = + reinterpret_cast( + reinterpret_cast(page) & kSystemPageBaseMask); + return extent_entry->root; +} + +ALWAYS_INLINE void PartitionRootBase::IncreaseCommittedPages(size_t len) { + total_size_of_committed_pages += len; + DCHECK(total_size_of_committed_pages <= + total_size_of_super_pages + total_size_of_direct_mapped_pages); +} + +ALWAYS_INLINE void PartitionRootBase::DecreaseCommittedPages(size_t len) { + total_size_of_committed_pages -= len; + DCHECK(total_size_of_committed_pages <= + total_size_of_super_pages + total_size_of_direct_mapped_pages); +} + +ALWAYS_INLINE void PartitionRootBase::DecommitSystemPages(void* address, + size_t length) { + ::pdfium::base::DecommitSystemPages(address, length); + DecreaseCommittedPages(length); +} + +ALWAYS_INLINE void PartitionRootBase::RecommitSystemPages(void* address, + size_t length) { + CHECK(::pdfium::base::RecommitSystemPages(address, length, PageReadWrite)); + IncreaseCommittedPages(length); +} + +} // namespace internal +} // namespace base +} // namespace pdfium + +#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_BASE_H_ diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/random.cc 
b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/random.cc new file mode 100644 index 000000000..7a13855d3 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/random.cc @@ -0,0 +1,96 @@ +// Copyright 2019 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "third_party/base/allocator/partition_allocator/random.h" + +#include "build/build_config.h" +#include "third_party/base/allocator/partition_allocator/spin_lock.h" +#include "third_party/base/no_destructor.h" + +#if defined(OS_WIN) +#include +#else +#include +#include +#endif + +namespace pdfium { +namespace base { + +// This is the same PRNG as used by tcmalloc for mapping address randomness; +// see http://burtleburtle.net/bob/rand/smallprng.html. +struct RandomContext { + subtle::SpinLock lock; + bool initialized; + uint32_t a; + uint32_t b; + uint32_t c; + uint32_t d; +}; + +namespace { + +#define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k)))) + +uint32_t RandomValueInternal(RandomContext* x) { + uint32_t e = x->a - rot(x->b, 27); + x->a = x->b ^ rot(x->c, 17); + x->b = x->c + x->d; + x->c = x->d + e; + x->d = e + x->a; + return x->d; +} + +#undef rot + +RandomContext* GetRandomContext() { + static NoDestructor g_random_context; + RandomContext* x = g_random_context.get(); + subtle::SpinLock::Guard guard(x->lock); + if (UNLIKELY(!x->initialized)) { + x->initialized = true; + char c; + uint32_t seed = static_cast(reinterpret_cast(&c)); + uint32_t pid; + uint32_t usec; +#if defined(OS_WIN) + pid = GetCurrentProcessId(); + SYSTEMTIME st; + GetSystemTime(&st); + usec = static_cast(st.wMilliseconds * 1000); +#else + pid = static_cast(getpid()); + struct timeval tv; + gettimeofday(&tv, 0); + usec = static_cast(tv.tv_usec); +#endif + seed ^= pid; + seed ^= usec; + x->a = 0xf1ea5eed; + x->b = x->c = x->d = seed; + for 
(int i = 0; i < 20; ++i) { + RandomValueInternal(x); + } + } + return x; +} + +} // namespace + +uint32_t RandomValue() { + RandomContext* x = GetRandomContext(); + subtle::SpinLock::Guard guard(x->lock); + return RandomValueInternal(x); +} + +void SetMmapSeedForTesting(uint64_t seed) { + RandomContext* x = GetRandomContext(); + subtle::SpinLock::Guard guard(x->lock); + x->a = x->b = static_cast<uint32_t>(seed); + x->c = x->d = static_cast<uint32_t>(seed >> 32); + x->initialized = true; +} + +} // namespace base +} // namespace pdfium diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/random.h b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/random.h new file mode 100644 index 000000000..e485d6d4a --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/random.h @@ -0,0 +1,29 @@ +// Copyright 2019 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_RANDOM_H_ +#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_RANDOM_H_ + +#include <stdint.h> + +#include "third_party/base/base_export.h" + +namespace pdfium { +namespace base { + +// Returns a random value. The generator's internal state is initialized with +// `base::RandUint64` which is very unpredictable, but which is expensive due to +// the need to call into the kernel. Therefore this generator uses a fast, +// entirely user-space function after initialization. +BASE_EXPORT uint32_t RandomValue(); + +// Sets the seed for the random number generator to a known value, to cause the +// RNG to generate a predictable sequence of outputs. May be called multiple +// times.
+BASE_EXPORT void SetMmapSeedForTesting(uint64_t seed); + +} // namespace base +} // namespace pdfium + +#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_RANDOM_H_ diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/spin_lock.cc b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/spin_lock.cc new file mode 100644 index 000000000..968c58ad9 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/spin_lock.cc @@ -0,0 +1,109 @@ +// Copyright 2015 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "third_party/base/allocator/partition_allocator/spin_lock.h" + +#include "build/build_config.h" +#include "third_party/base/logging.h" + +#if defined(OS_WIN) +#include +#elif defined(OS_POSIX) || defined(OS_FUCHSIA) +#include +#endif + +// The YIELD_PROCESSOR macro wraps an architecture specific-instruction that +// informs the processor we're in a busy wait, so it can handle the branch more +// intelligently and e.g. reduce power to our core or give more resources to the +// other hyper-thread on this core. See the following for context: +// https://software.intel.com/en-us/articles/benefitting-power-and-performance-sleep-loops +// +// The YIELD_THREAD macro tells the OS to relinquish our quantum. This is +// basically a worst-case fallback, and if you're hitting it with any frequency +// you really should be using a proper lock (such as |base::Lock|)rather than +// these spinlocks. 
+#if defined(OS_WIN) + +#define YIELD_PROCESSOR YieldProcessor() +#define YIELD_THREAD SwitchToThread() + +#elif defined(OS_POSIX) || defined(OS_FUCHSIA) + +#if defined(ARCH_CPU_X86_64) || defined(ARCH_CPU_X86) +#define YIELD_PROCESSOR __asm__ __volatile__("pause") +#elif (defined(ARCH_CPU_ARMEL) && __ARM_ARCH >= 6) || defined(ARCH_CPU_ARM64) +#define YIELD_PROCESSOR __asm__ __volatile__("yield") +#elif defined(ARCH_CPU_MIPSEL) +// The MIPS32 docs state that the PAUSE instruction is a no-op on older +// architectures (first added in MIPS32r2). To avoid assembler errors when +// targeting pre-r2, we must encode the instruction manually. +#define YIELD_PROCESSOR __asm__ __volatile__(".word 0x00000140") +#elif defined(ARCH_CPU_MIPS64EL) && __mips_isa_rev >= 2 +// Don't bother doing using .word here since r2 is the lowest supported mips64 +// that Chromium supports. +#define YIELD_PROCESSOR __asm__ __volatile__("pause") +#elif defined(ARCH_CPU_PPC64_FAMILY) +#define YIELD_PROCESSOR __asm__ __volatile__("or 31,31,31") +#elif defined(ARCH_CPU_S390_FAMILY) +// just do nothing +#define YIELD_PROCESSOR ((void)0) +#elif defined(ARCH_CPU_ALPHA21264_FAMILY) +// just do nothing +#define YIELD_PROCESSOR ((void)0) +#endif // ARCH + +#ifndef YIELD_PROCESSOR +#warning "Processor yield not supported on this architecture." +#define YIELD_PROCESSOR ((void)0) +#endif + +#define YIELD_THREAD sched_yield() + +#else // Other OS + +#warning "Thread yield not supported on this OS." +#define YIELD_THREAD ((void)0) + +#endif // OS_WIN + +namespace pdfium { +namespace base { +namespace subtle { + +void SpinLock::LockSlow() { + // The value of |kYieldProcessorTries| is cargo culted from TCMalloc, Windows + // critical section defaults, and various other recommendations. + // TODO(jschuh): Further tuning may be warranted. + static const int kYieldProcessorTries = 1000; + // The value of |kYieldThreadTries| is completely made up. 
+ static const int kYieldThreadTries = 10; + int yield_thread_count = 0; + do { + do { + for (int count = 0; count < kYieldProcessorTries; ++count) { + // Let the processor know we're spinning. + YIELD_PROCESSOR; + if (!lock_.load(std::memory_order_relaxed) && + LIKELY(!lock_.exchange(true, std::memory_order_acquire))) + return; + } + + if (yield_thread_count < kYieldThreadTries) { + ++yield_thread_count; + // Give the OS a chance to schedule something on this core. + YIELD_THREAD; + } else { + // At this point, it's likely that the lock is held by a lower priority + // thread that is unavailable to finish its work because of higher + // priority threads spinning here. Sleeping should ensure that they make + // progress. + NOTREACHED(); + } + } while (lock_.load(std::memory_order_relaxed)); + } while (UNLIKELY(lock_.exchange(true, std::memory_order_acquire))); +} + +} // namespace subtle +} // namespace base +} // namespace pdfium diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/spin_lock.h b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/spin_lock.h new file mode 100644 index 000000000..5613fd130 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/allocator/partition_allocator/spin_lock.h @@ -0,0 +1,52 @@ +// Copyright (c) 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_SPIN_LOCK_H_ +#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_SPIN_LOCK_H_ + +#include +#include +#include + +#include "third_party/base/base_export.h" +#include "third_party/base/compiler_specific.h" + +// Spinlock is a simple spinlock class based on the standard CPU primitive of +// atomic increment and decrement of an int at a given memory address. 
These are +// intended only for very short duration locks and assume a system with multiple +// cores. For any potentially longer wait you should use a real lock, such as +// |base::Lock|. +namespace pdfium { +namespace base { +namespace subtle { + +class BASE_EXPORT SpinLock { + public: + constexpr SpinLock() = default; + ~SpinLock() = default; + using Guard = std::lock_guard; + + ALWAYS_INLINE void lock() { + static_assert(sizeof(lock_) == sizeof(int), + "int and lock_ are different sizes"); + if (LIKELY(!lock_.exchange(true, std::memory_order_acquire))) + return; + LockSlow(); + } + + ALWAYS_INLINE void unlock() { lock_.store(false, std::memory_order_release); } + + private: + // This is called if the initial attempt to acquire the lock fails. It's + // slower, but has a much better scheduling and power consumption behavior. + void LockSlow(); + + std::atomic_int lock_{0}; +}; + +} // namespace subtle +} // namespace base +} // namespace pdfium + +#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_SPIN_LOCK_H_ diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/base_export.h b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/base_export.h new file mode 100644 index 000000000..a0d6d9bb7 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/base_export.h @@ -0,0 +1,3 @@ +#ifndef BASE_EXPORT +#define BASE_EXPORT +#endif diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/bits.h b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/bits.h new file mode 100644 index 000000000..125368388 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/bits.h @@ -0,0 +1,187 @@ +// Copyright (c) 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// This file defines some bit utilities. 
+ +#ifndef THIRD_PARTY_BASE_BITS_H_ +#define THIRD_PARTY_BASE_BITS_H_ + +#include +#include + +#include + +#include "third_party/base/compiler_specific.h" +#include "third_party/base/logging.h" + +#if defined(COMPILER_MSVC) +#include +#endif + +namespace pdfium { +namespace base { +namespace bits { + +// Returns true iff |value| is a power of 2. +template ::value>> +constexpr inline bool IsPowerOfTwo(T value) { + // From "Hacker's Delight": Section 2.1 Manipulating Rightmost Bits. + // + // Only positive integers with a single bit set are powers of two. If only one + // bit is set in x (e.g. 0b00000100000000) then |x-1| will have that bit set + // to zero and all bits to its right set to 1 (e.g. 0b00000011111111). Hence + // |x & (x-1)| is 0 iff x is a power of two. + return value > 0 && (value & (value - 1)) == 0; +} + +// Round up |size| to a multiple of alignment, which must be a power of two. +inline size_t Align(size_t size, size_t alignment) { + DCHECK(IsPowerOfTwo(alignment)); + return (size + alignment - 1) & ~(alignment - 1); +} + +// CountLeadingZeroBits(value) returns the number of zero bits following the +// most significant 1 bit in |value| if |value| is non-zero, otherwise it +// returns {sizeof(T) * 8}. +// Example: 00100010 -> 2 +// +// CountTrailingZeroBits(value) returns the number of zero bits preceding the +// least significant 1 bit in |value| if |value| is non-zero, otherwise it +// returns {sizeof(T) * 8}. +// Example: 00100010 -> 1 +// +// C does not have an operator to do this, but fortunately the various +// compilers have built-ins that map to fast underlying processor instructions. +#if defined(COMPILER_MSVC) + +template +ALWAYS_INLINE + typename std::enable_if::value && sizeof(T) <= 4, + unsigned>::type + CountLeadingZeroBits(T x) { + static_assert(bits > 0, "invalid instantiation"); + unsigned long index; + return LIKELY(_BitScanReverse(&index, static_cast(x))) + ? 
(31 - index - (32 - bits)) + : bits; +} + +template +ALWAYS_INLINE + typename std::enable_if::value && sizeof(T) == 8, + unsigned>::type + CountLeadingZeroBits(T x) { + static_assert(bits > 0, "invalid instantiation"); + unsigned long index; + return LIKELY(_BitScanReverse64(&index, static_cast(x))) + ? (63 - index) + : 64; +} + +template +ALWAYS_INLINE + typename std::enable_if::value && sizeof(T) <= 4, + unsigned>::type + CountTrailingZeroBits(T x) { + static_assert(bits > 0, "invalid instantiation"); + unsigned long index; + return LIKELY(_BitScanForward(&index, static_cast(x))) ? index + : bits; +} + +template +ALWAYS_INLINE + typename std::enable_if::value && sizeof(T) == 8, + unsigned>::type + CountTrailingZeroBits(T x) { + static_assert(bits > 0, "invalid instantiation"); + unsigned long index; + return LIKELY(_BitScanForward64(&index, static_cast(x))) ? index + : 64; +} + +ALWAYS_INLINE uint32_t CountLeadingZeroBits32(uint32_t x) { + return CountLeadingZeroBits(x); +} + +#if defined(ARCH_CPU_64_BITS) + +// MSVC only supplies _BitScanForward64 when building for a 64-bit target. +ALWAYS_INLINE uint64_t CountLeadingZeroBits64(uint64_t x) { + return CountLeadingZeroBits(x); +} + +#endif + +#elif defined(COMPILER_GCC) + +// __builtin_clz has undefined behaviour for an input of 0, even though there's +// clearly a return value that makes sense, and even though some processor clz +// instructions have defined behaviour for 0. We could drop to raw __asm__ to +// do better, but we'll avoid doing that unless we see proof that we need to. +template +ALWAYS_INLINE + typename std::enable_if::value && sizeof(T) <= 8, + unsigned>::type + CountLeadingZeroBits(T value) { + static_assert(bits > 0, "invalid instantiation"); + return LIKELY(value) + ? bits == 64 + ? 
__builtin_clzll(static_cast(value)) + : __builtin_clz(static_cast(value)) - (32 - bits) + : bits; +} + +template +ALWAYS_INLINE + typename std::enable_if::value && sizeof(T) <= 8, + unsigned>::type + CountTrailingZeroBits(T value) { + return LIKELY(value) ? bits == 64 + ? __builtin_ctzll(static_cast(value)) + : __builtin_ctz(static_cast(value)) + : bits; +} + +ALWAYS_INLINE uint32_t CountLeadingZeroBits32(uint32_t x) { + return CountLeadingZeroBits(x); +} + +#if defined(ARCH_CPU_64_BITS) + +ALWAYS_INLINE uint64_t CountLeadingZeroBits64(uint64_t x) { + return CountLeadingZeroBits(x); +} + +#endif + +#endif + +ALWAYS_INLINE size_t CountLeadingZeroBitsSizeT(size_t x) { + return CountLeadingZeroBits(x); +} + +ALWAYS_INLINE size_t CountTrailingZeroBitsSizeT(size_t x) { + return CountTrailingZeroBits(x); +} + +// Returns the integer i such as 2^i <= n < 2^(i+1) +inline int Log2Floor(uint32_t n) { + return 31 - CountLeadingZeroBits(n); +} + +// Returns the integer i such as 2^(i-1) < n <= 2^i +inline int Log2Ceiling(uint32_t n) { + // When n == 0, we want the function to return -1. + // When n == 0, (n - 1) will underflow to 0xFFFFFFFF, which is + // why the statement below starts with (n ? 32 : -1). + return (n ? 32 : -1) - CountLeadingZeroBits(n - 1); +} + +} // namespace bits +} // namespace base +} // namespace pdfium + +#endif // THIRD_PARTY_BASE_BITS_H_ diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/compiler_specific.h b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/compiler_specific.h new file mode 100644 index 000000000..947cbf3df --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/compiler_specific.h @@ -0,0 +1,183 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef THIRD_PARTY_BASE_COMPILER_SPECIFIC_H_ +#define THIRD_PARTY_BASE_COMPILER_SPECIFIC_H_ + +#include "build/build_config.h" + +// Annotate a variable indicating it's ok if the variable is not used. +// (Typically used to silence a compiler warning when the assignment +// is important for some other reason.) +// Use like: +// int x = ...; +// ALLOW_UNUSED_LOCAL(x); +#define ALLOW_UNUSED_LOCAL(x) (void)x + +// Annotate a typedef or function indicating it's ok if it's not used. +// Use like: +// typedef Foo Bar ALLOW_UNUSED_TYPE; +#if defined(COMPILER_GCC) || defined(__clang__) +#define ALLOW_UNUSED_TYPE __attribute__((unused)) +#else +#define ALLOW_UNUSED_TYPE +#endif + +// Annotate a function indicating it should not be inlined. +// Use like: +// NOINLINE void DoStuff() { ... } +#if defined(COMPILER_GCC) +#define NOINLINE __attribute__((noinline)) +#elif defined(COMPILER_MSVC) +#define NOINLINE __declspec(noinline) +#else +#define NOINLINE +#endif + +#if defined(COMPILER_GCC) && defined(NDEBUG) +#define ALWAYS_INLINE inline __attribute__((__always_inline__)) +#elif defined(COMPILER_MSVC) && defined(NDEBUG) +#define ALWAYS_INLINE __forceinline +#else +#define ALWAYS_INLINE inline +#endif + +// Specify memory alignment for structs, classes, etc. +// Use like: +// class ALIGNAS(16) MyClass { ... } +// ALIGNAS(16) int array[4]; +// +// In most places you can use the C++11 keyword "alignas", which is preferred. +// +// But compilers have trouble mixing __attribute__((...)) syntax with +// alignas(...) syntax. +// +// Doesn't work in clang or gcc: +// struct alignas(16) __attribute__((packed)) S { char c; }; +// Works in clang but not gcc: +// struct __attribute__((packed)) alignas(16) S2 { char c; }; +// Works in clang and gcc: +// struct alignas(16) S3 { char c; } __attribute__((packed)); +// +// There are also some attributes that must be specified *before* a class +// definition: visibility (used for exporting functions/classes) is one of +// these attributes. 
This means that it is not possible to use alignas() with a +// class that is marked as exported. +#if defined(COMPILER_MSVC) +#define ALIGNAS(byte_alignment) __declspec(align(byte_alignment)) +#elif defined(COMPILER_GCC) +#define ALIGNAS(byte_alignment) __attribute__((aligned(byte_alignment))) +#endif + +// Annotate a function indicating the caller must examine the return value. +// Use like: +// int foo() WARN_UNUSED_RESULT; +// To explicitly ignore a result, see |ignore_result()| in base/macros.h. +#undef WARN_UNUSED_RESULT +#if defined(COMPILER_GCC) || defined(__clang__) +#define WARN_UNUSED_RESULT __attribute__((warn_unused_result)) +#else +#define WARN_UNUSED_RESULT +#endif + +// Tell the compiler a function is using a printf-style format string. +// |format_param| is the one-based index of the format string parameter; +// |dots_param| is the one-based index of the "..." parameter. +// For v*printf functions (which take a va_list), pass 0 for dots_param. +// (This is undocumented but matches what the system C headers do.) +#if defined(COMPILER_GCC) || defined(__clang__) +#define PRINTF_FORMAT(format_param, dots_param) \ + __attribute__((format(printf, format_param, dots_param))) +#else +#define PRINTF_FORMAT(format_param, dots_param) +#endif + +// WPRINTF_FORMAT is the same, but for wide format strings. +// This doesn't appear to yet be implemented in any compiler. +// See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=38308 . +#define WPRINTF_FORMAT(format_param, dots_param) +// If available, it would look like: +// __attribute__((format(wprintf, format_param, dots_param))) + +// Sanitizers annotations. +#if defined(__has_attribute) +#if __has_attribute(no_sanitize) +#define NO_SANITIZE(what) __attribute__((no_sanitize(what))) +#endif +#endif +#if !defined(NO_SANITIZE) +#define NO_SANITIZE(what) +#endif + +// MemorySanitizer annotations. +#if defined(MEMORY_SANITIZER) && !defined(OS_NACL) +#include + +// Mark a memory region fully initialized. 
+// Use this to annotate code that deliberately reads uninitialized data, for +// example a GC scavenging root set pointers from the stack. +#define MSAN_UNPOISON(p, size) __msan_unpoison(p, size) + +// Check a memory region for initializedness, as if it was being used here. +// If any bits are uninitialized, crash with an MSan report. +// Use this to sanitize data which MSan won't be able to track, e.g. before +// passing data to another process via shared memory. +#define MSAN_CHECK_MEM_IS_INITIALIZED(p, size) \ + __msan_check_mem_is_initialized(p, size) +#else // MEMORY_SANITIZER +#define MSAN_UNPOISON(p, size) +#define MSAN_CHECK_MEM_IS_INITIALIZED(p, size) +#endif // MEMORY_SANITIZER + +// DISABLE_CFI_PERF -- Disable Control Flow Integrity for perf reasons. +#if !defined(DISABLE_CFI_PERF) +#if defined(__clang__) && defined(OFFICIAL_BUILD) +#define DISABLE_CFI_PERF __attribute__((no_sanitize("cfi"))) +#else +#define DISABLE_CFI_PERF +#endif +#endif + +// Macro useful for writing cross-platform function pointers. +#if !defined(CDECL) +#if defined(OS_WIN) +#define CDECL __cdecl +#else // defined(OS_WIN) +#define CDECL +#endif // defined(OS_WIN) +#endif // !defined(CDECL) + +// Macro for hinting that an expression is likely to be false. +#if !defined(UNLIKELY) +#if defined(COMPILER_GCC) || defined(__clang__) +#define UNLIKELY(x) __builtin_expect(!!(x), 0) +#else +#define UNLIKELY(x) (x) +#endif // defined(COMPILER_GCC) +#endif // !defined(UNLIKELY) + +#if !defined(LIKELY) +#if defined(COMPILER_GCC) || defined(__clang__) +#define LIKELY(x) __builtin_expect(!!(x), 1) +#else +#define LIKELY(x) (x) +#endif // defined(COMPILER_GCC) +#endif // !defined(LIKELY) + +// Compiler feature-detection. 
+// clang.llvm.org/docs/LanguageExtensions.html#has-feature-and-has-extension +#if defined(__has_feature) +#define HAS_FEATURE(FEATURE) __has_feature(FEATURE) +#else +#define HAS_FEATURE(FEATURE) 0 +#endif + +// Macro for telling -Wimplicit-fallthrough that a fallthrough is intentional. +#if defined(__clang__) +#define FALLTHROUGH [[clang::fallthrough]] +#else +#define FALLTHROUGH +#endif + +#endif // THIRD_PARTY_BASE_COMPILER_SPECIFIC_H_ diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/containers/adapters.h b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/containers/adapters.h new file mode 100644 index 000000000..0f65ea00b --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/containers/adapters.h @@ -0,0 +1,54 @@ +// Copyright 2020 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef THIRD_PARTY_BASE_CONTAINERS_ADAPTERS_H_ +#define THIRD_PARTY_BASE_CONTAINERS_ADAPTERS_H_ + +#include + +#include +#include + +namespace pdfium { +namespace base { + +namespace internal { + +// Internal adapter class for implementing base::Reversed. +template +class ReversedAdapter { + public: + using Iterator = decltype(std::rbegin(std::declval())); + + explicit ReversedAdapter(T& t) : t_(t) {} + ReversedAdapter(const ReversedAdapter& ra) : t_(ra.t_) {} + ReversedAdapter& operator=(const ReversedAdapter&) = delete; + + Iterator begin() const { return std::rbegin(t_); } + Iterator end() const { return std::rend(t_); } + + private: + T& t_; +}; + +} // namespace internal + +// Reversed returns a container adapter usable in a range-based "for" statement +// for iterating a reversible container in reverse order. 
+// +// Example: +// +// std::vector v = ...; +// for (int i : base::Reversed(v)) { +// // iterates through v from back to front +// } +template +internal::ReversedAdapter Reversed(T& t) { + return internal::ReversedAdapter(t); +} + +} // namespace base +} // namespace pdfium + +#endif // THIRD_PARTY_BASE_CONTAINERS_ADAPTERS_H_ diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/debug/alias.cc b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/debug/alias.cc new file mode 100644 index 000000000..6ee2ee975 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/debug/alias.cc @@ -0,0 +1,30 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "third_party/base/debug/alias.h" + +#include "build/build_config.h" + +namespace pdfium { +namespace base { +namespace debug { + +#if defined(COMPILER_MSVC) +#pragma optimize("", off) +#elif defined(__clang__) +#pragma clang optimize off +#endif + +void Alias(const void* var) { +} + +#if defined(COMPILER_MSVC) +#pragma optimize("", on) +#elif defined(__clang__) +#pragma clang optimize on +#endif + +} // namespace debug +} // namespace base +} // namespace pdfium diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/debug/alias.h b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/debug/alias.h new file mode 100644 index 000000000..8228a6fc9 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/debug/alias.h @@ -0,0 +1,34 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef THIRD_PARTY_BASE_DEBUG_ALIAS_H_ +#define THIRD_PARTY_BASE_DEBUG_ALIAS_H_ + +namespace pdfium { +namespace base { +namespace debug { + +// Make the optimizer think that var is aliased. 
This is to prevent it from +// optimizing out local variables that would not otherwise be live at the point +// of a potential crash. +// base::debug::Alias should only be used for local variables, not globals, +// object members, or function return values - these must be copied to locals if +// you want to ensure they are recorded in crash dumps. +// Note that if the local variable is a pointer then its value will be retained +// but the memory that it points to will probably not be saved in the crash +// dump - by default only stack memory is saved. Therefore the aliasing +// technique is usually only worthwhile with non-pointer variables. If you have +// a pointer to an object and you want to retain the object's state you need to +// copy the object or its fields to local variables. Example usage: +// int last_error = err_; +// base::debug::Alias(&last_error); +// DEBUG_ALIAS_FOR_CSTR(name_copy, p->name, 16); +// CHECK(false); +void Alias(const void* var); + +} // namespace debug +} // namespace base +} // namespace pdfium + +#endif // THIRD_PARTY_BASE_DEBUG_ALIAS_H_ diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/immediate_crash.h b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/immediate_crash.h new file mode 100644 index 000000000..81f18cc0b --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/immediate_crash.h @@ -0,0 +1,168 @@ +// Copyright 2019 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef THIRD_PARTY_BASE_IMMEDIATE_CRASH_H_ +#define THIRD_PARTY_BASE_IMMEDIATE_CRASH_H_ + +#include "build/build_config.h" + +// Crashes in the fastest possible way with no attempt at logging. +// There are several constraints; see http://crbug.com/664209 for more context. +// +// - TRAP_SEQUENCE_() must be fatal. 
It should not be possible to ignore the +// resulting exception or simply hit 'continue' to skip over it in a debugger. +// - Different instances of TRAP_SEQUENCE_() must not be folded together, to +// ensure crash reports are debuggable. Unlike __builtin_trap(), asm volatile +// blocks will not be folded together. +// Note: TRAP_SEQUENCE_() previously required an instruction with a unique +// nonce since unlike clang, GCC folds together identical asm volatile +// blocks. +// - TRAP_SEQUENCE_() must produce a signal that is distinct from an invalid +// memory access. +// - TRAP_SEQUENCE_() must be treated as a set of noreturn instructions. +// __builtin_unreachable() is used to provide that hint here. clang also uses +// this as a heuristic to pack the instructions in the function epilogue to +// improve code density. +// +// Additional properties that are nice to have: +// - TRAP_SEQUENCE_() should be as compact as possible. +// - The first instruction of TRAP_SEQUENCE_() should not change, to avoid +// shifting crash reporting clusters. As a consequence of this, explicit +// assembly is preferred over intrinsics. +// Note: this last bullet point may no longer be true, and may be removed in +// the future. + +// Note: TRAP_SEQUENCE Is currently split into two macro helpers due to the fact +// that clang emits an actual instruction for __builtin_unreachable() on certain +// platforms (see https://crbug.com/958675). In addition, the int3/bkpt/brk will +// be removed in followups, so splitting it up like this now makes it easy to +// land the followups. + +#if defined(COMPILER_GCC) + +#if defined(OS_NACL) + +// Crash report accuracy is not guaranteed on NaCl. +#define TRAP_SEQUENCE1_() __builtin_trap() +#define TRAP_SEQUENCE2_() asm volatile("") + +#elif defined(ARCH_CPU_X86_FAMILY) + +// TODO(https://crbug.com/958675): In theory, it should be possible to use just +// int3. 
However, there are a number of crashes with SIGILL as the exception +// code, so it seems likely that there's a signal handler that allows execution +// to continue after SIGTRAP. +#define TRAP_SEQUENCE1_() asm volatile("int3") + +#if defined(OS_APPLE) +// Intentionally empty: __builtin_unreachable() is always part of the sequence +// (see IMMEDIATE_CRASH below) and already emits a ud2 on Mac. +#define TRAP_SEQUENCE2_() asm volatile("") +#else +#define TRAP_SEQUENCE2_() asm volatile("ud2") +#endif // defined(OS_APPLE) + +#elif defined(ARCH_CPU_ARMEL) + +// bkpt will generate a SIGBUS when running on armv7 and a SIGTRAP when running +// as a 32 bit userspace app on arm64. There doesn't seem to be any way to +// cause a SIGTRAP from userspace without using a syscall (which would be a +// problem for sandboxing). +// TODO(https://crbug.com/958675): Remove bkpt from this sequence. +#define TRAP_SEQUENCE1_() asm volatile("bkpt #0") +#define TRAP_SEQUENCE2_() asm volatile("udf #0") + +#elif defined(ARCH_CPU_ARM64) + +// This will always generate a SIGTRAP on arm64. +// TODO(https://crbug.com/958675): Remove brk from this sequence. +#define TRAP_SEQUENCE1_() asm volatile("brk #0") +#define TRAP_SEQUENCE2_() asm volatile("hlt #0") + +#else + +// Crash report accuracy will not be guaranteed on other architectures, but at +// least this will crash as expected. +#define TRAP_SEQUENCE1_() __builtin_trap() +#define TRAP_SEQUENCE2_() asm volatile("") + +#endif // ARCH_CPU_* + +#elif defined(COMPILER_MSVC) + +#if !defined(__clang__) + +// MSVC x64 doesn't support inline asm, so use the MSVC intrinsic. +#define TRAP_SEQUENCE1_() __debugbreak() +#define TRAP_SEQUENCE2_() + +#elif defined(ARCH_CPU_ARM64) + +// Windows ARM64 uses "BRK #F000" as its breakpoint instruction, and +// __debugbreak() generates that in both VC++ and clang. 
+#define TRAP_SEQUENCE1_() __debugbreak() +// Intentionally empty: __builtin_unreachable() is always part of the sequence +// (see IMMEDIATE_CRASH below) and already emits a ud2 on Win64, +// https://crbug.com/958373 +#define TRAP_SEQUENCE2_() __asm volatile("") + +#else + +#define TRAP_SEQUENCE1_() asm volatile("int3") +#define TRAP_SEQUENCE2_() asm volatile("ud2") + +#endif // __clang__ + +#else + +#error No supported trap sequence! + +#endif // COMPILER_GCC + +#define TRAP_SEQUENCE_() \ + do { \ + TRAP_SEQUENCE1_(); \ + TRAP_SEQUENCE2_(); \ + } while (false) + +// CHECK() and the trap sequence can be invoked from a constexpr function. +// This could make compilation fail on GCC, as it forbids directly using inline +// asm inside a constexpr function. However, it allows calling a lambda +// expression including the same asm. +// The side effect is that the top of the stacktrace will not point to the +// calling function, but to this anonymous lambda. This is still useful as the +// full name of the lambda will typically include the name of the function that +// calls CHECK() and the debugger will still break at the right line of code. +#if !defined(COMPILER_GCC) + +#define WRAPPED_TRAP_SEQUENCE_() TRAP_SEQUENCE_() + +#else + +#define WRAPPED_TRAP_SEQUENCE_() \ + do { \ + [] { TRAP_SEQUENCE_(); }(); \ + } while (false) + +#endif // !defined(COMPILER_GCC) + +#if defined(__clang__) || defined(COMPILER_GCC) + +// __builtin_unreachable() hints to the compiler that this is noreturn and can +// be packed in the function epilogue. +#define IMMEDIATE_CRASH() \ + ({ \ + WRAPPED_TRAP_SEQUENCE_(); \ + __builtin_unreachable(); \ + }) + +#else + +// This is supporting non-chromium user of logging.h to build with MSVC, like +// pdfium. On MSVC there is no __builtin_unreachable(). 
+#define IMMEDIATE_CRASH() WRAPPED_TRAP_SEQUENCE_() + +#endif // defined(__clang__) || defined(COMPILER_GCC) + +#endif // THIRD_PARTY_BASE_IMMEDIATE_CRASH_H_ diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/logging.h b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/logging.h new file mode 100644 index 000000000..967eb3816 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/logging.h @@ -0,0 +1,42 @@ +// Copyright (c) 2014 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef THIRD_PARTY_BASE_LOGGING_H_ +#define THIRD_PARTY_BASE_LOGGING_H_ + +#include + +#include "build/build_config.h" +#include "third_party/base/compiler_specific.h" +#include "third_party/base/immediate_crash.h" + +#define CHECK(condition) \ + do { \ + if (UNLIKELY(!(condition))) { \ + IMMEDIATE_CRASH(); \ + } \ + } while (0) + +#if defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON) +#define DCHECK_IS_ON() 0 +#else +#define DCHECK_IS_ON() 1 +#endif + +// Debug mode: Use assert() for better diagnostics +// Release mode, DCHECK_ALWAYS_ON: Use CHECK() since assert() is a no-op. +// Release mode, no DCHECK_ALWAYS_ON: Use assert(), which is a no-op. 
+#if defined(NDEBUG) && defined(DCHECK_ALWAYS_ON) +#define DCHECK CHECK +#else +#define DCHECK assert +#endif + +#define CHECK_EQ(x, y) CHECK((x) == (y)) +#define CHECK_NE(x, y) CHECK((x) != (y)) +#define DCHECK_EQ(x, y) DCHECK((x) == (y)) +#define DCHECK_NE(x, y) DCHECK((x) != (y)) +#define NOTREACHED() DCHECK(false) + +#endif // THIRD_PARTY_BASE_LOGGING_H_ diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/memory/aligned_memory.cc b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/memory/aligned_memory.cc new file mode 100644 index 000000000..3cacf3f71 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/memory/aligned_memory.cc @@ -0,0 +1,50 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "third_party/base/memory/aligned_memory.h" + +#include "build/build_config.h" +#include "third_party/base/logging.h" + +#if defined(OS_ANDROID) +#include +#endif + +namespace pdfium { +namespace base { + +void* AlignedAlloc(size_t size, size_t alignment) { + DCHECK(size > 0U); + DCHECK(bits::IsPowerOfTwo(alignment)); + DCHECK_EQ(alignment % sizeof(void*), 0U); + void* ptr = nullptr; +#if defined(COMPILER_MSVC) + ptr = _aligned_malloc(size, alignment); +#elif defined(OS_ANDROID) + // Android technically supports posix_memalign(), but does not expose it in + // the current version of the library headers used by Chrome. Luckily, + // memalign() on Android returns pointers which can safely be used with + // free(), so we can use it instead. 
Issue filed to document this: + // http://code.google.com/p/android/issues/detail?id=35391 + ptr = memalign(alignment, size); +#else + int ret = posix_memalign(&ptr, alignment, size); + if (ret != 0) { + ptr = nullptr; + } +#endif + + // Since aligned allocations may fail for non-memory related reasons, force a + // crash if we encounter a failed allocation; maintaining consistent behavior + // with a normal allocation failure in Chrome. + if (!ptr) { + CHECK(false); + } + // Sanity check alignment just to be safe. + DCHECK(IsAligned(ptr, alignment)); + return ptr; +} + +} // namespace base +} // namespace pdfium diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/memory/aligned_memory.h b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/memory/aligned_memory.h new file mode 100644 index 000000000..06241a509 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/memory/aligned_memory.h @@ -0,0 +1,84 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef THIRD_PARTY_BASE_MEMORY_ALIGNED_MEMORY_H_ +#define THIRD_PARTY_BASE_MEMORY_ALIGNED_MEMORY_H_ + +#include +#include + +#include + +#include "build/build_config.h" +#include "third_party/base/base_export.h" +#include "third_party/base/bits.h" + +#if defined(COMPILER_MSVC) +#include +#else +#include +#endif + +// A runtime sized aligned allocation can be created: +// +// float* my_array = static_cast(AlignedAlloc(size, alignment)); +// +// // ... later, to release the memory: +// AlignedFree(my_array); +// +// Or using unique_ptr: +// +// std::unique_ptr my_array( +// static_cast(AlignedAlloc(size, alignment))); + +namespace pdfium { +namespace base { + +// This can be replaced with std::aligned_alloc when we have C++17. +// Caveat: std::aligned_alloc requires the size parameter be an integral +// multiple of alignment. 
+BASE_EXPORT void* AlignedAlloc(size_t size, size_t alignment); + +inline void AlignedFree(void* ptr) { +#if defined(COMPILER_MSVC) + _aligned_free(ptr); +#else + free(ptr); +#endif +} + +// Deleter for use with unique_ptr. E.g., use as +// std::unique_ptr foo; +struct AlignedFreeDeleter { + inline void operator()(void* ptr) const { + AlignedFree(ptr); + } +}; + +#ifdef __has_builtin +#define SUPPORTS_BUILTIN_IS_ALIGNED (__has_builtin(__builtin_is_aligned)) +#else +#define SUPPORTS_BUILTIN_IS_ALIGNED 0 +#endif + +inline bool IsAligned(uintptr_t val, size_t alignment) { + // If the compiler supports builtin alignment checks prefer them. +#if SUPPORTS_BUILTIN_IS_ALIGNED + return __builtin_is_aligned(val, alignment); +#else + DCHECK(bits::IsPowerOfTwo(alignment)); + return (val & (alignment - 1)) == 0; +#endif +} + +#undef SUPPORTS_BUILTIN_IS_ALIGNED + +inline bool IsAligned(void* val, size_t alignment) { + return IsAligned(reinterpret_cast(val), alignment); +} + +} // namespace base +} // namespace pdfium + +#endif // THIRD_PARTY_BASE_MEMORY_ALIGNED_MEMORY_H_ diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/no_destructor.h b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/no_destructor.h new file mode 100644 index 000000000..d9ff32e64 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/no_destructor.h @@ -0,0 +1,100 @@ +// Copyright 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef THIRD_PARTY_BASE_NO_DESTRUCTOR_H_ +#define THIRD_PARTY_BASE_NO_DESTRUCTOR_H_ + +#include +#include + +namespace pdfium { +namespace base { + +// A wrapper that makes it easy to create an object of type T with static +// storage duration that: +// - is only constructed on first access +// - never invokes the destructor +// in order to satisfy the styleguide ban on global constructors and +// destructors. 
+// +// Runtime constant example: +// const std::string& GetLineSeparator() { +// // Forwards to std::string(size_t, char, const Allocator&) constructor. +// static const base::NoDestructor s(5, '-'); +// return *s; +// } +// +// More complex initialization with a lambda: +// const std::string& GetSessionNonce() { +// static const base::NoDestructor nonce([] { +// std::string s(16); +// crypto::RandString(s.data(), s.size()); +// return s; +// }()); +// return *nonce; +// } +// +// NoDestructor stores the object inline, so it also avoids a pointer +// indirection and a malloc. Also note that since C++11 static local variable +// initialization is thread-safe and so is this pattern. Code should prefer to +// use NoDestructor over: +// - A function scoped static T* or T& that is dynamically initialized. +// - A global base::LazyInstance. +// +// Note that since the destructor is never run, this *will* leak memory if used +// as a stack or member variable. Furthermore, a NoDestructor should never +// have global scope as that may require a static initializer. +template +class NoDestructor { + public: + // Not constexpr; just write static constexpr T x = ...; if the value should + // be a constexpr. + template + explicit NoDestructor(Args&&... args) { + new (storage_) T(std::forward(args)...); + } + + // Allows copy and move construction of the contained type, to allow + // construction from an initializer list, e.g. for std::vector. 
+ explicit NoDestructor(const T& x) { new (storage_) T(x); } + explicit NoDestructor(T&& x) { new (storage_) T(std::move(x)); } + + NoDestructor(const NoDestructor&) = delete; + NoDestructor& operator=(const NoDestructor&) = delete; + + ~NoDestructor() = default; + + const T& operator*() const { return *get(); } + T& operator*() { return *get(); } + + const T* operator->() const { return get(); } + T* operator->() { return get(); } + + const T* get() const { return reinterpret_cast(storage_); } + T* get() { return reinterpret_cast(storage_); } + + private: + alignas(T) char storage_[sizeof(T)]; + +#if defined(LEAK_SANITIZER) + // TODO(https://crbug.com/812277): This is a hack to work around the fact + // that LSan doesn't seem to treat NoDestructor as a root for reachability + // analysis. This means that code like this: + // static base::NoDestructor> v({1, 2, 3}); + // is considered a leak. Using the standard leak sanitizer annotations to + // suppress leaks doesn't work: std::vector is implicitly constructed before + // calling the base::NoDestructor constructor. + // + // Unfortunately, I haven't been able to demonstrate this issue in simpler + // reproductions: until that's resolved, hold an explicit pointer to the + // placement-new'd object in leak sanitizer mode to help LSan realize that + // objects allocated by the contained type are still reachable. 
+ T* storage_ptr_ = reinterpret_cast(storage_); +#endif // defined(LEAK_SANITIZER) +}; + +} // namespace base +} // namespace pdfium + +#endif // THIRD_PARTY_BASE_NO_DESTRUCTOR_H_ diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/numerics/OWNERS b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/numerics/OWNERS new file mode 100644 index 000000000..f7816afe8 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/numerics/OWNERS @@ -0,0 +1,3 @@ +jschuh@chromium.org +tsepez@chromium.org +palmer@chromium.org diff --git a/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/numerics/checked_math.h b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/numerics/checked_math.h new file mode 100644 index 000000000..2da3677b6 --- /dev/null +++ b/3rdparty/deepin-pdfium/src/3rdparty/pdfium/pdfium/base/numerics/checked_math.h @@ -0,0 +1,395 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef THIRD_PARTY_BASE_NUMERICS_CHECKED_MATH_H_ +#define THIRD_PARTY_BASE_NUMERICS_CHECKED_MATH_H_ + +#include + +#include +#include + +#include "third_party/base/numerics/checked_math_impl.h" + +namespace pdfium { +namespace base { +namespace internal { + +template +class CheckedNumeric { + static_assert(std::is_arithmetic::value, + "CheckedNumeric: T must be a numeric type."); + + public: + using type = T; + + constexpr CheckedNumeric() = default; + + // Copy constructor. + template + constexpr CheckedNumeric(const CheckedNumeric& rhs) + : state_(rhs.state_.value(), rhs.IsValid()) {} + + template + friend class CheckedNumeric; + + // This is not an explicit constructor because we implicitly upgrade regular + // numerics to CheckedNumerics to make them easier to use. 
+ template + constexpr CheckedNumeric(Src value) // NOLINT(runtime/explicit) + : state_(value) { + static_assert(std::is_arithmetic::value, "Argument must be numeric."); + } + + // This is not an explicit constructor because we want a seamless conversion + // from StrictNumeric types. + template + constexpr CheckedNumeric( + StrictNumeric value) // NOLINT(runtime/explicit) + : state_(static_cast(value)) {} + + // IsValid() - The public API to test if a CheckedNumeric is currently valid. + // A range checked destination type can be supplied using the Dst template + // parameter. + template + constexpr bool IsValid() const { + return state_.is_valid() && + IsValueInRangeForNumericType(state_.value()); + } + + // AssignIfValid(Dst) - Assigns the underlying value if it is currently valid + // and is within the range supported by the destination type. Returns true if + // successful and false otherwise. + template +#if defined(__clang__) || defined(__GNUC__) + __attribute__((warn_unused_result)) +#elif defined(_MSC_VER) + _Check_return_ +#endif + constexpr bool + AssignIfValid(Dst* result) const { + return BASE_NUMERICS_LIKELY(IsValid()) + ? ((*result = static_cast(state_.value())), true) + : false; + } + + // ValueOrDie() - The primary accessor for the underlying value. If the + // current state is not valid it will CHECK and crash. + // A range checked destination type can be supplied using the Dst template + // parameter, which will trigger a CHECK if the value is not in bounds for + // the destination. + // The CHECK behavior can be overridden by supplying a handler as a + // template parameter, for test code, etc. However, the handler cannot access + // the underlying value, and it is not available through other means. + template + constexpr StrictNumeric ValueOrDie() const { + return BASE_NUMERICS_LIKELY(IsValid()) + ? 
static_cast(state_.value()) + : CheckHandler::template HandleFailure(); + } + + // ValueOrDefault(T default_value) - A convenience method that returns the + // current value if the state is valid, and the supplied default_value for + // any other state. + // A range checked destination type can be supplied using the Dst template + // parameter. WARNING: This function may fail to compile or CHECK at runtime + // if the supplied default_value is not within range of the destination type. + template + constexpr StrictNumeric ValueOrDefault(const Src default_value) const { + return BASE_NUMERICS_LIKELY(IsValid()) + ? static_cast(state_.value()) + : checked_cast(default_value); + } + + // Returns a checked numeric of the specified type, cast from the current + // CheckedNumeric. If the current state is invalid or the destination cannot + // represent the result then the returned CheckedNumeric will be invalid. + template + constexpr CheckedNumeric::type> Cast() const { + return *this; + } + + // This friend method is available solely for providing more detailed logging + // in the the tests. Do not implement it in production code, because the + // underlying values may change at any time. + template + friend U GetNumericValueForTest(const CheckedNumeric& src); + + // Prototypes for the supported arithmetic operator overloads. 
+ template + constexpr CheckedNumeric& operator+=(const Src rhs); + template + constexpr CheckedNumeric& operator-=(const Src rhs); + template + constexpr CheckedNumeric& operator*=(const Src rhs); + template + constexpr CheckedNumeric& operator/=(const Src rhs); + template + constexpr CheckedNumeric& operator%=(const Src rhs); + template + constexpr CheckedNumeric& operator<<=(const Src rhs); + template + constexpr CheckedNumeric& operator>>=(const Src rhs); + template + constexpr CheckedNumeric& operator&=(const Src rhs); + template + constexpr CheckedNumeric& operator|=(const Src rhs); + template + constexpr CheckedNumeric& operator^=(const Src rhs); + + constexpr CheckedNumeric operator-() const { + // The negation of two's complement int min is int min, so we simply + // check for that in the constexpr case. + // We use an optimized code path for a known run-time variable. + return MustTreatAsConstexpr(state_.value()) || !std::is_signed::value || + std::is_floating_point::value + ? CheckedNumeric( + NegateWrapper(state_.value()), + IsValid() && (!std::is_signed::value || + std::is_floating_point::value || + NegateWrapper(state_.value()) != + std::numeric_limits::lowest())) + : FastRuntimeNegate(); + } + + constexpr CheckedNumeric operator~() const { + return CheckedNumeric( + InvertWrapper(state_.value()), IsValid()); + } + + constexpr CheckedNumeric Abs() const { + return !IsValueNegative(state_.value()) ? *this : -*this; + } + + template + constexpr CheckedNumeric::type> Max( + const U rhs) const { + using R = typename UnderlyingType::type; + using result_type = typename MathWrapper::type; + // TODO(jschuh): This can be converted to the MathOp version and remain + // constexpr once we have C++14 support. + return CheckedNumeric( + static_cast( + IsGreater::Test(state_.value(), Wrapper::value(rhs)) + ? 
state_.value() + : Wrapper::value(rhs)), + state_.is_valid() && Wrapper::is_valid(rhs)); + } + + template + constexpr CheckedNumeric::type> Min( + const U rhs) const { + using R = typename UnderlyingType::type; + using result_type = typename MathWrapper::type; + // TODO(jschuh): This can be converted to the MathOp version and remain + // constexpr once we have C++14 support. + return CheckedNumeric( + static_cast( + IsLess::Test(state_.value(), Wrapper::value(rhs)) + ? state_.value() + : Wrapper::value(rhs)), + state_.is_valid() && Wrapper::is_valid(rhs)); + } + + // This function is available only for integral types. It returns an unsigned + // integer of the same width as the source type, containing the absolute value + // of the source, and properly handling signed min. + constexpr CheckedNumeric::type> + UnsignedAbs() const { + return CheckedNumeric::type>( + SafeUnsignedAbs(state_.value()), state_.is_valid()); + } + + constexpr CheckedNumeric& operator++() { + *this += 1; + return *this; + } + + constexpr CheckedNumeric operator++(int) { + CheckedNumeric value = *this; + *this += 1; + return value; + } + + constexpr CheckedNumeric& operator--() { + *this -= 1; + return *this; + } + + constexpr CheckedNumeric operator--(int) { + CheckedNumeric value = *this; + *this -= 1; + return value; + } + + // These perform the actual math operations on the CheckedNumerics. + // Binary arithmetic operations. + template