mirror of https://github.com/nxp-imx/linux-imx.git
synced 2025-07-11 11:55:28 +02:00
Merge branch 'android15-6.6' into branch 'android15-6.6-lts'
This catches the android15-6.6-lts branch up with a lot of abi additions and other changes in the android15-6.6 branch. Included in here are the following commits:

* 41bf0d8226 ANDROID: Use -fomit-frame-pointer for x86_ptrace_syscall_x86_64
* b7c3d209b4 ANDROID: GKI: Swap Allocator ABI Fixup
* 7cbe80f0b6 FROMLIST: BACKPORT: mm: swap: mTHP allocate swap entries from nonfull list
* 96836d4fd5 FROMLIST: BACKPORT: mm: swap: swap cluster switch to double link list
* 2b24dd9d53 Revert "FROMLIST: BACKPORT: mm: swap: swap cluster switch to double link list"
* 887a20b67d Revert "FROMLIST: BACKPORT: mm: swap: mTHP allocate swap entries from nonfull list"
* d24fe223ea Revert "ANDROID: ABI: mm: swap: reserve cluster according to mount option."
* 0cd41fbf53 UPSTREAM: erofs: fix uninitialized page cache reported by KMSAN
* 7341712cac ANDROID: oplus: Update the ABI xml and symbol list
* ffb163667c ANDROID: vendor_hooks: Add hook for fsnotify
* 59717f94ca ANDROID: ABI: update galaxy symbol list
* 4970677225 ANDROID: net: export symbol for tracepoint_consume_skb
* f50d04854c ANDROID: ABI: update symbol list for galaxy
* 89d387d916 ANDROID: memcg: add vendor hook to use vm_swappiness
* cebdeae238 ANDROID: ABI: update symbol list for galaxy
* 371517c5d0 ANDROID: mm: add vendor hook to tune warn_alloc
* 830cfc7a96 ANDROID: ABI: update symbol list for galaxy
* 3a6f635139 ANDROID: mm: add vendor hook in alloc_contig_range()
* 5752526073 ANDROID: ABI: update symbol list for galaxy
* 969cecdd1e ANDROID: mm: export tracepoint vm_unmapped_area
* ac1de79449 ANDROID: gfp: add __GFP_CMA in __def_gfpflag_names
* b983c75e38 ANDROID: ABI: update symbol list for galaxy
* c6ba1fd41e ANDROID: cma: add vendor hook for cma_debug_show_areas
* 2ca27754f0 ANDROID: ABI: update symbol list for galaxy
* 0bd33fde18 ANDROID: mm: add vendor hook for __alloc_pages_slowpath()
* 35cf4d8eab ANDROID: ABI: Update pixel symbol list
* 46f2aacf91 ANDROID: ABI: Update symbol list for Qcom
* cd83afb4f2 ANDROID: ABI: Update pixel symbol list
* 0935fda826 ANDROID: Export raise_softirq
* 4816d3f14a FROMGIT: media: amphion: Report the average QP of current encoded frame
* a9cf95c8ef FROMGIT: media: amphion: Remove lock in s_ctrl callback
* fadc0ed15c FROMGIT: media: v4l2-ctrls: Add average QP control
* 075954d0d8 ANDROID: GKI: Update qcom symbol list
* 0fc8eacf64 ANDROID: KVM: arm64: Fix state masking in guest_request_walker()
* a8759911ea ANDROID: ABI: Update symbol list for galaxy
* 0a53bc59a7 ANDROID: abi_gki_aarch64_qcom: Add configfs abi symbol
* 6b40f68ef9 ANDROID: Update symbol list for mtk
* 9ca99e6500 ANDROID: GKI: Update symbol list for Amlogic
* 9443f1fa14 ANDROID: GKI: update symbol list for unisoc
* 48509bef2a UPSTREAM: drm/drm_file: Fix pid refcounting race
* 8fd0ab9532 ANDROID: ABI: Update symbol list for galaxy
* 43b4adb9c9 ANDROID: mm: add vendor hook in vmscan.c
* dd071677cb ANDROID: GKI: Update symbol list for vivo
* c4db168998 ANDROID: GKI: net: add vendor hooks net qos for gki purpose
* 359c132bee ANDROID: GKI: update symbol list file for xiaomi
* 84dfc40591 ANDROID: abi_gki_aarch64_qcom: Add Tegra Symbols
* 8c8ee78fde ANDROID: ABI: Update symbol list for galaxy
* 16fd47ba32 ANDROID: GKI: Update QCOM symbol list
* b0acaed287 ANDROID: arm64: Allow non granule aligned MMIO guard requests
* bc028c905a ANDROID: gunyah: Add support for tracking Guest VM run time
* 2182ee6395 ANDROID: GKI: update xiaomi symbol list
* 885dc76e19 ANDROID: KVM: arm64: Fix pKVM mod hyp events lookup
* fcbb7a1d21 ANDROID: GKI: Add initialization for mutex oem_data.
* 1efd61a1c0 ANDROID: GKI: Update symbol list for vivo
* d2a7ba068d ANDROID: vendor_hooks: add vendor hooks for readahead bio
* 57bfc45d46 ANDROID: ABI: Update symbol list for galaxy
* 6589977ca5 ANDROID: mm: add vendor hooks in psi.c
* 14d4f8f785 ANDROID: ABI: Update symbol list for galaxy
* 9db4e9899b ANDROID: mm: add vendor hooks in madvise for swap entry
* 970642eb2d ANDROID: ABI: Update symbol list for galaxy
* 66c7ba200c ANDROID: dma-buf: add dma_heap_try_get_pool_size_kb for vendor hook
* d4474bddf8 ANDROID: KVM: arm64: Consolidate allowed and restricted guest cpu feature checks
* c7b8a41d0f ANDROID: android: Add symbols to debug_symbols driver
* 674cbcb7a2 ANDROID: abi_gki_aarch64_vivo: Update symbol list
* b0807745d4 ANDROID: mm: add vendor hooks to adjust memory reclamation
* 6d955b09ac ANDROID: GKI: Add symbol to symbol list for vivo.
* e41b8e8e59 ANDROID: vendor_hooks: add hooks in prctl_set_vma
* fb3f403773 UPSTREAM: usb: dwc3: core: Workaround for CSR read timeout
* c867ece908 UPSTREAM: f2fs: fix to force buffered IO on inline_data inode
* 3efb7c2d2a ANDROID: GKI: Add symbol to symbol list for imx
* e24990b254 UPSTREAM: net: usb: ax88179_178a: improve reset check
* 9f53a5ac99 UPSTREAM: net: usb: ax88179_178a: fix link status when link is set to down/up
* 8a43f59a49 ANDROID: Reapply: "net: usb: ax88179_178a: avoid writing the mac address before first reading"
* 2916880780 ANDROID: f2fs: enable cleancache
* f9df46617d ANDROID: KVM: Update nVHE stack size to 8KB
* 6e716f19dc UPSTREAM: arm64: Add USER_STACKTRACE support
* aaca6b10f1 ANDROID: GKI: Add initialization for rwsem's oem_data and vendor_data.
* 1036ce8d67 ANDROID: GKI: Update symbols to symbol list for honor
* bb4dd28c18 ANDROID: Update the ABI representation
* da5b43867d BACKPORT: FROMLIST: dm-verity: improve performance by using multibuffer hashing
* 6c33cbb433 BACKPORT: FROMLIST: dm-verity: reduce scope of real and wanted digests
* 3503ed6feb FROMLIST: dm-verity: hash blocks with shash import+finup when possible
* 3ed9f23932 BACKPORT: FROMLIST: dm-verity: make verity_hash() take dm_verity_io instead of ahash_request
* 33bfa57441 BACKPORT: FROMLIST: dm-verity: always "map" the data blocks
* 901b6a1577 FROMLIST: dm-verity: provide dma_alignment limit in io_hints
* a936860934 FROMLIST: dm-verity: make real_digest and want_digest fixed-length
* 7958bb4e87 BACKPORT: FROMLIST: dm-verity: move data hash mismatch handling into its own function
* 76fed9f013 BACKPORT: FROMLIST: dm-verity: move hash algorithm setup into its own function
* abed1a5d36 FROMLIST: fsverity: improve performance by using multibuffer hashing
* 08600b5d0c FROMLIST: crypto: arm64/sha256-ce - add support for finup_mb
* 16e22de481 FROMLIST: crypto: x86/sha256-ni - add support for finup_mb
* a2372f602d FROMLIST: crypto: testmgr - add tests for finup_mb
* 17f53e8a94 FROMLIST: crypto: testmgr - generate power-of-2 lengths more often
* 614beb21b3 BACKPORT: FROMLIST: crypto: shash - add support for finup_mb
* 9c58b7c147 UPSTREAM: fsverity: remove hash page spin lock
* 627ec822c1 UPSTREAM: crypto: arm64/sha2-ce - clean up backwards function names
* b6284a7064 UPSTREAM: crypto: arm64/sha2-ce - implement ->digest for sha256
* 1725496fe7 UPSTREAM: crypto: x86/sha256 - implement ->digest for sha256
* 2414c5e05b UPSTREAM: erofs: ensure m_llen is reset to 0 if metadata is invalid
* 2a09862b0d ANDROID: Add thermal headers to aarch64 allowlist
* 1bf09fb4fb ANDROID: ABI: Update pixel symbol list
* 4d6aca029c ANDROID: GKI: Update lenovo symbol list
* 0347be8c14 ANDROID: rust: disable floating point target features
* 96ba096630 ANDROID: ABI: Update oplus symbol list
* 84e4882c2a UPSTREAM: mm/vmalloc: fix vmalloc which may return null if called with __GFP_NOFAIL
* 5a875d7051 FROMGIT: KVM: arm64: nVHE: Support CONFIG_CFI_CLANG at EL2
* f3cc12e6b9 FROMGIT: KVM: arm64: Introduce print_nvhe_hyp_panic helper
* ad4668a0b4 FROMGIT: arm64: Introduce esr_brk_comment, esr_is_cfi_brk
* 546ea288d0 FROMGIT: KVM: arm64: VHE: Mark __hyp_call_panic __noreturn
* 57d9ce55ce FROMGIT: KVM: arm64: nVHE: gen-hyprel: Skip R_AARCH64_ABS32
* 406d5af44a FROMGIT: KVM: arm64: nVHE: Simplify invalid_host_el2_vect
* 9dd9c0ecc8 FROMGIT: KVM: arm64: Fix __pkvm_init_switch_pgd call ABI
* 16302047f0 FROMGIT: KVM: arm64: Fix clobbered ELR in sync abort/SError
* 2fe138183d ANDROID: KVM: Reduce upstream diff for kaslr_off
* 30068fa327 Revert "FROMLIST: KVM: arm64: Fix clobbered ELR in sync abort/SError"
* 0bbdca2ec5 Revert "FROMLIST: KVM: arm64: Fix __pkvm_init_switch_pgd C signature"
* 16b7f3f996 Revert "FROMLIST: KVM: arm64: Pass pointer to __pkvm_init_switch_pgd"
* 386f51645f Revert "FROMLIST: KVM: arm64: nVHE: Remove __guest_exit_panic path"
* 5f4a702e41 Revert "FROMLIST: KVM: arm64: nVHE: Add EL2h sync exception handler"
* ff1e4507cd Revert "FROMLIST: KVM: arm64: nVHE: gen-hyprel: Skip R_AARCH64_ABS32"
* fe72c7b6c5 Revert "FROMLIST: KVM: arm64: VHE: Mark __hyp_call_panic __noreturn"
* b6e7c9eb19 Revert "FROMLIST: arm64: Move esr_comment() to <asm/esr.h>"
* 5456aa91d4 Revert "BACKPORT: FROMLIST: KVM: arm64: nVHE: Support CONFIG_CFI..."
* c876dae46a ANDROID: ABI: Update pixel symbol list
* a8f26ab36d ANDROID: ABI: Update kvm_hyp_iommu ABI
* 740a179b42 ANDROID: KVM: arm64: deduplicate kvm_hyp_iommu
* 691810c3b9 ANDROID: Makefile: Fail the build if RUST and CFI are both enabled
* 437e699ef9 ANDROID: KVM: arm64: Fix psci_mem_protect_dec() on VM reclaim
* 013c5ddc64 ANDROID: rust_binder: fix leak of name in binderfs
* 0dcde40390 Revert "ANDROID: scsi: ufs: Add hook to influence the UFS clock scaling policy"
* c573b85983 Revert "ANDROID: sched: Add vendor hook for update_load_sum"
* 22f0a58277 Revert "ANDROID: PM / Domains: add vendor_hook to disallow domain idle state"
* 808331120f ANDROID: ABI: Export kvm_iommu_flush_unmap_cache
* 21f5282377 ANDROID: KVM: arm64: iommu: Allow driver to flush cached refcount
* 65ea117650 ANDROID: KVM: arm64: iommu: Fix map_pages() error path
* dbc350cb13 ANDROID: GKI: Update lenovo symbol list
* 1cefa59a20 Revert "ANDROID: vendor_hooks: Add hook for mmc queue"
* 48f130c18f Revert "ANDROID: GKI: net: add vendor hooks for 'struct sock' lifecycle"
* 142c5838f4 FROMLIST: binder_alloc: Replace kcalloc with kvcalloc to mitigate OOM issues
* 9dc982c238 ANDROID: Update the ABI symbol list: set_normalized_timespec64
* 6f22fc659b ANDROID: GKI: Update qcom symbol list
* eb1f7db04a ANDROID: fix kernelci GCC builds of fips140.ko
* d4103f937a ANDROID: GKI: add a parameter to vh_blk_fill_rwbs
* e763f6a5a9 UPSTREAM: sched/fair: Use all little CPUs for CPU-bound workloads
* 2b640be5df ANDROID: GKI: update symbol list for honor
* 0e8d838f3d FROMLIST: locking/rwsem: Add __always_inline annotation to __down_write_common() and inlined callers
* fea3a332a9 ANDROID: ABI: Update pixel symbol list
* 0db446aae1 ANDROID: abi_gki_aarch64_qcom: Add v4l2 abi symbol
* d2da2d32f6 ANDROID: ABI: Update QCOM symbol list
* 145f51aca0 ANDROID: fips140: remove unnecessary no_sanitize(cfi)
* 45688919de ANDROID: GKI: Add whitelist related to runtime energy model
* 79591ebabf ANDROID: sched/psi: disable the privilege check if CONFIG_DEFAULT_SECURITY_SELINUX is enabled
* 5f59226f87 ANDROID: Update the ABI symbol list
* ba91ea859e ANDROID: scheduler: add vendor-specific wake flag
* df27fe0be2 ANDROID: Update the ABI symbol list
* aca2287a01 FROMGIT: erofs: fix possible memory leak in z_erofs_gbuf_exit()
* 412548f4e6 BACKPORT: erofs: add a reserved buffer pool for lz4 decompression
* 43b3f34c6b BACKPORT: erofs: do not use pagepool in z_erofs_gbuf_growsize()
* 5084a99bb6 BACKPORT: erofs: rename per-CPU buffers to global buffer pool and make it configurable
* c69d9ecaf3 BACKPORT: erofs: rename utils.c to zutil.c
* 2d68f6d5bb BACKPORT: erofs: relaxed temporary buffers allocation on readahead
* 85f00ea4c7 ANDROID: Limit vfs-only namespace to GKI builds
* 4b9c4f5f50 ANDROID: ABI: Update symbol list for OPLUS
* 2435f3246b FROMGIT: usb: dwc3: core: remove lock of otg mode during gadget suspend/resume to avoid deadlock
* 9cb7fd9a3d ANDROID: GKI: Update symbol list for Amlogic
* d14189b69e FROMGIT: f2fs: clean up set REQ_RAHEAD given rac
* 2f4e6b1def ANDROID: Disable warning about new bindgen
* a10b25b5a4 ANDROID: ABI: Update symbol list for Exynos SoC
* 3396c2131d ANDROID: rust: use target.json for aarch64
* 1656e8d99d ANDROID: rust: rustfmt scripts/generate_rust_target.rs
* 8d2c337716 ANDROID: GKI: update symbol list for lenovo
* 791cea9469 ANDROID: GKI: add a vendor hook in cpufreq_online
* ddf8bd0861 ANDROID: abi_gki_aarch64_qcom: update abi symbols
* 44045194d3 ANDROID: Revert^3 "ANDROID: Enable Rust Binder Module"
* 512a729ce8 ANDROID: ABI: Update pixel symbol list
* 324b653e2a ANDROID: scsi: ufs: add complete init vendor hook
* 0c09faf922 ANDROID: scsi: ufs: add vendor hook to override key reprogramming
* b5f875e6b5 ANDROID: GKI: Update Honor abi symbol list
* 6137bb28d6 ANDROID: GKI: Add hooks for sk_alloc.
* 27547c6a80 ANDROID: GKI: Update lenovo symbol list
* 0383c45728 ANDROID: GKI: Export css_task_iter_start()
* d8755d1258 ANDROID: thermal: Fix cases for vendor hook function not accounted correctly
* 23f02fa409 ANDROID: GKI: Update symbol list for xiaomi
* d2b35f36dc ANDROID: vendor_hooks: export cgroup_threadgroup_rwsem
* 95c7d8e95a FROMLIST: mm: fix incorrect vbq reference in purge_fragmented_block
* 2886675699 ANDROID: GKI: Update symbol list for vivo
* c996e1044e ANDROID: GKI: Modify the RWBS_LEN for blk_fill_rwbs
* 0dd775b383 ANDROID: GKI: add vendor hooks for blk_fill_rwbs
* 32721ad08c ANDROID: GKI: Update symbol list for Amlogic
* 05a1f39385 ANDROID: mm: allow hooks into free_pages_prepare()
* 379c8853b2 ANDROID: mm: allow hooks into __alloc_pages()
* 6d28431b7f FROMLIST: selftests/vDSO: fix clang build errors and warnings
* 069482893b FROMLIST: selftests/timers: Guard LONG_MAX / LONG_MIN defines
* 4141052d20 ANDROID: GKI: Update symbol list for vivo
* cdb09f7ea3 ANDROID: vendor hooks: add vendor hooks for do_new_mount
* 5bfee09a96 ANDROID: GKI: Export tracepoint tcp_retransmit_skb

Change-Id: Ic961ad4f6bd1536fcd025f2b4a95614166a2bc4a
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
commit 3b4c9871b1
@@ -870,6 +870,8 @@ ddk_headers(
 ddk_headers(
     name = "all_headers_allowlist_aarch64",
     hdrs = [
+        "drivers/thermal/thermal_core.h",
+        "drivers/thermal/thermal_netlink.h",
         ":all_headers_allowlist_aarch64_globs",
         ":all_headers_allowlist_common_globs",
     ],
@@ -879,6 +881,7 @@ ddk_headers(
     linux_includes = [
         "arch/arm64/include",
         "arch/arm64/include/uapi",
+        "drivers/thermal",
         "include",
         "include/uapi",
     ],
@@ -1899,7 +1902,7 @@ cc_binary_with_abi(
         "x86_64",
         "x86",
     ],
-    copts = _KSELFTEST_COPTS,
+    copts = _KSELFTEST_COPTS + ["-fomit-frame-pointer"],
     includes = ["tools/testing/selftests"],
     linkopts = ["-static"],
     path_prefix = _KSELFTEST_DIR,
@@ -1653,6 +1653,20 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type -
     Quantization parameter for a P frame for FWHT. Valid range: from 1
     to 31.
 
+``V4L2_CID_MPEG_VIDEO_AVERAGE_QP (integer)``
+    This read-only control returns the average QP value of the currently
+    encoded frame. The value applies to the last dequeued capture buffer
+    (VIDIOC_DQBUF). Its valid range depends on the encoding format and parameters.
+    For H264, its valid range is from 0 to 51.
+    For HEVC, its valid range is from 0 to 51 for 8 bit and
+    from 0 to 63 for 10 bit.
+    For H263 and MPEG4, its valid range is from 1 to 31.
+    For VP8, its valid range is from 0 to 127.
+    For VP9, its valid range is from 0 to 255.
+    If the codec's MIN_QP and MAX_QP are set, then the QP will meet both requirements.
+    Codecs need to always use the specified range, rather then a HW custom range.
+    Applicable to encoders
+
 .. raw:: latex
 
     \normalsize
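For reference, a minimal user-space sketch of reading this control after encoding a frame. It is a sketch, not part of this commit: it assumes UAPI headers that define V4L2_CID_MPEG_VIDEO_AVERAGE_QP, an already-open encoder fd, and the helper name read_average_qp is hypothetical.

/* Query the average QP of the frame that was just dequeued with
 * VIDIOC_DQBUF, using the control documented above. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int read_average_qp(int encoder_fd)
{
	struct v4l2_control ctrl = { .id = V4L2_CID_MPEG_VIDEO_AVERAGE_QP };

	if (ioctl(encoder_fd, VIDIOC_G_CTRL, &ctrl) < 0) {
		perror("VIDIOC_G_CTRL");
		return -1;
	}
	/* Range depends on the codec, e.g. 0..51 for H264 (see doc text). */
	printf("average QP: %d\n", ctrl.value);
	return 0;
}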
Makefile
@@ -1008,6 +1008,7 @@ ifdef CONFIG_RUST
 # This addresses the problem that on e.g. i686, int != long, and Rust
 # maps both to i32.
 # See https://rcvalle.com/docs/rust-cfi-design-doc.pdf for details.
+$(error "Enabling Rust and CFI silently changes the KMI.")
 CC_FLAGS_CFI += -fsanitize-cfi-icall-experimental-normalize-integers
 RS_FLAGS_CFI := -Zsanitizer=kcfi -Zsanitizer-cfi-normalize-integers
 KBUILD_RUSTFLAGS += $(RS_FLAGS_CFI)
(File diff suppressed because it is too large.)
@@ -135,6 +135,7 @@
   class_create_file_ns
   class_destroy
   class_find_device
+  class_is_registered
   class_register
   class_remove_file_ns
   class_unregister
@@ -631,10 +632,12 @@
   drm_crtc_handle_vblank
   drm_crtc_init_with_planes
   drm_crtc_send_vblank_event
+  drm_crtc_vblank_count
   drm_crtc_vblank_get
   drm_crtc_vblank_helper_get_vblank_timestamp
   drm_crtc_vblank_off
   drm_crtc_vblank_on
+  drm_crtc_vblank_put
   ___drm_dbg
   drm_debugfs_create_files
   drm_dev_alloc
@@ -976,6 +979,8 @@
   hex2bin
   hex_asc
   hex_dump_to_buffer
+  __hid_register_driver
+  hid_unregister_driver
   high_memory
   hrtimer_active
   hrtimer_cancel
@@ -1002,6 +1007,7 @@
   i2c_put_adapter
   i2c_register_driver
   i2c_smbus_read_byte_data
+  i2c_smbus_write_byte
   i2c_smbus_write_byte_data
   i2c_smbus_xfer
   i2c_transfer
@@ -2222,6 +2228,8 @@
   __traceiter_mmap_lock_acquire_returned
   __traceiter_mmap_lock_released
   __traceiter_mmap_lock_start_locking
+  __traceiter_mm_page_alloc
+  __traceiter_mm_page_free
   __traceiter_rwmmio_post_read
   __traceiter_rwmmio_post_write
   __traceiter_rwmmio_read
@@ -2256,6 +2264,8 @@
   __tracepoint_mmap_lock_acquire_returned
   __tracepoint_mmap_lock_released
   __tracepoint_mmap_lock_start_locking
+  __tracepoint_mm_page_alloc
+  __tracepoint_mm_page_free
   tracepoint_probe_register
   tracepoint_probe_unregister
   __tracepoint_rwmmio_post_read
@@ -2277,6 +2287,7 @@
   try_module_get
   try_wait_for_completion
   tty_flip_buffer_push
+  tty_termios_hw_change
   uart_add_one_port
   uart_console_device
   uart_console_write
@@ -2399,6 +2410,13 @@
   usb_role_switch_unregister
   usb_root_hub_lost_power
   usb_scuttle_anchored_urbs
+  usb_serial_deregister_drivers
+  usb_serial_generic_close
+  usb_serial_generic_get_icount
+  usb_serial_generic_open
+  usb_serial_generic_throttle
+  usb_serial_generic_unthrottle
+  usb_serial_register_drivers
   usb_set_interface
   usb_submit_urb
   usb_unanchor_urb

@@ -1527,6 +1527,10 @@
   snd_usb_autoresume
   snd_usb_autosuspend
   snd_usb_register_platform_ops
+  __traceiter_android_rvh_usb_dev_suspend
+  __traceiter_android_vh_usb_dev_resume
+  __tracepoint_android_rvh_usb_dev_suspend
+  __tracepoint_android_vh_usb_dev_resume
   usb_altnum_to_altsetting
   usb_choose_configuration
   usb_ifnum_to_if
@@ -1610,6 +1614,8 @@
 # required by exynos_thermal_v2.ko
   devm_thermal_of_zone_register
   kthread_flush_work
+  kunit_hooks
+  kunit_running
   of_get_cpu_node
   thermal_cdev_update
   thermal_cooling_device_unregister

@@ -15,7 +15,9 @@
   cleancache_register_ops
   copy_page
   _dev_alert
+  device_pm_wait_for_dev
   __devm_alloc_percpu
+  dma_heap_try_get_pool_size_kb
   elv_bio_merge_ok
   elv_rb_add
   elv_rb_del
@@ -51,6 +53,8 @@
   rfkill_soft_blocked
   scsi_device_quiesce
   scsi_device_resume
+  stack_trace_save_regs
+  __kfree_skb
   __traceiter_android_rvh_arm64_serror_panic
   __traceiter_android_rvh_die_kernel_fault
   __traceiter_android_rvh_do_el1_bti
@@ -60,24 +64,40 @@
   __traceiter_android_rvh_do_sp_pc_abort
   __traceiter_android_rvh_panic_unhandled
   __traceiter_android_rvh_report_bug
+  __traceiter_android_vh_alloc_contig_range_not_isolated
+  __traceiter_android_vh_alloc_pages_slowpath_start
+  __traceiter_android_vh_alloc_pages_slowpath_end
   __traceiter_android_vh_cache_show
+  __traceiter_android_vh_cma_debug_show_areas
   __traceiter_android_vh_exit_mm
   __traceiter_android_vh_is_fpsimd_save
   __traceiter_android_vh_logbuf
   __traceiter_android_vh_logbuf_pr_cont
+  __traceiter_android_vh_madvise_pageout_swap_entry
   __traceiter_android_vh_madvise_swapin_walk_pmd_entry
   __traceiter_android_vh_meminfo_proc_show
   __traceiter_android_vh_print_slabinfo_header
   __traceiter_android_vh_process_madvise
+  __traceiter_android_vh_psi_update_triggers
   __traceiter_android_vh_ptype_head
+  __traceiter_android_vh_rebalance_anon_lru_bypass
+  __traceiter_android_vh_rtmutex_wait_finish
   __traceiter_android_vh_show_mem
   __traceiter_android_vh_show_smap
   __traceiter_android_vh_smaps_pte_entry
   __traceiter_android_vh_try_to_freeze_todo
   __traceiter_android_vh_try_to_freeze_todo_unfrozen
+  __traceiter_android_vh_use_vm_swappiness
+  __traceiter_android_vh_warn_alloc_tune_ratelimit
+  __traceiter_android_vh_warn_alloc_show_mem_bypass
   __traceiter_android_vh_watchdog_timer_softlockup
+  __traceiter_android_vh_wq_lockup_pool
   __traceiter_console
+  __traceiter_consume_skb
+  __traceiter_error_report_end
+  __traceiter_vm_unmapped_area
   __traceiter_workqueue_execute_start
+  __traceiter_kfree_skb
   __tracepoint_android_rvh_arm64_serror_panic
   __tracepoint_android_rvh_die_kernel_fault
   __tracepoint_android_rvh_do_el1_bti
@@ -87,23 +107,39 @@
   __tracepoint_android_rvh_do_sp_pc_abort
   __tracepoint_android_rvh_panic_unhandled
   __tracepoint_android_rvh_report_bug
+  __tracepoint_android_vh_alloc_contig_range_not_isolated
+  __tracepoint_android_vh_alloc_pages_slowpath_start
+  __tracepoint_android_vh_alloc_pages_slowpath_end
   __tracepoint_android_vh_cache_show
+  __tracepoint_android_vh_cma_debug_show_areas
   __tracepoint_android_vh_exit_mm
   __tracepoint_android_vh_is_fpsimd_save
   __tracepoint_android_vh_logbuf
   __tracepoint_android_vh_logbuf_pr_cont
+  __tracepoint_android_vh_madvise_pageout_swap_entry
   __tracepoint_android_vh_madvise_swapin_walk_pmd_entry
   __tracepoint_android_vh_meminfo_proc_show
   __tracepoint_android_vh_print_slabinfo_header
   __tracepoint_android_vh_process_madvise
+  __tracepoint_android_vh_psi_update_triggers
   __tracepoint_android_vh_ptype_head
+  __tracepoint_android_vh_rebalance_anon_lru_bypass
+  __tracepoint_android_vh_rtmutex_wait_finish
   __tracepoint_android_vh_show_mem
   __tracepoint_android_vh_show_smap
   __tracepoint_android_vh_smaps_pte_entry
   __tracepoint_android_vh_try_to_freeze_todo
   __tracepoint_android_vh_try_to_freeze_todo_unfrozen
+  __tracepoint_android_vh_use_vm_swappiness
+  __tracepoint_android_vh_warn_alloc_tune_ratelimit
+  __tracepoint_android_vh_warn_alloc_show_mem_bypass
   __tracepoint_android_vh_watchdog_timer_softlockup
+  __tracepoint_android_vh_wq_lockup_pool
   __tracepoint_console
+  __tracepoint_consume_skb
+  __tracepoint_error_report_end
+  __tracepoint_vm_unmapped_area
   __tracepoint_workqueue_execute_start
+  __tracepoint_kfree_skb
   usb_set_device_state
   yield

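The __traceiter_/__tracepoint_ pairs in the list above are the exported halves of Android vendor hooks; a vendor module attaches a probe through the generated register_trace_* wrapper. A hypothetical sketch for one hook from this list follows; the probe prototype (void *data plus the hook's TP_PROTO arguments) is an assumption based on the usual DECLARE_HOOK declaration in include/trace/hooks/mm.h, not something this commit defines.

/* Hypothetical vendor module attaching to android_vh_meminfo_proc_show,
 * one of the hooks whose symbols the list above exports. */
#include <linux/module.h>
#include <linux/seq_file.h>
#include <trace/hooks/mm.h>

static void vendor_meminfo_show(void *data, struct seq_file *m)
{
	/* Append a vendor-specific line to /proc/meminfo. */
	seq_printf(m, "VendorStat:     0 kB\n");
}

static int __init vendor_hook_init(void)
{
	/* Resolves against the exported __tracepoint_android_vh_meminfo_proc_show. */
	return register_trace_android_vh_meminfo_proc_show(vendor_meminfo_show, NULL);
}
module_init(vendor_hook_init);
MODULE_LICENSE("GPL");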
@@ -45,8 +45,11 @@
   nvmem_device_find
   device_match_of_node
   drop_super
+  filp_open_block
   mm_trace_rss_stat
   __kfifo_len_r
+  __traceiter_android_vh_rwsem_write_wait_finish
+  __tracepoint_android_vh_rwsem_write_wait_finish
   __tracepoint_android_rvh_cpuinfo_c_show
   __traceiter_android_rvh_cpuinfo_c_show
   __tracepoint_android_vh_dc_send_copy
@@ -77,7 +80,13 @@
   __traceiter_android_vh_si_mem_available_adjust
   __tracepoint_android_vh_si_meminfo_adjust
   __traceiter_android_vh_si_meminfo_adjust
+  __traceiter_android_vh_rwsem_write_finished
+  __tracepoint_android_vh_rwsem_write_finished
   __traceiter_android_rvh_hw_protection_shutdown
   __tracepoint_android_rvh_hw_protection_shutdown
   __traceiter_android_rvh_bpf_int_jit_compile_ro
   __tracepoint_android_rvh_bpf_int_jit_compile_ro
+  __traceiter_android_vh_sk_alloc
+  __tracepoint_android_vh_sk_alloc
+  __traceiter_android_vh_sk_free
+  __tracepoint_android_vh_sk_free

@@ -168,6 +168,8 @@
   clk_notifier_unregister
   clk_prepare
   clk_put
+  clk_rate_exclusive_get
+  clk_rate_exclusive_put
   clk_round_rate
   clk_set_parent
   clk_set_rate
@@ -445,6 +447,7 @@
   __devm_spi_alloc_controller
   devm_spi_mem_dirmap_create
   devm_spi_register_controller
+  devm_tegra_memory_controller_get
   devm_thermal_of_zone_register
   devm_usb_get_phy
   devm_usb_get_phy_by_phandle
@@ -453,9 +456,12 @@
   dev_open
   dev_pm_domain_attach_by_id
   dev_pm_domain_attach_by_name
+  dev_pm_domain_attach_list
   dev_pm_domain_detach
+  dev_pm_domain_detach_list
   dev_pm_genpd_add_notifier
   dev_pm_genpd_remove_notifier
+  dev_pm_genpd_set_performance_state
   dev_pm_opp_add_dynamic
   dev_pm_opp_clear_config
   dev_pm_opp_find_freq_ceil
@@ -1578,6 +1584,7 @@
   open_candev
   orderly_poweroff
   orderly_reboot
+  overflowuid
   page_pinner_inited
   __page_pinner_put_page
   page_pool_alloc_pages
@@ -1588,6 +1595,7 @@
   panic_notifier_list
   param_array_ops
   param_get_int
+  param_get_uint
   param_ops_bool
   param_ops_byte
   param_ops_charp
@@ -1597,6 +1605,7 @@
   param_ops_ullong
   param_ops_ulong
   param_ops_ushort
+  param_set_uint_minmax
   pci_alloc_irq_vectors
   pci_ats_supported
   pci_bus_type
@@ -2354,6 +2363,8 @@
   __tasklet_schedule
   tasklet_setup
   tasklet_unlock_wait
+  tegra_mc_probe_device
+  tegra_sku_info
   thermal_cooling_device_unregister
   thermal_of_cooling_device_register
   thermal_zone_device_priv
@@ -2689,6 +2700,7 @@
   vring_del_virtqueue
   vring_interrupt
   vring_new_virtqueue
+  vscnprintf
   vsnprintf
   vunmap
   vzalloc

@@ -141,6 +141,8 @@
   clk_put
   clk_set_rate
   clk_unprepare
+  clockevent_delta2ns
+  clockevents_register_device
   cma_alloc
   cma_get_name
   cma_release
@@ -189,6 +191,9 @@
   cpu_topology
   crc32_le
   css_next_child
+  css_task_iter_end
+  css_task_iter_next
+  css_task_iter_start
   csum_partial
   _ctype
   debugfs_attr_read
@@ -489,6 +494,7 @@
   fwnode_property_read_u32_array
   gcd
   gen_pool_virt_to_phys
+  generic_access_phys
   generic_device_group
   generic_file_llseek
   generic_handle_domain_irq
@@ -996,6 +1002,7 @@
   rtnl_lock
   rtnl_unlock
   sched_clock
+  sched_clock_register
   sched_feat_names
   sched_set_fifo
   sched_set_fifo_low
@@ -1241,6 +1248,7 @@
   __traceiter_android_rvh_update_cpu_capacity
   __traceiter_android_rvh_wake_up_new_task
   __traceiter_android_vh_dup_task_struct
+  __traceiter_android_vh_cpufreq_online
   __traceiter_android_vh_update_topology_flags_workfn
   __traceiter_binder_transaction_received
   __traceiter_cpu_frequency_limits
@@ -1266,6 +1274,7 @@
   __tracepoint_android_rvh_update_cpu_capacity
   __tracepoint_android_rvh_wake_up_new_task
   __tracepoint_android_vh_dup_task_struct
+  __tracepoint_android_vh_cpufreq_online
   __tracepoint_android_vh_update_topology_flags_workfn
   __tracepoint_binder_transaction_received
   __tracepoint_cpu_frequency_limits

@@ -2950,6 +2950,7 @@
   __traceiter_android_rvh_try_to_wake_up_success
   __traceiter_android_rvh_update_cpu_capacity
   __traceiter_android_rvh_update_misfit_status
+  __traceiter_android_rvh_util_est_update
   __traceiter_android_rvh_wake_up_new_task
   __traceiter_android_vh_alter_futex_plist_add
   __traceiter_android_vh_alter_rwsem_list_add
@@ -3067,6 +3068,7 @@
   __tracepoint_android_rvh_try_to_wake_up_success
   __tracepoint_android_rvh_update_cpu_capacity
   __tracepoint_android_rvh_update_misfit_status
+  __tracepoint_android_rvh_util_est_update
   __tracepoint_android_rvh_wake_up_new_task
   __tracepoint_android_vh_alter_futex_plist_add
   __tracepoint_android_vh_alter_rwsem_list_add

@@ -8,6 +8,7 @@
   blk_rq_map_user
   blk_rq_map_user_iov
   blk_start_plug
+  blk_fill_rwbs
   __break_lease
   cgroup_add_legacy_cftypes
   config_item_init_type_name
@@ -249,6 +250,7 @@
   __traceiter_android_vh_futex_wake_this
   __traceiter_android_vh_futex_wake_traverse_plist
   __traceiter_android_vh_futex_wake_up_q_finish
+  __traceiter_android_vh_fsnotify_open
   __traceiter_android_vh_get_page_wmark
   __traceiter_android_vh_irqtime_account_process_tick
   __traceiter_android_vh_kmalloc_slab
@@ -394,6 +396,7 @@
   __tracepoint_android_vh_futex_wake_this
   __tracepoint_android_vh_futex_wake_traverse_plist
   __tracepoint_android_vh_futex_wake_up_q_finish
+  __tracepoint_android_vh_fsnotify_open
   __tracepoint_android_vh_get_page_wmark
   __tracepoint_android_vh_irqtime_account_process_tick
   __tracepoint_android_vh_kmalloc_slab
@@ -490,6 +493,7 @@
   v4l2_i2c_subdev_init
   vfs_create
   vfs_getattr
+  vfs_getattr_nosec
   vfs_iter_read
   vfs_iter_write
   vfs_llseek

@@ -63,6 +63,7 @@
   __bitmap_or
   bitmap_parse
   bitmap_parselist
+  bitmap_parse_user
   bitmap_print_to_pagebuf
   __bitmap_set
   bitmap_to_arr32
@@ -423,6 +424,7 @@
   devm_regulator_get_optional
   devm_regulator_put
   devm_regulator_register
+  devm_request_any_context_irq
   __devm_request_region
   devm_request_threaded_irq
   devm_rtc_device_register
@@ -554,6 +556,7 @@
   down_write
   dput
   drain_workqueue
+  driver_for_each_device
   driver_register
   driver_unregister
   drm_add_edid_modes
@@ -829,6 +832,7 @@
   frame_vector_create
   frame_vector_destroy
   frame_vector_to_pages
+  free_hyp_memcache
   free_iova_fast
   free_irq
   free_netdev
@@ -1088,6 +1092,7 @@
   iommu_set_fault_handler
   iommu_unmap
   iommu_unregister_device_fault_handler
+  io_pgtable_configure
   ioremap_prot
   io_schedule_timeout
   iounmap
@@ -1153,6 +1158,14 @@
   kfree_skb_reason
   kill_fasync
   kimage_voffset
+  klist_add_head
+  klist_add_tail
+  klist_init
+  klist_iter_exit
+  klist_iter_init
+  klist_next
+  klist_node_attached
+  klist_remove
   __kmalloc
   kmalloc_caches
   kmalloc_large
@@ -1332,6 +1345,7 @@
   __netdev_alloc_skb
   netdev_err
   netdev_info
+  netdev_refcnt_read
   netdev_set_default_ethtool_ops
   netdev_state_change
   netdev_update_features
@@ -1410,6 +1424,7 @@
   of_genpd_add_provider_simple
   of_get_child_by_name
   of_get_cpu_node
+  of_get_drm_panel_display_mode
   of_get_named_gpio
   of_get_next_available_child
   of_get_next_child
@@ -1557,6 +1572,10 @@
   pin_user_pages_fast
   pin_user_pages_remote
   pktgen_xfrm_outer_mode_output
+  pkvm_iommu_resume
+  pkvm_iommu_suspend
+  __pkvm_topup_hyp_alloc
+  __pkvm_topup_hyp_alloc_mgt
   platform_bus_type
   platform_device_add
   platform_device_add_data
@@ -1655,6 +1674,7 @@
   radix_tree_iter_delete
   radix_tree_lookup
   radix_tree_next_chunk
+  raise_softirq
   ___ratelimit
   raw_notifier_call_chain
   raw_notifier_chain_register
@@ -1863,6 +1883,7 @@
   seq_write
   set_cpus_allowed_ptr
   set_freezable
+  set_normalized_timespec64
   set_page_dirty
   set_page_dirty_lock
   set_task_cpu
@@ -1946,6 +1967,7 @@
   snd_soc_add_dai_controls
   snd_soc_bytes_tlv_callback
   snd_soc_card_get_kcontrol
+  snd_soc_card_get_kcontrol_locked
   snd_soc_card_jack_new_pins
   snd_soc_component_disable_pin
   snd_soc_component_enable_pin
@@ -2204,6 +2226,8 @@
   __traceiter_android_rvh_set_user_nice_locked
   __traceiter_android_rvh_tick_entry
   __traceiter_android_rvh_uclamp_eff_get
+  __traceiter_android_rvh_ufs_complete_init
+  __traceiter_android_rvh_ufs_reprogram_all_keys
   __traceiter_android_rvh_update_blocked_fair
   __traceiter_android_rvh_update_load_avg
   __traceiter_android_rvh_update_misfit_status
@@ -2298,6 +2322,8 @@
   __tracepoint_android_rvh_set_user_nice_locked
   __tracepoint_android_rvh_tick_entry
   __tracepoint_android_rvh_uclamp_eff_get
+  __tracepoint_android_rvh_ufs_complete_init
+  __tracepoint_android_rvh_ufs_reprogram_all_keys
   __tracepoint_android_rvh_update_blocked_fair
   __tracepoint_android_rvh_update_load_avg
   __tracepoint_android_rvh_update_misfit_status

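Among the additions above, the klist_* entries form a self-contained kernel API; a minimal in-kernel sketch of how those exported symbols fit together, assuming the stock <linux/klist.h> signatures (the struct and function names here are illustrative, not from this commit):

/* Build a klist, walk it with the iterator, then detach the node. */
#include <linux/kernel.h>
#include <linux/klist.h>
#include <linux/slab.h>

struct item {
	struct klist_node node;
	int value;
};

static struct klist my_list;

static void klist_demo(void)
{
	struct item *it = kzalloc(sizeof(*it), GFP_KERNEL);
	struct klist_iter iter;
	struct klist_node *n;

	if (!it)
		return;

	klist_init(&my_list, NULL, NULL);   /* no get/put refcounting hooks */
	klist_add_tail(&it->node, &my_list);

	klist_iter_init(&my_list, &iter);
	while ((n = klist_next(&iter)))
		pr_info("value=%d\n", container_of(n, struct item, node)->value);
	klist_iter_exit(&iter);

	if (klist_node_attached(&it->node))
		klist_remove(&it->node);    /* blocks until the node is unused */
}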
@@ -161,6 +161,7 @@
   complete_all
   completion_done
   config_ep_by_speed
+  configfs_register_default_group
   configfs_register_group
   configfs_register_subsystem
   configfs_unregister_group
@@ -401,6 +402,8 @@
   devm_reset_controller_register
   devm_rtc_allocate_device
   __devm_rtc_register_device
+  devm_snd_soc_register_component
+  devm_tegra_memory_controller_get
   devm_thermal_of_cooling_device_register
   devm_thermal_of_zone_register
   devm_usb_get_phy_by_node
@@ -461,6 +464,7 @@
   dma_buf_map_attachment
   dma_buf_map_attachment_unlocked
   dma_buf_put
+  dma_buf_set_name
   dma_buf_unmap_attachment
   dma_buf_unmap_attachment_unlocked
   dma_contiguous_default_area
@@ -516,11 +520,15 @@
   driver_register
   driver_set_override
   driver_unregister
+  drm_add_edid_modes
   drm_atomic_get_connector_state
   drm_atomic_get_crtc_state
   drm_atomic_get_new_private_obj_state
   drm_atomic_get_old_private_obj_state
   drm_atomic_get_private_obj_state
+  drm_atomic_helper_connector_destroy_state
+  drm_atomic_helper_connector_duplicate_state
+  drm_atomic_helper_connector_reset
   __drm_atomic_helper_private_obj_duplicate_state
   drm_atomic_helper_wait_for_vblanks
   drm_atomic_private_obj_fini
@@ -528,12 +536,18 @@
   drm_atomic_state_default_clear
   drm_atomic_state_default_release
   drm_atomic_state_init
+  drm_bridge_add
+  drm_bridge_remove
   drm_client_init
   drm_client_modeset_commit_locked
   drm_client_register
+  drm_connector_attach_encoder
+  drm_connector_cleanup
+  drm_connector_init
   drm_connector_list_update
   drm_connector_register
   drm_connector_unregister
+  drm_connector_update_edid_property
   drm_crtc_add_crc_entry
   __drm_crtc_commit_free
   drm_crtc_commit_wait
@@ -544,6 +558,7 @@
   __drm_dev_dbg
   drm_dev_printk
   drm_display_mode_from_cea_vic
+  drm_do_get_edid
   drm_edid_dup
   drm_edid_duplicate
   drm_edid_free
@@ -556,19 +571,24 @@
   drm_framebuffer_unregister_private
   drm_gem_mmap_obj
   drm_get_connector_status_name
+  drm_helper_probe_single_connector_modes
   drm_kms_helper_hotplug_event
   drm_master_get
   drm_master_put
   drm_mode_convert_umode
+  drm_mode_copy
   drm_mode_create_dp_colorspace_property
+  drm_mode_duplicate
   drm_mode_is_420_only
   drm_mode_object_put
+  drm_mode_probed_add
   drm_mode_prune_invalid
   drm_modeset_lock
   drm_modeset_lock_single_interruptible
   drm_mode_set_name
   drm_modeset_unlock
   drm_mode_sort
+  drm_mode_vrefresh
   drm_object_property_set_value
   drm_printf
   __drm_printfn_debug
@@ -643,6 +663,7 @@
   fwnode_property_read_u32_array
   gcd
   generic_device_group
+  generic_file_llseek
   generic_handle_domain_irq
   generic_handle_irq
   geni_icc_disable
@@ -729,6 +750,11 @@
   gpio_free_array
   gpio_request
   gpio_to_desc
+  gunyah_rm_call
+  gunyah_rm_notifier_register
+  gunyah_rm_notifier_unregister
+  gunyah_rm_register_platform_ops
+  gunyah_rm_unregister_platform_ops
   handle_bad_irq
   handle_edge_irq
   handle_fasteoi_ack_irq
@@ -738,6 +764,7 @@
   handle_simple_irq
   handle_sysrq
   hashlen_string
+  hdmi_audio_infoframe_init
   hex_dump_to_buffer
   housekeeping_cpumask
   housekeeping_overridden
@@ -1009,14 +1036,20 @@
   kstrtoull_from_user
   kthread_bind_mask
   kthread_cancel_work_sync
+  kthread_create_on_cpu
   kthread_create_on_node
   kthread_create_worker
   kthread_destroy_worker
   kthread_flush_worker
   __kthread_init_worker
+  kthread_park
+  kthread_parkme
   kthread_queue_work
+  kthread_set_per_cpu
+  kthread_should_park
   kthread_should_stop
   kthread_stop
+  kthread_unpark
   kthread_worker_fn
   ktime_get
   ktime_get_coarse_with_offset
@@ -1206,6 +1239,7 @@
   of_drm_find_panel
   of_find_compatible_node
   of_find_device_by_node
+  of_find_mipi_dsi_host_by_node
   of_find_node_by_name
   of_find_node_by_phandle
   of_find_node_opts_by_path
@@ -1225,9 +1259,11 @@
   of_get_property
   of_get_regulator_init_data
   of_get_required_opp_performance_state
+  of_graph_get_endpoint_by_regs
   of_graph_get_next_endpoint
   of_graph_get_port_parent
   of_graph_get_remote_endpoint
+  of_graph_get_remote_port_parent
   of_graph_is_present
   of_graph_parse_endpoint
   of_hwspin_lock_get_id
@@ -1384,8 +1420,10 @@
   pm_clk_suspend
   pm_generic_resume
   pm_generic_suspend
+  pm_genpd_add_device
   pm_genpd_add_subdomain
   pm_genpd_init
+  pm_genpd_remove_device
   pm_genpd_remove_subdomain
   pm_power_off
   __pm_relax
@@ -1410,6 +1448,10 @@
   pm_system_wakeup
   pm_wakeup_dev_event
   pm_wakeup_ws_event
+  powercap_register_control_type
+  powercap_register_zone
+  powercap_unregister_control_type
+  powercap_unregister_zone
   power_supply_changed
   power_supply_get_by_name
   power_supply_get_drvdata
@@ -1418,10 +1460,6 @@
   power_supply_reg_notifier
   power_supply_set_property
   power_supply_unreg_notifier
-  powercap_register_control_type
-  powercap_register_zone
-  powercap_unregister_control_type
-  powercap_unregister_zone
   preempt_schedule
   preempt_schedule_notrace
   prepare_to_wait_event
@@ -1757,14 +1795,24 @@
   skip_spaces
   smp_call_function_single
   smp_call_function_single_async
+  snd_ctl_add
+  snd_ctl_new1
   snd_info_create_module_entry
   snd_info_free_entry
   snd_info_register
+  snd_pcm_add_chmap_ctls
+  snd_pcm_create_iec958_consumer_default
+  snd_pcm_fill_iec958_consumer
+  snd_pcm_fill_iec958_consumer_hw_params
+  snd_pcm_format_width
+  snd_pcm_hw_constraint_eld
   _snd_pcm_hw_params_any
   snd_soc_card_jack_new
   snd_soc_component_exit_regmap
+  snd_soc_dapm_add_routes
   snd_soc_dapm_new_widgets
   snd_soc_get_pcm_runtime
+  snd_soc_jack_report
   snd_soc_lookup_component
   snd_soc_rtdcom_lookup
   snd_usb_autoresume
@@ -1888,6 +1936,7 @@
   __task_rq_lock
   task_rq_lock
   tcp_hashinfo
+  tegra_mc_probe_device
   thermal_cdev_update
   thermal_cooling_device_register
   thermal_cooling_device_unregister
@@ -2000,6 +2049,7 @@
   __traceiter_android_vh_scheduler_tick
   __traceiter_android_vh_show_resume_epoch_val
   __traceiter_android_vh_show_suspend_epoch_val
+  __traceiter_android_vh_thermal_pm_notify_suspend
   __traceiter_android_vh_timer_calc_index
   __traceiter_android_vh_try_fixup_sea
   __traceiter_android_vh_try_to_unmap_one
@@ -2102,6 +2152,7 @@
   __tracepoint_android_vh_scheduler_tick
   __tracepoint_android_vh_show_resume_epoch_val
   __tracepoint_android_vh_show_suspend_epoch_val
+  __tracepoint_android_vh_thermal_pm_notify_suspend
   __tracepoint_android_vh_timer_calc_index
   __tracepoint_android_vh_try_fixup_sea
   __tracepoint_android_vh_try_to_unmap_one
@@ -2215,8 +2266,8 @@
   up_read
   up_write
   usb_add_phy_dev
-  usb_alloc_dev
   usb_alloc_coherent
+  usb_alloc_dev
   usb_assign_descriptors
   usb_composite_setup_continue
   usb_decode_ctrl
@@ -2267,6 +2318,7 @@
   v4l2_m2m_register_media_controller
   v4l2_m2m_request_queue
   v4l2_m2m_unregister_media_controller
+  v4l2_s_ctrl
   v4l2_subdev_call_wrappers
   v4l2_subdev_init
   vb2_create_bufs
@@ -2332,8 +2384,3 @@
   xhci_set_interrupter_moderation
   xhci_stop_endpoint_sync
   zap_vma_ptes
-  gunyah_rm_call
-  gunyah_rm_notifier_register
-  gunyah_rm_notifier_unregister
-  gunyah_rm_register_platform_ops
-  gunyah_rm_unregister_platform_ops

@@ -17,8 +17,14 @@
 devm_extcon_register_notifier_all
 devm_hwspin_lock_request_specific
 _dev_info
+dev_pm_opp_calc_power
+dev_pm_opp_of_register_em
 dev_set_name
 __dynamic_netdev_dbg
+em_dev_update_chip_binning
+em_dev_update_perf_domain
+em_pd_get
+em_update_performance_limits
 finish_wait
 fortify_panic
 idr_alloc

@@ -27,10 +33,15 @@
 init_timer_key
 init_wait_entry
 __init_waitqueue_head
+irq_to_desc
+kernel_cpustat
 kfree
 __kmalloc
 kmalloc_caches
 kmalloc_trace
+kstat
+kstat_irqs_cpu
+kstat_irqs_usr
 ktime_get_boot_fast_ns
 __list_add_valid_or_report
 __list_del_entry_valid_or_report

@@ -43,6 +54,9 @@
 __mutex_init
 mutex_lock
 mutex_unlock
+nr_cpu_ids
+nr_ipi_get
+nr_irqs
 __per_cpu_offset
 perf_aux_output_skip
 prepare_to_wait_event

@@ -24,6 +24,7 @@
 sched_setattr_nocheck
 schedule_timeout_killable
 set_blocksize
+skb_orphan_partial
 static_key_enable
 submit_bh
 __kmalloc_node

@@ -49,6 +50,7 @@
 __traceiter_android_vh_binder_restore_priority
 __traceiter_android_vh_binder_special_task
 __traceiter_android_vh_binder_wait_for_work
+__traceiter_android_vh_blk_fill_rwbs
 __traceiter_android_vh_cgroup_attach
 __traceiter_android_vh_check_folio_look_around_ref
 __traceiter_android_vh_check_nanosleep_syscall

@@ -57,14 +59,18 @@
 __traceiter_android_vh_configfs_uevent_work
 __traceiter_android_vh_count_workingset_refault
 __traceiter_android_vh_do_anonymous_page
+__traceiter_android_vh_do_new_mount_fc
 __traceiter_android_vh_do_swap_page
 __traceiter_android_vh_do_wp_page
 __traceiter_android_vh_dup_task_struct
 __traceiter_android_vh_f2fs_file_open
+__traceiter_android_vh_f2fs_ra_op_flags
 __traceiter_android_vh_filemap_update_page
 __traceiter_android_vh_free_task
 __traceiter_android_vh_fuse_request_end
 __traceiter_android_vh_irqtime_account_process_tick
+__traceiter_android_vh_inode_lru_isolate
+__traceiter_android_vh_invalidate_mapping_pagevec
 __traceiter_android_vh_look_around
 __traceiter_android_vh_look_around_migrate_folio
 __traceiter_android_vh_lock_folio_drop_mmap_end

@@ -79,6 +85,7 @@
 __traceiter_android_vh_percpu_rwsem_down_read
 __traceiter_android_vh_percpu_rwsem_up_write
 __traceiter_android_vh_percpu_rwsem_wq_add
+__traceiter_android_rvh_pr_set_vma_name_bypass
 __traceiter_android_vh_queue_request_and_unlock
 __traceiter_android_vh_record_rwsem_reader_owned
 __traceiter_android_vh_record_rwsem_writer_owned

@@ -94,6 +101,7 @@
 __traceiter_android_vh_sd_init_unmap_multi_segment
 __traceiter_android_vh_sd_setup_unmap_multi_segment
 __traceiter_android_vh_shmem_swapin_folio
+__traceiter_android_vh_shrink_folio_list
 __traceiter_android_vh_swapmem_gather_add_bypass
 __traceiter_android_vh_swapmem_gather_finish
 __traceiter_android_vh_swapmem_gather_init

@@ -120,6 +128,7 @@
 __traceiter_sched_waking
 __traceiter_sys_exit
 __traceiter_task_rename
+__traceiter_tcp_retransmit_skb
 __traceiter_workqueue_execute_end
 __traceiter_workqueue_execute_start
 __tracepoint_android_rvh_alloc_and_link_pwqs

@@ -142,6 +151,7 @@
 __tracepoint_android_vh_binder_restore_priority
 __tracepoint_android_vh_binder_special_task
 __tracepoint_android_vh_binder_wait_for_work
+__tracepoint_android_vh_blk_fill_rwbs
 __tracepoint_android_vh_cgroup_attach
 __tracepoint_android_vh_check_folio_look_around_ref
 __tracepoint_android_vh_check_nanosleep_syscall

@@ -150,13 +160,17 @@
 __tracepoint_android_vh_configfs_uevent_work
 __tracepoint_android_vh_count_workingset_refault
 __tracepoint_android_vh_do_anonymous_page
+__tracepoint_android_vh_do_new_mount_fc
 __tracepoint_android_vh_do_swap_page
 __tracepoint_android_vh_do_wp_page
 __tracepoint_android_vh_dup_task_struct
 __tracepoint_android_vh_f2fs_file_open
+__tracepoint_android_vh_f2fs_ra_op_flags
 __tracepoint_android_vh_filemap_update_page
 __tracepoint_android_vh_free_task
 __tracepoint_android_vh_fuse_request_end
+__tracepoint_android_vh_inode_lru_isolate
+__tracepoint_android_vh_invalidate_mapping_pagevec
 __tracepoint_android_vh_irqtime_account_process_tick
 __tracepoint_android_vh_look_around
 __tracepoint_android_vh_look_around_migrate_folio

@@ -172,6 +186,7 @@
 __tracepoint_android_vh_percpu_rwsem_down_read
 __tracepoint_android_vh_percpu_rwsem_up_write
 __tracepoint_android_vh_percpu_rwsem_wq_add
+__tracepoint_android_rvh_pr_set_vma_name_bypass
 __tracepoint_android_vh_queue_request_and_unlock
 __tracepoint_android_vh_record_rwsem_reader_owned
 __tracepoint_android_vh_record_rwsem_writer_owned

@@ -187,6 +202,7 @@
 __tracepoint_android_vh_sd_init_unmap_multi_segment
 __tracepoint_android_vh_sd_setup_unmap_multi_segment
 __tracepoint_android_vh_shmem_swapin_folio
+__tracepoint_android_vh_shrink_folio_list
 __tracepoint_android_vh_swapmem_gather_add_bypass
 __tracepoint_android_vh_swapmem_gather_finish
 __tracepoint_android_vh_swapmem_gather_init

@@ -213,6 +229,7 @@
 __tracepoint_sched_waking
 __tracepoint_sys_exit
 __tracepoint_task_rename
+__tracepoint_tcp_retransmit_skb
 __tracepoint_workqueue_execute_end
 __tracepoint_workqueue_execute_start
 ucsi_send_command

@@ -226,6 +243,7 @@
 __traceiter_android_rvh_udpv6_sendmsg
 __traceiter_android_rvh_udpv6_recvmsg
 __traceiter_android_rvh_tcp_select_window
+__traceiter_android_rvh_tcp_rcv_spurious_retrans
 __traceiter_android_rvh_inet_sock_create
 __traceiter_android_rvh_inet_sock_release
 __traceiter_android_vh_tcp_rtt_estimator

@@ -239,6 +257,7 @@
 __tracepoint_android_rvh_udpv6_sendmsg
 __tracepoint_android_rvh_udpv6_recvmsg
 __tracepoint_android_rvh_tcp_select_window
+__tracepoint_android_rvh_tcp_rcv_spurious_retrans
 __tracepoint_android_rvh_inet_sock_create
 __tracepoint_android_rvh_inet_sock_release
 __tracepoint_android_vh_tcp_rtt_estimator

@@ -62,6 +62,8 @@
 scsi_device_lookup
 scsi_host_lookup
 scsi_device_lookup
+blk_mq_quiesce_tagset
+blk_mq_unquiesce_tagset
 __traceiter_android_vh_anon_vma_name_recog
 __traceiter_android_vh_restore_mm_flags
 __traceiter_android_vh_update_vma_flags

@@ -88,6 +90,7 @@
 __tracepoint_android_rvh_dequeue_task
 cpuset_cpus_allowed
 cpufreq_update_policy
+cgroup_threadgroup_rwsem

 #required by millet.ko
 __traceiter_android_rvh_refrigerator

@@ -126,6 +129,22 @@
 __tracepoint_android_rvh_dequeue_task_fair
 __tracepoint_android_rvh_entity_tick

+#required by cpq.ko
+elv_rb_former_request
+elv_rb_latter_request
+blk_mq_sched_try_merge
+elv_rb_find
+elv_bio_merge_ok
+elv_rb_del
+elv_rb_add
+elv_rqhash_del
+blk_mq_sched_mark_restart_hctx
+__blk_req_zone_write_unlock
+blk_mq_sched_try_insert_merge
+elv_rqhash_add
+blk_req_needs_zone_write_lock
+__blk_req_zone_write_lock
+
 #required by cifs.ko
 add_swap_extent
 asn1_ber_decoder

@@ -259,6 +259,7 @@ config ARM64
        select TRACE_IRQFLAGS_SUPPORT
        select TRACE_IRQFLAGS_NMI_SUPPORT
        select HAVE_SOFTIRQ_ON_OWN_STACK
+       select USER_STACKTRACE_SUPPORT
        help
          ARM 64-bit (AArch64) Linux support.

@@ -41,7 +41,7 @@ KBUILD_CFLAGS += -mgeneral-regs-only \
 KBUILD_CFLAGS += $(call cc-disable-warning, psabi)
 KBUILD_AFLAGS += $(compat_vdso)

-KBUILD_RUSTFLAGS += --target=aarch64-unknown-none -Ctarget-feature="-neon"
+KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json

 KBUILD_CFLAGS += $(call cc-option,-mabi=lp64)
 KBUILD_AFLAGS += $(call cc-option,-mabi=lp64)

@@ -110,7 +110,6 @@ endif

 ifeq ($(CONFIG_SHADOW_CALL_STACK), y)
 KBUILD_CFLAGS += -ffixed-x18
-KBUILD_RUSTFLAGS += -Ctarget-feature=+reserve-x18
 endif

 ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)

@@ -48,7 +48,6 @@ CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 # CONFIG_RSEQ is not set
 CONFIG_PROFILING=y
-CONFIG_RUST=y
 CONFIG_ARCH_SUNXI=y
 CONFIG_ARCH_HISI=y
 CONFIG_ARCH_QCOM=y

@@ -603,7 +602,6 @@ CONFIG_DTPM=y
 CONFIG_DTPM_CPU=y
 CONFIG_DTPM_DEVFREQ=y
 CONFIG_ANDROID_BINDER_IPC=y
-CONFIG_ANDROID_BINDER_IPC_RUST=m
 CONFIG_ANDROID_BINDERFS=y
 CONFIG_ANDROID_DEBUG_SYMBOLS=y
 CONFIG_ANDROID_VENDOR_HOOKS=y

@@ -70,18 +70,22 @@
         .word 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208
         .word 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2

+        .macro load_round_constants tmp
+        adr_l \tmp, .Lsha2_rcon
+        ld1 { v0.4s- v3.4s}, [\tmp], #64
+        ld1 { v4.4s- v7.4s}, [\tmp], #64
+        ld1 { v8.4s-v11.4s}, [\tmp], #64
+        ld1 {v12.4s-v15.4s}, [\tmp]
+        .endm
+
 /*
- * void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
+ * int __sha256_ce_transform(struct sha256_ce_state *sst, u8 const *src,
  *                         int blocks)
  */
         .text
-SYM_FUNC_START(sha2_ce_transform)
-        /* load round constants */
-        adr_l x8, .Lsha2_rcon
-        ld1 { v0.4s- v3.4s}, [x8], #64
-        ld1 { v4.4s- v7.4s}, [x8], #64
-        ld1 { v8.4s-v11.4s}, [x8], #64
-        ld1 {v12.4s-v15.4s}, [x8]
+SYM_FUNC_START(__sha256_ce_transform)
+        load_round_constants x8

         /* load state */
         ld1 {dgav.4s, dgbv.4s}, [x0]

@@ -154,4 +158,269 @@ CPU_LE( rev32 v19.16b, v19.16b )
 3:      st1 {dgav.4s, dgbv.4s}, [x0]
         mov w0, w2
         ret
-SYM_FUNC_END(sha2_ce_transform)
+SYM_FUNC_END(__sha256_ce_transform)
+
+        .unreq dga
+        .unreq dgav
+        .unreq dgb
+        .unreq dgbv
+        .unreq t0
+        .unreq t1
+        .unreq dg0q
+        .unreq dg0v
+        .unreq dg1q
+        .unreq dg1v
+        .unreq dg2q
+        .unreq dg2v
+
+        // parameters for __sha256_ce_finup2x()
+        sctx            .req x0
+        data1           .req x1
+        data2           .req x2
+        len             .req w3
+        out1            .req x4
+        out2            .req x5
+
+        // other scalar variables
+        count           .req x6
+        final_step      .req w7
+
+        // x8-x9 are used as temporaries.
+
+        // v0-v15 are used to cache the SHA-256 round constants.
+        // v16-v19 are used for the message schedule for the first message.
+        // v20-v23 are used for the message schedule for the second message.
+        // v24-v31 are used for the state and temporaries as given below.
+        // *_a are for the first message and *_b for the second.
+        state0_a_q      .req q24
+        state0_a        .req v24
+        state1_a_q      .req q25
+        state1_a        .req v25
+        state0_b_q      .req q26
+        state0_b        .req v26
+        state1_b_q      .req q27
+        state1_b        .req v27
+        t0_a            .req v28
+        t0_b            .req v29
+        t1_a_q          .req q30
+        t1_a            .req v30
+        t1_b_q          .req q31
+        t1_b            .req v31
+
+#define OFFSETOF_COUNT 32 // offsetof(struct sha256_state, count)
+#define OFFSETOF_BUF   40 // offsetof(struct sha256_state, buf)
+// offsetof(struct sha256_state, state) is assumed to be 0.
+
+        // Do 4 rounds of SHA-256 for each of two messages (interleaved).  m0_a
+        // and m0_b contain the current 4 message schedule words for the first
+        // and second message respectively.
+        //
+        // If not all the message schedule words have been computed yet, then
+        // this also computes 4 more message schedule words for each message.
+        // m1_a-m3_a contain the next 3 groups of 4 message schedule words for
+        // the first message, and likewise m1_b-m3_b for the second.  After
+        // consuming the current value of m0_a, this macro computes the group
+        // after m3_a and writes it to m0_a, and likewise for *_b.  This means
+        // that the next (m0_a, m1_a, m2_a, m3_a) is the current (m1_a, m2_a,
+        // m3_a, m0_a), and likewise for *_b, so the caller must cycle through
+        // the registers accordingly.
+        .macro do_4rounds_2x i, k, m0_a, m1_a, m2_a, m3_a, \
+                             m0_b, m1_b, m2_b, m3_b
+        add t0_a\().4s, \m0_a\().4s, \k\().4s
+        add t0_b\().4s, \m0_b\().4s, \k\().4s
+        .if \i < 48
+        sha256su0 \m0_a\().4s, \m1_a\().4s
+        sha256su0 \m0_b\().4s, \m1_b\().4s
+        sha256su1 \m0_a\().4s, \m2_a\().4s, \m3_a\().4s
+        sha256su1 \m0_b\().4s, \m2_b\().4s, \m3_b\().4s
+        .endif
+        mov t1_a.16b, state0_a.16b
+        mov t1_b.16b, state0_b.16b
+        sha256h state0_a_q, state1_a_q, t0_a\().4s
+        sha256h state0_b_q, state1_b_q, t0_b\().4s
+        sha256h2 state1_a_q, t1_a_q, t0_a\().4s
+        sha256h2 state1_b_q, t1_b_q, t0_b\().4s
+        .endm
+
+        .macro do_16rounds_2x i, k0, k1, k2, k3
+        do_4rounds_2x \i + 0,  \k0, v16, v17, v18, v19, v20, v21, v22, v23
+        do_4rounds_2x \i + 4,  \k1, v17, v18, v19, v16, v21, v22, v23, v20
+        do_4rounds_2x \i + 8,  \k2, v18, v19, v16, v17, v22, v23, v20, v21
+        do_4rounds_2x \i + 12, \k3, v19, v16, v17, v18, v23, v20, v21, v22
+        .endm
+
+//
+// void __sha256_ce_finup2x(const struct sha256_state *sctx,
+//                          const u8 *data1, const u8 *data2, int len,
+//                          u8 out1[SHA256_DIGEST_SIZE],
+//                          u8 out2[SHA256_DIGEST_SIZE]);
+//
+// This function computes the SHA-256 digests of two messages |data1| and
+// |data2| that are both |len| bytes long, starting from the initial state
+// |sctx|.  |len| must be at least SHA256_BLOCK_SIZE.
+//
+// The instructions for the two SHA-256 operations are interleaved.  On many
+// CPUs, this is almost twice as fast as hashing each message individually due
+// to taking better advantage of the CPU's SHA-256 and SIMD throughput.
+//
+SYM_FUNC_START(__sha256_ce_finup2x)
+        sub sp, sp, #128
+        mov final_step, #0
+        load_round_constants x8
+
+        // Load the initial state from sctx->state.
+        ld1 {state0_a.4s-state1_a.4s}, [sctx]
+
+        // Load sctx->count.  Take the mod 64 of it to get the number of bytes
+        // that are buffered in sctx->buf.  Also save it in a register with len
+        // added to it.
+        ldr x8, [sctx, #OFFSETOF_COUNT]
+        add count, x8, len, sxtw
+        and x8, x8, #63
+        cbz x8, .Lfinup2x_enter_loop    // No bytes buffered?
+
+        // x8 bytes (1 to 63) are currently buffered in sctx->buf.  Load them
+        // followed by the first 64 - x8 bytes of data.  Since len >= 64, we
+        // just load 64 bytes from each of sctx->buf, data1, and data2
+        // unconditionally and rearrange the data as needed.
+        add x9, sctx, #OFFSETOF_BUF
+        ld1 {v16.16b-v19.16b}, [x9]
+        st1 {v16.16b-v19.16b}, [sp]
+
+        ld1 {v16.16b-v19.16b}, [data1], #64
+        add x9, sp, x8
+        st1 {v16.16b-v19.16b}, [x9]
+        ld1 {v16.4s-v19.4s}, [sp]
+
+        ld1 {v20.16b-v23.16b}, [data2], #64
+        st1 {v20.16b-v23.16b}, [x9]
+        ld1 {v20.4s-v23.4s}, [sp]
+
+        sub len, len, #64
+        sub data1, data1, x8
+        sub data2, data2, x8
+        add len, len, w8
+        mov state0_b.16b, state0_a.16b
+        mov state1_b.16b, state1_a.16b
+        b .Lfinup2x_loop_have_data
+
+.Lfinup2x_enter_loop:
+        sub len, len, #64
+        mov state0_b.16b, state0_a.16b
+        mov state1_b.16b, state1_a.16b
+.Lfinup2x_loop:
+        // Load the next two data blocks.
+        ld1 {v16.4s-v19.4s}, [data1], #64
+        ld1 {v20.4s-v23.4s}, [data2], #64
+.Lfinup2x_loop_have_data:
+        // Convert the words of the data blocks from big endian.
+CPU_LE( rev32 v16.16b, v16.16b )
+CPU_LE( rev32 v17.16b, v17.16b )
+CPU_LE( rev32 v18.16b, v18.16b )
+CPU_LE( rev32 v19.16b, v19.16b )
+CPU_LE( rev32 v20.16b, v20.16b )
+CPU_LE( rev32 v21.16b, v21.16b )
+CPU_LE( rev32 v22.16b, v22.16b )
+CPU_LE( rev32 v23.16b, v23.16b )
+.Lfinup2x_loop_have_bswapped_data:
+
+        // Save the original state for each block.
+        st1 {state0_a.4s-state1_b.4s}, [sp]
+
+        // Do the SHA-256 rounds on each block.
+        do_16rounds_2x 0,  v0,  v1,  v2,  v3
+        do_16rounds_2x 16, v4,  v5,  v6,  v7
+        do_16rounds_2x 32, v8,  v9,  v10, v11
+        do_16rounds_2x 48, v12, v13, v14, v15
+
+        // Add the original state for each block.
+        ld1 {v16.4s-v19.4s}, [sp]
+        add state0_a.4s, state0_a.4s, v16.4s
+        add state1_a.4s, state1_a.4s, v17.4s
+        add state0_b.4s, state0_b.4s, v18.4s
+        add state1_b.4s, state1_b.4s, v19.4s
+
+        // Update len and loop back if more blocks remain.
+        sub len, len, #64
+        tbz len, #31, .Lfinup2x_loop    // len >= 0?
+
+        // Check if any final blocks need to be handled.
+        // final_step = 2: all done
+        // final_step = 1: need to do count-only padding block
+        // final_step = 0: need to do the block with 0x80 padding byte
+        tbnz final_step, #1, .Lfinup2x_done
+        tbnz final_step, #0, .Lfinup2x_finalize_countonly
+        add len, len, #64
+        cbz len, .Lfinup2x_finalize_blockaligned
+
+        // Not block-aligned; 1 <= len <= 63 data bytes remain.  Pad the block.
+        // To do this, write the padding starting with the 0x80 byte to
+        // &sp[64].  Then for each message, copy the last 64 data bytes to sp
+        // and load from &sp[64 - len] to get the needed padding block.  This
+        // code relies on the data buffers being >= 64 bytes in length.
+        sub w8, len, #64                // w8 = len - 64
+        add data1, data1, w8, sxtw      // data1 += len - 64
+        add data2, data2, w8, sxtw      // data2 += len - 64
+        mov x9, 0x80
+        fmov d16, x9
+        movi v17.16b, #0
+        stp q16, q17, [sp, #64]
+        stp q17, q17, [sp, #96]
+        sub x9, sp, w8, sxtw            // x9 = &sp[64 - len]
+        cmp len, #56
+        b.ge 1f                         // will count spill into its own block?
+        lsl count, count, #3
+        rev count, count
+        str count, [x9, #56]
+        mov final_step, #2              // won't need count-only block
+        b 2f
+1:
+        mov final_step, #1              // will need count-only block
+2:
+        ld1 {v16.16b-v19.16b}, [data1]
+        st1 {v16.16b-v19.16b}, [sp]
+        ld1 {v16.4s-v19.4s}, [x9]
+        ld1 {v20.16b-v23.16b}, [data2]
+        st1 {v20.16b-v23.16b}, [sp]
+        ld1 {v20.4s-v23.4s}, [x9]
+        b .Lfinup2x_loop_have_data
+
+        // Prepare a padding block, either:
+        //
+        //      {0x80, 0, 0, 0, ..., count (as __be64)}
+        //      This is for a block aligned message.
+        //
+        //      {   0, 0, 0, 0, ..., count (as __be64)}
+        //      This is for a message whose length mod 64 is >= 56.
+        //
+        // Pre-swap the endianness of the words.
+.Lfinup2x_finalize_countonly:
+        movi v16.2d, #0
+        b 1f
+.Lfinup2x_finalize_blockaligned:
+        mov x8, #0x80000000
+        fmov d16, x8
+1:
+        movi v17.2d, #0
+        movi v18.2d, #0
+        ror count, count, #29           // ror(lsl(count, 3), 32)
+        mov v19.d[0], xzr
+        mov v19.d[1], count
+        mov v20.16b, v16.16b
+        movi v21.2d, #0
+        movi v22.2d, #0
+        mov v23.16b, v19.16b
+        mov final_step, #2
+        b .Lfinup2x_loop_have_bswapped_data
+
+.Lfinup2x_done:
+        // Write the two digests with all bytes in the correct order.
+CPU_LE( rev32 state0_a.16b, state0_a.16b )
+CPU_LE( rev32 state1_a.16b, state1_a.16b )
+CPU_LE( rev32 state0_b.16b, state0_b.16b )
+CPU_LE( rev32 state1_b.16b, state1_b.16b )
+        st1 {state0_a.4s-state1_a.4s}, [out1]
+        st1 {state0_b.4s-state1_b.4s}, [out2]
+        add sp, sp, #128
+        ret
+SYM_FUNC_END(__sha256_ce_finup2x)

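The payoff of the interleaving above is easiest to see outside assembly. Below is a semantic sketch in C, not kernel code: step() is a hypothetical stand-in for one SHA-256 round, and the point is only that the two per-message dependency chains are independent, so an out-of-order core can overlap them — which is where the near-2x throughput quoted in the comment block comes from.

        #include <stdint.h>

        /* hypothetical stand-in for one round: a serially dependent update */
        static uint32_t step(uint32_t s, uint32_t w)
        {
                return (s >> 2 | s << 30) + w;
        }

        /* advance two independent states in lockstep, as do_4rounds_2x does */
        void rounds_2x(uint32_t *sa, uint32_t *sb,
                       const uint32_t wa[64], const uint32_t wb[64])
        {
                uint32_t a = *sa, b = *sb;

                for (int i = 0; i < 64; i++) {
                        a = step(a, wa[i]);     /* chain for message A */
                        b = step(b, wb[i]);     /* chain for message B */
                }
                *sa = a;
                *sb = b;
        }
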
@@ -30,18 +30,24 @@ struct sha256_ce_state {
 extern const u32 sha256_ce_offsetof_count;
 extern const u32 sha256_ce_offsetof_finalize;

-asmlinkage int sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
-                                 int blocks);
+asmlinkage int __sha256_ce_transform(struct sha256_ce_state *sst, u8 const *src,
+                                     int blocks);

-static void __sha2_ce_transform(struct sha256_state *sst, u8 const *src,
+asmlinkage void __sha256_ce_finup2x(const struct sha256_state *sctx,
+                                    const u8 *data1, const u8 *data2, int len,
+                                    u8 out1[SHA256_DIGEST_SIZE],
+                                    u8 out2[SHA256_DIGEST_SIZE]);
+
+static void sha256_ce_transform(struct sha256_state *sst, u8 const *src,
                                 int blocks)
 {
         while (blocks) {
                 int rem;

                 kernel_neon_begin();
-                rem = sha2_ce_transform(container_of(sst, struct sha256_ce_state,
-                                                     sst), src, blocks);
+                rem = __sha256_ce_transform(container_of(sst,
+                                                         struct sha256_ce_state,
+                                                         sst), src, blocks);
                 kernel_neon_end();
                 src += (blocks - rem) * SHA256_BLOCK_SIZE;
                 blocks = rem;

@@ -55,8 +61,8 @@ const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,

 asmlinkage void sha256_block_data_order(u32 *digest, u8 const *src, int blocks);

-static void __sha256_block_data_order(struct sha256_state *sst, u8 const *src,
+static void sha256_arm64_transform(struct sha256_state *sst, u8 const *src,
                                       int blocks)
 {
         sha256_block_data_order(sst->state, src, blocks);
 }

@@ -68,10 +74,10 @@ static int sha256_ce_update(struct shash_desc *desc, const u8 *data,

         if (!crypto_simd_usable())
                 return sha256_base_do_update(desc, data, len,
-                                __sha256_block_data_order);
+                                sha256_arm64_transform);

         sctx->finalize = 0;
-        sha256_base_do_update(desc, data, len, __sha2_ce_transform);
+        sha256_base_do_update(desc, data, len, sha256_ce_transform);

         return 0;
 }

@@ -85,8 +91,8 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
         if (!crypto_simd_usable()) {
                 if (len)
                         sha256_base_do_update(desc, data, len,
-                                        __sha256_block_data_order);
-                sha256_base_do_finalize(desc, __sha256_block_data_order);
+                                        sha256_arm64_transform);
+                sha256_base_do_finalize(desc, sha256_arm64_transform);
                 return sha256_base_finish(desc, out);
         }

@@ -96,9 +102,9 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
          */
         sctx->finalize = finalize;

-        sha256_base_do_update(desc, data, len, __sha2_ce_transform);
+        sha256_base_do_update(desc, data, len, sha256_ce_transform);
         if (!finalize)
-                sha256_base_do_finalize(desc, __sha2_ce_transform);
+                sha256_base_do_finalize(desc, sha256_ce_transform);
         return sha256_base_finish(desc, out);
 }

@@ -107,15 +113,55 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out)
         struct sha256_ce_state *sctx = shash_desc_ctx(desc);

         if (!crypto_simd_usable()) {
-                sha256_base_do_finalize(desc, __sha256_block_data_order);
+                sha256_base_do_finalize(desc, sha256_arm64_transform);
                 return sha256_base_finish(desc, out);
         }

         sctx->finalize = 0;
-        sha256_base_do_finalize(desc, __sha2_ce_transform);
+        sha256_base_do_finalize(desc, sha256_ce_transform);
         return sha256_base_finish(desc, out);
 }

+static int sha256_ce_digest(struct shash_desc *desc, const u8 *data,
+                            unsigned int len, u8 *out)
+{
+        sha256_base_init(desc);
+        return sha256_ce_finup(desc, data, len, out);
+}
+
+static int sha256_ce_finup_mb(struct shash_desc *desc,
+                              const u8 * const data[], unsigned int len,
+                              u8 * const outs[], unsigned int num_msgs)
+{
+        struct sha256_ce_state *sctx = shash_desc_ctx(desc);
+
+        /*
+         * num_msgs != 2 should not happen here, since this algorithm sets
+         * mb_max_msgs=2, and the crypto API handles num_msgs <= 1 before
+         * calling into the algorithm's finup_mb method.
+         */
+        if (WARN_ON_ONCE(num_msgs != 2))
+                return -EOPNOTSUPP;
+
+        if (unlikely(!crypto_simd_usable()))
+                return -EOPNOTSUPP;
+
+        /* __sha256_ce_finup2x() assumes SHA256_BLOCK_SIZE <= len <= INT_MAX. */
+        if (unlikely(len < SHA256_BLOCK_SIZE || len > INT_MAX))
+                return -EOPNOTSUPP;
+
+        /* __sha256_ce_finup2x() assumes the following offsets. */
+        BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0);
+        BUILD_BUG_ON(offsetof(struct sha256_state, count) != 32);
+        BUILD_BUG_ON(offsetof(struct sha256_state, buf) != 40);
+
+        kernel_neon_begin();
+        __sha256_ce_finup2x(&sctx->sst, data[0], data[1], len, outs[0],
+                            outs[1]);
+        kernel_neon_end();
+        return 0;
+}
+
 static int sha256_ce_export(struct shash_desc *desc, void *out)
 {
         struct sha256_ce_state *sctx = shash_desc_ctx(desc);

@@ -155,9 +201,12 @@ static struct shash_alg algs[] = { {
         .update                 = sha256_ce_update,
         .final                  = sha256_ce_final,
         .finup                  = sha256_ce_finup,
+        .digest                 = sha256_ce_digest,
+        .finup_mb               = sha256_ce_finup_mb,
         .export                 = sha256_ce_export,
         .import                 = sha256_ce_import,
         .descsize               = sizeof(struct sha256_ce_state),
+        .mb_max_msgs            = 2,
         .statesize              = sizeof(struct sha256_state),
         .digestsize             = SHA256_DIGEST_SIZE,
         .base                   = {

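For reference, the contract sha256_ce_finup_mb() has to honor can be modeled in a few lines of ordinary C. The sketch below is not kernel code and uses OpenSSL's legacy SHA256_* API purely as an assumed stand-in reference: finishing two equal-length tails from one shared midstate must produce exactly the digests of the two whole messages, which is what __sha256_ce_finup2x() computes in a single interleaved pass.

        #include <openssl/sha.h>

        /* user-space model: fork one shared midstate, then finish twice */
        static void finup2x_model(const SHA256_CTX *mid,
                                  const unsigned char *d1,
                                  const unsigned char *d2, size_t len,
                                  unsigned char o1[32], unsigned char o2[32])
        {
                SHA256_CTX a = *mid, b = *mid;  /* the common prefix state */

                SHA256_Update(&a, d1, len);
                SHA256_Final(o1, &a);
                SHA256_Update(&b, d2, len);
                SHA256_Final(o2, &b);
        }

The -EOPNOTSUPP paths in the glue code exist so the crypto layer can fall back to exactly this one-message-at-a-time behavior when NEON is unavailable or len is out of range.
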
@@ -130,8 +130,8 @@ void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
 #endif

 #ifdef __KVM_NVHE_HYPERVISOR__
-void __pkvm_init_switch_pgd(struct kvm_nvhe_init_params *params,
-                            void (*finalize_fn)(void));
+void __pkvm_init_switch_pgd(phys_addr_t pgd, unsigned long sp,
+                            void (*fn)(void));
 int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
                 unsigned long *per_cpu_base, u32 hyp_va_bits);
 void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);

@@ -47,11 +47,8 @@ void pkvm_host_reclaim_page(struct kvm *host_kvm, phys_addr_t ipa);
  * machines, depending on the mode KVM is running in and on the type of guest
  * that is running.
  *
- * The ALLOW masks represent a bitmask of feature fields that are allowed
- * without any restrictions as long as they are supported by the system.
- *
- * The RESTRICT_UNSIGNED masks, if present, represent unsigned fields for
- * features that are restricted to support at most the specified feature.
+ * Each field in the masks represents the highest supported *unsigned* value for
+ * the feature, if supported by the system.
  *
  * If a feature field is not present in either, than it is not supported.
  *

@@ -67,15 +64,7 @@ void pkvm_host_reclaim_page(struct kvm *host_kvm, phys_addr_t ipa);
  * - Floating-point and Advanced SIMD
  * - GICv3(+) system register interface
  * - Data Independent Timing
- */
-#define PVM_ID_AA64PFR0_ALLOW (\
-        ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP) | \
-        ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD) | \
-        ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC) | \
-        ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) \
-        )
-
-/*
+ *
  * Restrict to the following *unsigned* features for protected VMs:
  * - AArch64 guests only (no support for AArch32 guests):
  *   AArch32 adds complexity in trap handling, emulation, condition codes,

@@ -84,7 +73,11 @@ void pkvm_host_reclaim_page(struct kvm *host_kvm, phys_addr_t ipa);
  * - RAS (v1)
  *   Supported by KVM
  */
-#define PVM_ID_AA64PFR0_RESTRICT_UNSIGNED (\
+#define PVM_ID_AA64PFR0_ALLOW (\
+        ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP) | \
+        ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD) | \
+        ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC) | \
+        ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) | \
         FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
         FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
         FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL2), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \

@@ -111,20 +104,16 @@ void pkvm_host_reclaim_page(struct kvm *host_kvm, phys_addr_t ipa);
  * - Distinction between Secure and Non-secure Memory
  * - Mixed-endian at EL0 only
  * - Non-context synchronizing exception entry and exit
+ *
+ * Restrict to the following *unsigned* features for protected VMs:
+ * - 40-bit IPA
+ * - 16-bit ASID
  */
 #define PVM_ID_AA64MMFR0_ALLOW (\
         ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGEND) | \
         ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_SNSMEM) | \
         ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGENDEL0) | \
-        ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_EXS) \
-        )
-
-/*
- * Restrict to the following *unsigned* features for protected VMs:
- * - 40-bit IPA
- * - 16-bit ASID
- */
-#define PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED (\
+        ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_EXS) | \
         FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_PARANGE), ID_AA64MMFR0_EL1_PARANGE_40) | \
         FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_ASIDBITS), ID_AA64MMFR0_EL1_ASIDBITS_16) \
         )

@@ -227,15 +216,6 @@ void pkvm_host_reclaim_page(struct kvm *host_kvm, phys_addr_t ipa);
         )

 /* Restrict pointer authentication to the basic version. */
-#define PVM_ID_AA64ISAR1_RESTRICT_UNSIGNED (\
-        FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA), ID_AA64ISAR1_EL1_APA_PAuth) | \
-        FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API), ID_AA64ISAR1_EL1_API_PAuth) \
-        )
-
-#define PVM_ID_AA64ISAR2_RESTRICT_UNSIGNED (\
-        FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3), ID_AA64ISAR2_EL1_APA3_PAuth) \
-        )
-
 #define PVM_ID_AA64ISAR1_ALLOW (\
         ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_DPB) | \
         ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_JSCVT) | \

@@ -248,13 +228,16 @@ void pkvm_host_reclaim_page(struct kvm *host_kvm, phys_addr_t ipa);
         ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_SPECRES) | \
         ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_BF16) | \
         ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_DGH) | \
-        ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_I8MM) \
+        ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_I8MM) | \
+        FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA), ID_AA64ISAR1_EL1_APA_PAuth) | \
+        FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API), ID_AA64ISAR1_EL1_API_PAuth) \
         )

 #define PVM_ID_AA64ISAR2_ALLOW (\
         ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_ATS1A) | \
         ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3) | \
-        ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_MOPS) \
+        ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_MOPS) | \
+        FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3), ID_AA64ISAR2_EL1_APA3_PAuth) \
         )

@@ -331,8 +314,8 @@ void pkvm_host_reclaim_page(struct kvm *host_kvm, phys_addr_t ipa);
 #define HFGxTR_nLS64 HFGxTR_EL2_nACCDATA_EL1

 #define PVM_HFGXTR_EL2_SET \
-        (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) >= ID_AA64PFR0_EL1_RAS_IMP ? 0ULL : HFGxTR_RAS_IMP) | \
-        (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) >= ID_AA64PFR0_EL1_RAS_V1P1 ? 0ULL : HFGxTR_RAS_V1P1) | \
+        (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), PVM_ID_AA64PFR0_ALLOW) >= ID_AA64PFR0_EL1_RAS_IMP ? 0ULL : HFGxTR_RAS_IMP) | \
+        (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), PVM_ID_AA64PFR0_ALLOW) >= ID_AA64PFR0_EL1_RAS_V1P1 ? 0ULL : HFGxTR_RAS_V1P1) | \
         (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), PVM_ID_AA64PFR0_ALLOW) ? 0ULL : HFGxTR_GIC) | \
         (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), PVM_ID_AA64PFR0_ALLOW) ? 0ULL : HFGxTR_CSV2) | \
         (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_LO), PVM_ID_AA64MMFR1_ALLOW) ? 0ULL : HFGxTR_LOR) | \

@@ -454,8 +437,8 @@ void pkvm_host_reclaim_page(struct kvm *host_kvm, phys_addr_t ipa);
         0

 #define PVM_HCRX_EL2_CLR \
-        (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA), PVM_ID_AA64ISAR1_RESTRICT_UNSIGNED) < ID_AA64ISAR1_EL1_APA_PAuth_LR ? 0ULL : HCRX_nPAuth_LR) | \
-        (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API), PVM_ID_AA64ISAR1_RESTRICT_UNSIGNED) < ID_AA64ISAR1_EL1_APA_PAuth_LR ? 0ULL : HCRX_nPAuth_LR) | \
+        (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA), PVM_ID_AA64ISAR1_ALLOW) < ID_AA64ISAR1_EL1_APA_PAuth_LR ? 0ULL : HCRX_nPAuth_LR) | \
+        (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API), PVM_ID_AA64ISAR1_ALLOW) < ID_AA64ISAR1_EL1_APA_PAuth_LR ? 0ULL : HCRX_nPAuth_LR) | \
         (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_GCS), PVM_ID_AA64PFR1_ALLOW) ? 0ULL : HCRX_nGCS) | \
         (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_SYSREG_128), PVM_ID_AA64ISAR2_ALLOW) ? 0ULL : HCRX_nSYSREG128) | \
         (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR3_EL1_ADERR), PVM_ID_AA64MMFR3_ALLOW) ? 0ULL : HCRX_nADERR) | \

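The consolidation above folds the old RESTRICT_UNSIGNED values into the ALLOW masks, so one mask now carries both "feature permitted" bits and per-field ceilings. A small illustrative sketch using the stock linux/bitfield.h helpers — the EXAMPLE_* names and field values are hypothetical, not taken from the patch:

        #include <linux/bits.h>
        #include <linux/bitfield.h>

        #define EXAMPLE_FIELD   GENMASK_ULL(7, 4)               /* one 4-bit ID field */
        #define EXAMPLE_ALLOW   FIELD_PREP(EXAMPLE_FIELD, 2)    /* ceiling value 2 */

        /* expose min(hardware value, configured ceiling) to the guest */
        static inline u64 example_clamp(u64 hw_val)
        {
                u64 cap = FIELD_GET(EXAMPLE_FIELD, EXAMPLE_ALLOW);

                return hw_val < cap ? hw_val : cap;
        }

This is also why the PVM_HFGXTR_EL2_SET and PVM_HCRX_EL2_CLR comparisons above were retargeted from the removed RESTRICT_UNSIGNED masks to the ALLOW masks: FIELD_GET() extracts the same ceiling from either.
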
@@ -12,6 +12,7 @@ typedef void (*dyn_hcall_t)(struct user_pt_regs *);
 struct kvm_hyp_iommu;
 struct iommu_iotlb_gather;
 struct kvm_hyp_iommu_domain;
+struct kvm_iommu_paddr_cache;

 #ifdef CONFIG_MODULES
 enum pkvm_psci_notification {

@@ -213,7 +214,7 @@ struct pkvm_module_ops {
         void (*iommu_reclaim_pages_atomic)(void *p, u8 order);
         int (*iommu_snapshot_host_stage2)(struct kvm_hyp_iommu_domain *domain);
         int (*hyp_smp_processor_id)(void);
-        ANDROID_KABI_RESERVE(1);
+        ANDROID_KABI_USE(1, void (*iommu_flush_unmap_cache)(struct kvm_iommu_paddr_cache *cache));
         ANDROID_KABI_RESERVE(2);
         ANDROID_KABI_RESERVE(3);
         ANDROID_KABI_RESERVE(4);

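ANDROID_KABI_USE(1, ...) above burns one of the u64 holes reserved earlier without moving any later member. The pattern, sketched below with hypothetical EXAMPLE_* macros (the real ones live in the Android common kernel's include/linux/android_kabi.h), is a union between the new member and the original padding word:

        #include <linux/types.h>

        #define EXAMPLE_KABI_RESERVE(n) u64 example_kabi_reserved##n
        #define EXAMPLE_KABI_USE(n, _new)               \
                union {                                 \
                        _new;                           \
                        u64 example_kabi_reserved##n;   \
                }

        struct example_ops {
                int (*probe)(void);
                /* was: EXAMPLE_KABI_RESERVE(1); same size, so the ABI holds */
                EXAMPLE_KABI_USE(1, void (*flush)(void *cache));
                EXAMPLE_KABI_RESERVE(2);
        };

Because a function pointer and a u64 have the same size and alignment on arm64, offsetof() of every later field and sizeof() of the struct are unchanged, which is the whole point of the reserve/use dance.
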
@@ -113,13 +113,21 @@

 #define OVERFLOW_STACK_SIZE     SZ_4K

+#if PAGE_SIZE == SZ_4K
+#define NVHE_STACK_SHIFT        (PAGE_SHIFT + 1)
+#else
+#define NVHE_STACK_SHIFT        PAGE_SHIFT
+#endif
+
+#define NVHE_STACK_SIZE         (UL(1) << NVHE_STACK_SHIFT)
+
 /*
  * With the minimum frame size of [x29, x30], exactly half the combined
  * sizes of the hyp and overflow stacks is the maximum size needed to
  * save the unwinded stacktrace; plus an additional entry to delimit the
  * end.
  */
-#define NVHE_STACKTRACE_SIZE    ((OVERFLOW_STACK_SIZE + PAGE_SIZE) / 2 + sizeof(long))
+#define NVHE_STACKTRACE_SIZE    ((OVERFLOW_STACK_SIZE + NVHE_STACK_SIZE) / 2 + sizeof(long))

 /*
  * Alignment of kernel segments (e.g. .text, .data).

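Spelled out with the usual PAGE_SHIFT values: 4K pages give NVHE_STACK_SHIFT = 12 + 1 = 13, so NVHE_STACK_SIZE = 8K; 16K and 64K pages keep one page per hyp stack (16K and 64K respectively). Only 4K-page kernels grow the stack beyond a page, and NVHE_STACKTRACE_SIZE now scales with that instead of with PAGE_SIZE. A hedged compile-time restatement, assuming the stock CONFIG_ARM64_4K_PAGES symbol:

        #include <linux/build_bug.h>

        /* sketch: the hyp stack doubles only on 4K-page kernels */
        #ifdef CONFIG_ARM64_4K_PAGES
        static_assert(NVHE_STACK_SIZE == 2 * PAGE_SIZE);
        #else
        static_assert(NVHE_STACK_SIZE == PAGE_SIZE);
        #endif
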
@@ -47,7 +47,7 @@ static inline void kvm_nvhe_unwind_init(struct unwind_state *state,

 DECLARE_KVM_NVHE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
 DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_stacktrace_info, kvm_stacktrace_info);
-DECLARE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+DECLARE_PER_CPU(unsigned long, kvm_arm_hyp_stack_base);

 void kvm_nvhe_dump_backtrace(unsigned long hyp_offset);

@@ -10,95 +10,13 @@

 #include <asm/pointer_auth.h>

-struct frame_tail {
-        struct frame_tail __user *fp;
-        unsigned long lr;
-} __attribute__((packed));
-
-/*
- * Get the return address for a single stackframe and return a pointer to the
- * next frame tail.
- */
-static struct frame_tail __user *
-user_backtrace(struct frame_tail __user *tail,
-               struct perf_callchain_entry_ctx *entry)
+static bool callchain_trace(void *data, unsigned long pc)
 {
-        struct frame_tail buftail;
-        unsigned long err;
-        unsigned long lr;
-
-        /* Also check accessibility of one struct frame_tail beyond */
-        if (!access_ok(tail, sizeof(buftail)))
-                return NULL;
-
-        pagefault_disable();
-        err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
-        pagefault_enable();
-
-        if (err)
-                return NULL;
-
-        lr = ptrauth_strip_user_insn_pac(buftail.lr);
-
-        perf_callchain_store(entry, lr);
-
-        /*
-         * Frame pointers should strictly progress back up the stack
-         * (towards higher addresses).
-         */
-        if (tail >= buftail.fp)
-                return NULL;
-
-        return buftail.fp;
+        struct perf_callchain_entry_ctx *entry = data;
+        return perf_callchain_store(entry, pc) == 0;
 }
-
-#ifdef CONFIG_COMPAT
-/*
- * The registers we're interested in are at the end of the variable
- * length saved register structure. The fp points at the end of this
- * structure so the address of this struct is:
- * (struct compat_frame_tail *)(xxx->fp)-1
- *
- * This code has been adapted from the ARM OProfile support.
- */
-struct compat_frame_tail {
-        compat_uptr_t fp; /* a (struct compat_frame_tail *) in compat mode */
-        u32 sp;
-        u32 lr;
-} __attribute__((packed));
-
-static struct compat_frame_tail __user *
-compat_user_backtrace(struct compat_frame_tail __user *tail,
-                      struct perf_callchain_entry_ctx *entry)
-{
-        struct compat_frame_tail buftail;
-        unsigned long err;
-
-        /* Also check accessibility of one struct frame_tail beyond */
-        if (!access_ok(tail, sizeof(buftail)))
-                return NULL;
-
-        pagefault_disable();
-        err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
-        pagefault_enable();
-
-        if (err)
-                return NULL;
-
-        perf_callchain_store(entry, buftail.lr);
-
-        /*
-         * Frame pointers should strictly progress back up the stack
-         * (towards higher addresses).
-         */
-        if (tail + 1 >= (struct compat_frame_tail __user *)
-                        compat_ptr(buftail.fp))
-                return NULL;
-
-        return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
-}
-#endif /* CONFIG_COMPAT */

 void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                          struct pt_regs *regs)
 {

@@ -107,35 +25,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                 return;
         }

-        perf_callchain_store(entry, regs->pc);
-
-        if (!compat_user_mode(regs)) {
-                /* AARCH64 mode */
-                struct frame_tail __user *tail;
-
-                tail = (struct frame_tail __user *)regs->regs[29];
-
-                while (entry->nr < entry->max_stack &&
-                       tail && !((unsigned long)tail & 0x7))
-                        tail = user_backtrace(tail, entry);
-        } else {
-#ifdef CONFIG_COMPAT
-                /* AARCH32 compat mode */
-                struct compat_frame_tail __user *tail;
-
-                tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
-
-                while ((entry->nr < entry->max_stack) &&
-                        tail && !((unsigned long)tail & 0x3))
-                        tail = compat_user_backtrace(tail, entry);
-#endif
-        }
-}
-
-static bool callchain_trace(void *data, unsigned long pc)
-{
-        struct perf_callchain_entry_ctx *entry = data;
-        return perf_callchain_store(entry, pc) == 0;
+        arch_stack_walk_user(callchain_trace, entry, regs);
 }

 void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,

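With the user unwinder moved behind arch_stack_walk_user(), perf becomes just one consumer of a generic callback API: the walker hands each recovered PC to a stack_trace_consume_fn, which returns true to keep walking. A minimal sketch of a different consumer (the helper names are hypothetical), assuming the arch_stack_walk_user() signature added by this series:

        #include <linux/stacktrace.h>
        #include <linux/ptrace.h>

        /* count user frames instead of storing them */
        static bool count_frame(void *data, unsigned long pc)
        {
                unsigned int *nr = data;

                return ++(*nr) < 64;    /* stop after 64 frames */
        }

        static unsigned int count_user_frames(struct pt_regs *regs)
        {
                unsigned int nr = 0;

                arch_stack_walk_user(count_frame, &nr, regs);
                return nr;
        }
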
@@ -242,3 +242,123 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
 	dump_backtrace(NULL, tsk, loglvl);
 	barrier();
 }
+
+/*
+ * The struct defined for userspace stack frame in AARCH64 mode.
+ */
+struct frame_tail {
+	struct frame_tail __user *fp;
+	unsigned long lr;
+} __attribute__((packed));
+
+/*
+ * Get the return address for a single stackframe and return a pointer to the
+ * next frame tail.
+ */
+static struct frame_tail __user *
+unwind_user_frame(struct frame_tail __user *tail, void *cookie,
+		  stack_trace_consume_fn consume_entry)
+{
+	struct frame_tail buftail;
+	unsigned long err;
+	unsigned long lr;
+
+	/* Also check accessibility of one struct frame_tail beyond */
+	if (!access_ok(tail, sizeof(buftail)))
+		return NULL;
+
+	pagefault_disable();
+	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
+	pagefault_enable();
+
+	if (err)
+		return NULL;
+
+	lr = ptrauth_strip_user_insn_pac(buftail.lr);
+
+	if (!consume_entry(cookie, lr))
+		return NULL;
+
+	/*
+	 * Frame pointers should strictly progress back up the stack
+	 * (towards higher addresses).
+	 */
+	if (tail >= buftail.fp)
+		return NULL;
+
+	return buftail.fp;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * The registers we're interested in are at the end of the variable
+ * length saved register structure. The fp points at the end of this
+ * structure so the address of this struct is:
+ * (struct compat_frame_tail *)(xxx->fp)-1
+ *
+ * This code has been adapted from the ARM OProfile support.
+ */
+struct compat_frame_tail {
+	compat_uptr_t	fp; /* a (struct compat_frame_tail *) in compat mode */
+	u32		sp;
+	u32		lr;
+} __attribute__((packed));
+
+static struct compat_frame_tail __user *
+unwind_compat_user_frame(struct compat_frame_tail __user *tail, void *cookie,
+			 stack_trace_consume_fn consume_entry)
+{
+	struct compat_frame_tail buftail;
+	unsigned long err;
+
+	/* Also check accessibility of one struct frame_tail beyond */
+	if (!access_ok(tail, sizeof(buftail)))
+		return NULL;
+
+	pagefault_disable();
+	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
+	pagefault_enable();
+
+	if (err)
+		return NULL;
+
+	if (!consume_entry(cookie, buftail.lr))
+		return NULL;
+
+	/*
+	 * Frame pointers should strictly progress back up the stack
+	 * (towards higher addresses).
+	 */
+	if (tail + 1 >= (struct compat_frame_tail __user *)
+			compat_ptr(buftail.fp))
+		return NULL;
+
+	return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
+}
+#endif /* CONFIG_COMPAT */
+
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+			  const struct pt_regs *regs)
+{
+	if (!consume_entry(cookie, regs->pc))
+		return;
+
+	if (!compat_user_mode(regs)) {
+		/* AARCH64 mode */
+		struct frame_tail __user *tail;
+
+		tail = (struct frame_tail __user *)regs->regs[29];
+		while (tail && !((unsigned long)tail & 0x7))
+			tail = unwind_user_frame(tail, cookie, consume_entry);
+	} else {
+#ifdef CONFIG_COMPAT
+		/* AARCH32 compat mode */
+		struct compat_frame_tail __user *tail;
+
+		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
+		while (tail && !((unsigned long)tail & 0x3))
+			tail = unwind_compat_user_frame(tail, cookie, consume_entry);
+#endif
+	}
+}
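
For context, a minimal sketch (not part of the merge) of how a perf-style consumer plugs into the arch_stack_walk_user() interface added above. It mirrors the callchain_trace() adapter visible in the removed perf code; returning false from the callback stops the walk:

	static bool callchain_trace(void *data, unsigned long pc)
	{
		struct perf_callchain_entry_ctx *entry = data;

		/* perf_callchain_store() returns 0 while the buffer has room. */
		return perf_callchain_store(entry, pc) == 0;
	}

	void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
				 struct pt_regs *regs)
	{
		/* Each consumed entry is the PC of one user frame. */
		arch_stack_walk_user(callchain_trace, entry, regs);
	}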
@@ -1167,7 +1167,7 @@ int __init early_brk64(unsigned long addr, unsigned long esr,
 		       struct pt_regs *regs)
 {
 #ifdef CONFIG_CFI_CLANG
-	if ((esr_brk_comment(esr) & ~CFI_BRK_IMM_MASK) == CFI_BRK_IMM_BASE)
+	if (esr_is_cfi_brk(esr))
 		return cfi_handler(regs, esr) != DBG_HOOK_HANDLED;
 #endif
 #ifdef CONFIG_KASAN_SW_TAGS
@@ -52,7 +52,7 @@ static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT;
 DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
 
-DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_base);
 DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 DECLARE_KVM_NVHE_PER_CPU(int, hyp_cpu_number);
 
@@ -384,7 +384,7 @@ static int pkvm_check_extension(struct kvm *kvm, long ext, int kvm_cap)
 		break;
 	case KVM_CAP_ARM_SVE:
 		r = kvm_cap && FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE),
-					 PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);
+					 PVM_ID_AA64PFR0_ALLOW);
 		break;
 	case KVM_CAP_ARM_PTRAUTH_ADDRESS:
 		r = kvm_cap &&
@@ -2200,7 +2200,7 @@ static void __init teardown_hyp_mode(void)
 
 	free_hyp_pgds();
 	for_each_possible_cpu(cpu) {
-		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+		free_pages(per_cpu(kvm_arm_hyp_stack_base, cpu), NVHE_STACK_SHIFT - PAGE_SHIFT);
 		free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
 		free_pages(kvm_nvhe_sym(kvm_arm_hyp_host_fp_state)[cpu],
 			   pkvm_host_fp_state_order());
@@ -2400,15 +2400,15 @@ static int __init init_hyp_mode(void)
 	 * Allocate stack pages for Hypervisor-mode
 	 */
 	for_each_possible_cpu(cpu) {
-		unsigned long stack_page;
+		unsigned long stack_base;
 
-		stack_page = __get_free_page(GFP_KERNEL);
-		if (!stack_page) {
+		stack_base = __get_free_pages(GFP_KERNEL, NVHE_STACK_SHIFT - PAGE_SHIFT);
+		if (!stack_base) {
 			err = -ENOMEM;
 			goto out_err;
 		}
 
-		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
+		per_cpu(kvm_arm_hyp_stack_base, cpu) = stack_base;
 	}
 
 	/*
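
A quick sketch of the size/order arithmetic behind the hunk above. The concrete NVHE_STACK_SHIFT value is an assumption for illustration; the diff only shows that it replaces PAGE_SHIFT:

	/* Example values, assuming 4K pages and a two-page hyp stack. */
	#define EX_PAGE_SHIFT		12
	#define EX_NVHE_STACK_SHIFT	13				/* assumed */
	#define EX_NVHE_STACK_SIZE	(1UL << EX_NVHE_STACK_SHIFT)	/* 8K */

	/* __get_free_pages() takes an order, i.e. log2 of the page count. */
	#define EX_STACK_ORDER		(EX_NVHE_STACK_SHIFT - EX_PAGE_SHIFT)	/* 1 */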
@@ -2484,9 +2484,9 @@ static int __init init_hyp_mode(void)
 	 */
 	for_each_possible_cpu(cpu) {
 		struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
-		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
+		char *stack_base = (char *)per_cpu(kvm_arm_hyp_stack_base, cpu);
 
-		err = create_hyp_stack(__pa(stack_page), &params->stack_hyp_va);
+		err = create_hyp_stack(__pa(stack_base), &params->stack_hyp_va);
 		if (err) {
 			kvm_err("Cannot map hyp stack\n");
 			goto out_err;
@@ -2498,7 +2498,7 @@ static int __init init_hyp_mode(void)
 		 * __hyp_pa() won't do the right thing there, since the stack
 		 * has been mapped in the flexible private VA space.
 		 */
-		params->stack_pa = __pa(stack_page);
+		params->stack_pa = __pa(stack_base);
 	}
 
 	for_each_possible_cpu(cpu) {
@@ -469,10 +469,15 @@ void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
 		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
 }
 
+static void print_nvhe_hyp_panic(const char *name, u64 panic_addr)
+{
+	kvm_err("nVHE hyp %s at: [<%016llx>] %pB!\n", name, panic_addr,
+		(void *)(panic_addr + kaslr_offset()));
+}
+
 static void kvm_nvhe_report_cfi_failure(u64 panic_addr)
 {
-	kvm_err("nVHE hyp CFI failure at: [<%016llx>] %pB!\n", panic_addr,
-		(void *)(panic_addr + kaslr_offset()));
+	print_nvhe_hyp_panic("CFI failure", panic_addr);
 
 	if (IS_ENABLED(CONFIG_CFI_PERMISSIVE))
 		kvm_err(" (CONFIG_CFI_PERMISSIVE ignored for hyp failures)\n");
@@ -484,17 +489,11 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
 					      u64 far, u64 hpfar)
 {
 	u64 elr_in_kimg = __phys_to_kimg(elr_phys);
-	u64 kaslr_off = kaslr_offset();
-	u64 hyp_offset = elr_in_kimg - kaslr_off - elr_virt;
+	u64 hyp_offset = elr_in_kimg - kaslr_offset() - elr_virt;
 	u64 mode = spsr & PSR_MODE_MASK;
 	u64 panic_addr = elr_virt + hyp_offset;
 	u64 mod_addr = pkvm_el2_mod_kern_va(elr_virt);
 
-	if (mod_addr) {
-		panic_addr = mod_addr;
-		kaslr_off = 0;
-	}
-
 	if (mode != PSR_MODE_EL2t && mode != PSR_MODE_EL2h) {
 		kvm_err("Invalid host exception to nVHE hyp!\n");
 	} else if (ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
@@ -513,14 +512,18 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
 
 		if (file)
 			kvm_err("nVHE hyp BUG at: %s:%u!\n", file, line);
+		else if (mod_addr)
+			kvm_err("nVHE hyp BUG at: [<%016llx>] %pB!\n", mod_addr,
+				(void *)mod_addr);
 		else
-			kvm_err("nVHE hyp BUG at: [<%016llx>] %pB!\n", panic_addr,
-				(void *)(panic_addr + kaslr_off));
+			print_nvhe_hyp_panic("BUG", panic_addr);
 	} else if (IS_ENABLED(CONFIG_CFI_CLANG) && esr_is_cfi_brk(esr)) {
 		kvm_nvhe_report_cfi_failure(panic_addr);
+	} else if (mod_addr) {
+		kvm_err("nVHE hyp panic at: [<%016llx>] %pB!\n", mod_addr,
+			(void *)mod_addr);
 	} else {
-		kvm_err("nVHE hyp panic at: [<%016llx>] %pB!\n", panic_addr,
-			(void *)(panic_addr + kaslr_off));
+		print_nvhe_hyp_panic("panic", panic_addr);
 	}
 
 	/* Dump the nVHE hypervisor backtrace */
@@ -83,16 +83,15 @@ alternative_else_nop_endif
 	eret
 	sb
 
-SYM_INNER_LABEL(__hyp_restore_elr_and_panic, SYM_L_GLOBAL)
-	// x0-x29,lr: hyp regs
+SYM_INNER_LABEL(__guest_exit_restore_elr_and_panic, SYM_L_GLOBAL)
+	// x2-x29,lr: vcpu regs
+	// vcpu x0-x1 on the stack
 
-	stp	x0, x1, [sp, #-16]!
 	adr_this_cpu x0, kvm_hyp_ctxt, x1
 	ldr	x0, [x0, #CPU_ELR_EL2]
 	msr	elr_el2, x0
-	ldp	x0, x1, [sp], #16
 
-SYM_INNER_LABEL(__hyp_panic, SYM_L_GLOBAL)
+SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
 	// x2-x29,lr: vcpu regs
 	// vcpu x0-x1 on the stack
 
@@ -110,7 +109,7 @@ SYM_INNER_LABEL(__hyp_panic, SYM_L_GLOBAL)
 	// accurate if the guest had been completely restored.
 	adr_this_cpu x0, kvm_hyp_ctxt, x1
 	adr_l	x1, hyp_panic
-	str	x1, [x0, #CPU_LR_OFFSET]
+	str	x1, [x0, #CPU_XREG_OFFSET(30)]
 
 	get_vcpu_ptr	x1, x0
 
@@ -122,10 +122,9 @@ el2_error:
 	eret
 	sb
 
-.macro invalid_vector	label, target = __hyp_panic
+.macro invalid_vector	label, target = __guest_exit_panic
 	.align	2
 SYM_CODE_START_LOCAL(\label)
-	stp	x0, x1, [sp, #-16]!
 	b	\target
 SYM_CODE_END(\label)
 .endm
@@ -720,7 +720,7 @@ guest:
 
 static inline void __kvm_unexpected_el2_exception(void)
 {
-	extern char __hyp_restore_elr_and_panic[];
+	extern char __guest_exit_restore_elr_and_panic[];
 	unsigned long addr, fixup;
 	struct kvm_exception_table_entry *entry, *end;
 	unsigned long elr_el2 = read_sysreg(elr_el2);
@@ -743,7 +743,7 @@ static inline void __kvm_unexpected_el2_exception(void)
 
 	/* Trigger a panic after restoring the hyp context. */
 	this_cpu_ptr(&kvm_hyp_ctxt)->sys_regs[ELR_EL2] = elr_el2;
-	write_sysreg(__hyp_restore_elr_and_panic, elr_el2);
+	write_sysreg(__guest_exit_restore_elr_and_panic, elr_el2);
 }
 
 #endif /* __ARM64_KVM_HYP_SWITCH_H__ */
@@ -6,6 +6,7 @@
 
 #include <kvm/iommu.h>
 #include <linux/io-pgtable.h>
+#include <nvhe/spinlock.h>
 
 #if IS_ENABLED(CONFIG_ARM_SMMU_V3_PKVM)
 #include <linux/io-pgtable-arm.h>
@@ -70,6 +71,30 @@ struct kvm_iommu_paddr_cache {
 	size_t pgsize[KVM_IOMMU_PADDR_CACHE_MAX];
 };
 
+void kvm_iommu_flush_unmap_cache(struct kvm_iommu_paddr_cache *cache);
+
+static inline hyp_spinlock_t *kvm_iommu_get_lock(struct kvm_hyp_iommu *iommu)
+{
+	/* See struct kvm_hyp_iommu */
+	BUILD_BUG_ON(sizeof(iommu->lock) != sizeof(hyp_spinlock_t));
+	return (hyp_spinlock_t *)(&iommu->lock);
+}
+
+static inline void kvm_iommu_lock_init(struct kvm_hyp_iommu *iommu)
+{
+	hyp_spin_lock_init(kvm_iommu_get_lock(iommu));
+}
+
+static inline void kvm_iommu_lock(struct kvm_hyp_iommu *iommu)
+{
+	hyp_spin_lock(kvm_iommu_get_lock(iommu));
+}
+
+static inline void kvm_iommu_unlock(struct kvm_hyp_iommu *iommu)
+{
+	hyp_spin_unlock(kvm_iommu_get_lock(iommu));
+}
+
 /**
  * struct kvm_iommu_ops - KVM iommu ops
  * @init: init the driver called once before the kernel de-privilege
@@ -183,27 +183,26 @@ SYM_FUNC_END(__host_hvc)
 	.endif
 .endm
 
-.macro __host_el2_vect handler:req
+.macro invalid_host_el2_vect
 	.align 7
 
 	/*
 	 * Test whether the SP has overflowed, without corrupting a GPR.
-	 * nVHE hypervisor stacks are aligned so that the PAGE_SHIFT bit
+	 * nVHE hypervisor stacks are aligned so that the NVHE_STACK_SHIFT bit
 	 * of SP should always be 1.
 	 */
 	add	sp, sp, x0			// sp' = sp + x0
 	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
-	tbz	x0, #PAGE_SHIFT, .L__hyp_sp_overflow\@
+	tbz	x0, #NVHE_STACK_SHIFT, .L__hyp_sp_overflow\@
 	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
 	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
 
-	/* If a guest is loaded, panic out of it. */
 	/*
 	 * The panic may not be clean if the exception is taken before the host
 	 * context has been saved by __host_exit or after the hyp context has
 	 * been partially clobbered by __host_enter.
 	 */
-	stp	x0, x1, [sp, #-16]!
-	b	\handler
+	b	hyp_panic
 
 .L__hyp_sp_overflow\@:
 	/* Switch to the overflow stack */
@@ -213,10 +212,6 @@ SYM_FUNC_END(__host_hvc)
 	ASM_BUG()
 .endm
 
-.macro host_el2_sync_vect
-	__host_el2_vect __hyp_panic
-.endm
-
 .macro invalid_host_el1_vect
 	.align 7
 	mov	x0, xzr		/* restore_host = false */
@@ -226,10 +221,6 @@ SYM_FUNC_END(__host_hvc)
 	b	__hyp_do_panic
 .endm
 
-.macro invalid_host_el2_vect
-	__host_el2_vect __hyp_panic
-.endm
-
 /*
  * The host vector does not use an ESB instruction in order to avoid consuming
  * SErrors that should only be consumed by the host. Guest entry is deferred by
@@ -247,7 +238,7 @@ SYM_CODE_START(__kvm_hyp_host_vector)
 	invalid_host_el2_vect			// FIQ EL2t
 	invalid_host_el2_vect			// Error EL2t
 
-	host_el2_sync_vect			// Synchronous EL2h
+	invalid_host_el2_vect			// Synchronous EL2h
 	invalid_host_el2_vect			// IRQ EL2h
 	invalid_host_el2_vect			// FIQ EL2h
 	invalid_host_el2_vect			// Error EL2h
@@ -278,39 +278,35 @@ alternative_else_nop_endif
 SYM_CODE_END(__kvm_handle_stub_hvc)
 
 /*
- * void __pkvm_init_switch_pgd(struct kvm_nvhe_init_params *params,
- *			       void (*finalize_fn)(void));
+ * void __pkvm_init_switch_pgd(phys_addr_t pgd, unsigned long sp,
+ *			       void (*fn)(void));
 *
 * SYM_TYPED_FUNC_START() allows C to call this ID-mapped function indirectly
 * using a physical pointer without triggering a kCFI failure.
 */
 SYM_TYPED_FUNC_START(__pkvm_init_switch_pgd)
-	/* Load the inputs from the VA pointer before turning the MMU off */
-	ldr	x5, [x0, #NVHE_INIT_PGD_PA]
-	ldr	x0, [x0, #NVHE_INIT_STACK_HYP_VA]
-
 	/* Turn the MMU off */
 	pre_disable_mmu_workaround
-	mrs	x2, sctlr_el2
-	bic	x3, x2, #SCTLR_ELx_M
-	msr	sctlr_el2, x3
+	mrs	x3, sctlr_el2
+	bic	x4, x3, #SCTLR_ELx_M
+	msr	sctlr_el2, x4
 	isb
 
 	tlbi	alle2
 
 	/* Install the new pgtables */
-	phys_to_ttbr x4, x5
+	phys_to_ttbr x5, x0
 alternative_if ARM64_HAS_CNP
-	orr	x4, x4, #TTBR_CNP_BIT
+	orr	x5, x5, #TTBR_CNP_BIT
 alternative_else_nop_endif
-	msr	ttbr0_el2, x4
+	msr	ttbr0_el2, x5
 
 	/* Set the new stack pointer */
-	mov	sp, x0
+	mov	sp, x1
 
 	/* And turn the MMU back on! */
-	set_sctlr_el2	x2
-	ret	x1
+	set_sctlr_el2	x3
+	ret	x2
 SYM_FUNC_END(__pkvm_init_switch_pgd)
 
 .popsection
@@ -331,7 +331,7 @@ size_t kvm_iommu_map_pages(pkvm_handle_t domain_id, unsigned long iova,
 	 * so far.
 	 */
 	if (pgcount)
-		__pkvm_host_unuse_dma(paddr, pgcount * pgsize);
+		__pkvm_host_unuse_dma(paddr + total_mapped, pgcount * pgsize);
 
 	domain_put(domain);
 	return total_mapped;
@@ -380,7 +380,7 @@ void kvm_iommu_iotlb_gather_add_page(struct kvm_hyp_iommu_domain *domain,
 	kvm_iommu_iotlb_gather_add_range(gather, iova, size);
 }
 
-static void kvm_iommu_flush_unmap_cache(struct kvm_iommu_paddr_cache *cache)
+void kvm_iommu_flush_unmap_cache(struct kvm_iommu_paddr_cache *cache)
 {
 	while (cache->ptr) {
 		cache->ptr--;
@@ -471,13 +471,13 @@ static int iommu_power_on(struct kvm_power_domain *pd)
 	bool prev;
 	int ret;
 
-	hyp_spin_lock(&iommu->lock);
+	kvm_iommu_lock(iommu);
 	prev = iommu->power_is_off;
 	iommu->power_is_off = false;
 	ret = kvm_iommu_ops->resume ? kvm_iommu_ops->resume(iommu) : 0;
 	if (ret)
 		iommu->power_is_off = prev;
-	hyp_spin_unlock(&iommu->lock);
+	kvm_iommu_unlock(iommu);
 	return ret;
 }
 
@@ -488,13 +488,13 @@ static int iommu_power_off(struct kvm_power_domain *pd)
 	bool prev;
 	int ret;
 
-	hyp_spin_lock(&iommu->lock);
+	kvm_iommu_lock(iommu);
 	prev = iommu->power_is_off;
 	iommu->power_is_off = true;
 	ret = kvm_iommu_ops->suspend ? kvm_iommu_ops->suspend(iommu) : 0;
 	if (ret)
 		iommu->power_is_off = prev;
-	hyp_spin_unlock(&iommu->lock);
+	kvm_iommu_unlock(iommu);
 	return ret;
 }
 
@@ -505,8 +505,7 @@ static const struct kvm_power_domain_ops iommu_power_ops = {
 
 int kvm_iommu_init_device(struct kvm_hyp_iommu *iommu)
 {
-	/* See struct kvm_hyp_iommu */
-	BUILD_BUG_ON(sizeof(u32) != sizeof(hyp_spinlock_t));
+	kvm_iommu_lock_init(iommu);
 
 	return pkvm_init_power_domain(&iommu->power_domain, &iommu_power_ops);
 }
@@ -1545,7 +1545,7 @@ static int guest_request_walker(const struct kvm_pgtable_visit_ctx *ctx,
 	phys_addr_t phys;
 
 	state = guest_get_page_state(pte, 0);
-	if ((data->desired_state & data->desired_mask) != state)
+	if (data->desired_state != (state & data->desired_mask))
 		return (state & PKVM_NOPAGE) ? -EFAULT : -EINVAL;
 
 	if (state & PKVM_NOPAGE) {
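
The operand swap above is the whole fix: the mask has to be applied to the page state read from the walked PTE, not to the wanted state. A standalone illustration with hypothetical bit values:

	#include <assert.h>

	#define EX_PAGE_OWNED	0x1UL	/* hypothetical state bit */
	#define EX_PAGE_SW_BIT	0x4UL	/* extra bit outside the mask */

	int main(void)
	{
		unsigned long desired_state = EX_PAGE_OWNED;
		unsigned long desired_mask = 0x3UL;
		unsigned long state = EX_PAGE_OWNED | EX_PAGE_SW_BIT;

		/* Old test masked the constant, so the extra bit broke the match. */
		assert((desired_state & desired_mask) != state);

		/* Fixed test masks the walked state; the comparison succeeds. */
		assert(desired_state == (state & desired_mask));
		return 0;
	}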
@@ -2826,7 +2826,7 @@ int __pkvm_host_reclaim_page(struct pkvm_hyp_vm *vm, u64 pfn, u64 ipa, u8 order)
 	case PKVM_PAGE_OWNED:
 		WARN_ON(__host_check_page_state_range(phys, page_size, PKVM_NOPAGE));
 		hyp_poison_page(phys);
-		psci_mem_protect_dec(order);
+		psci_mem_protect_dec(1 << order);
 		break;
 	case PKVM_PAGE_SHARED_BORROWED:
 	case PKVM_PAGE_SHARED_BORROWED | PKVM_PAGE_RESTRICTED_PROT:
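
The one-liner above corrects a unit mismatch: order is log2 of the page count, so the mem-protect counter must be decremented by 2^order pages rather than by order itself. A sketch of the conversion:

	/* order-to-pages conversion, as used by the fixed call. */
	static inline unsigned long pages_for_order(unsigned int order)
	{
		return 1UL << order;	/* order 0 -> 1 page, order 2 -> 4 pages */
	}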
@@ -512,10 +512,10 @@ int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr)
 
 	prev_base = __io_map_base;
 	/*
-	 * Efficient stack verification using the PAGE_SHIFT bit implies
+	 * Efficient stack verification using the NVHE_STACK_SHIFT bit implies
 	 * an alignment of our allocation on the order of the size.
 	 */
-	size = PAGE_SIZE * 2;
+	size = NVHE_STACK_SIZE * 2;
 	addr = ALIGN(__io_map_base, size);
 
 	ret = __pkvm_alloc_private_va_range(addr, size);
@@ -525,12 +525,12 @@ int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr)
 		 * at the higher address and leave the lower guard page
 		 * unbacked.
 		 *
-		 * Any valid stack address now has the PAGE_SHIFT bit as 1
+		 * Any valid stack address now has the NVHE_STACK_SHIFT bit as 1
 		 * and addresses corresponding to the guard page have the
-		 * PAGE_SHIFT bit as 0 - this is used for overflow detection.
+		 * NVHE_STACK_SHIFT bit as 0 - this is used for overflow detection.
 		 */
-		ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr + PAGE_SIZE,
-					  PAGE_SIZE, phys, PAGE_HYP);
+		ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr + NVHE_STACK_SIZE,
+					  NVHE_STACK_SIZE, phys, PAGE_HYP);
 		if (ret)
 			__io_map_base = prev_base;
 	}
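
A sketch of the guard-page layout and overflow check that the comments above (and the host.S hunk earlier) describe. NVHE_STACK_SIZE equals 1UL << NVHE_STACK_SHIFT by construction; everything else here is illustrative:

	/*
	 * Private VA layout per CPU (low to high), per the comment above:
	 *
	 *   [ guard page(s), unbacked ][ hyp stack, NVHE_STACK_SIZE bytes ]
	 *
	 * Because the allocation is aligned to twice the stack size, every
	 * valid stack address has the NVHE_STACK_SHIFT bit set, and guard
	 * page addresses have it clear.
	 */
	static inline int hyp_sp_overflowed(unsigned long sp, unsigned long stack_size)
	{
		return !(sp & stack_size);	/* stack_size == 1UL << NVHE_STACK_SHIFT */
	}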
@@ -152,6 +152,7 @@ const struct pkvm_module_ops module_ops = {
 	.iommu_reclaim_pages_atomic = kvm_iommu_reclaim_pages_atomic,
 	.iommu_snapshot_host_stage2 = kvm_iommu_snapshot_host_stage2,
 	.hyp_smp_processor_id = _hyp_smp_processor_id,
+	.iommu_flush_unmap_cache = kvm_iommu_flush_unmap_cache,
 };
 
 int __pkvm_init_module(void *module_init)
@@ -85,9 +85,9 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
 
 	/* Protected KVM does not support AArch32 guests. */
 	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
-		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
+		PVM_ID_AA64PFR0_ALLOW) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
 	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
-		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
+		PVM_ID_AA64PFR0_ALLOW) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
 
 	/*
 	 * Linux guests assume support for floating-point and Advanced SIMD. Do
@@ -510,11 +510,11 @@ static void pkvm_vcpu_init_features_from_host(struct pkvm_hyp_vcpu *hyp_vcpu)
 	if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), PVM_ID_AA64DFR0_ALLOW))
 		set_bit(KVM_ARM_VCPU_PMU_V3, allowed_features);
 
-	if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), PVM_ID_AA64PFR0_RESTRICT_UNSIGNED))
+	if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), PVM_ID_AA64PFR0_ALLOW))
 		set_bit(KVM_ARM_VCPU_SVE, allowed_features);
 
-	if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API), PVM_ID_AA64ISAR1_RESTRICT_UNSIGNED) &&
-	    FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA), PVM_ID_AA64ISAR1_RESTRICT_UNSIGNED))
+	if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API), PVM_ID_AA64ISAR1_ALLOW) &&
+	    FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA), PVM_ID_AA64ISAR1_ALLOW))
 		set_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, allowed_features);
 
 	if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI), PVM_ID_AA64ISAR1_ALLOW) &&
@@ -400,6 +400,7 @@ out:
 int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
 		unsigned long *per_cpu_base, u32 hyp_va_bits)
 {
+	struct kvm_nvhe_init_params *params;
 	void *virt = hyp_phys_to_virt(phys);
 	typeof(__pkvm_init_switch_pgd) *fn;
 	int ret;
@@ -427,8 +428,9 @@ int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
 	update_nvhe_init_params();
 
 	/* Jump in the idmap page to switch to the new page-tables */
+	params = this_cpu_ptr(&kvm_init_params);
 	fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
-	fn(this_cpu_ptr(&kvm_init_params), __pkvm_init_finalise);
+	fn(params->pgd_pa, params->stack_hyp_va, __pkvm_init_finalise);
 
 	unreachable();
 }
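
The caller change above matches the new trampoline signature from the hyp-init.S hunk earlier: __pkvm_init_switch_pgd() now receives the physical PGD and the hyp stack pointer as plain arguments, so the assembly no longer has to dereference a VA-based params pointer while it is turning the MMU off. The prototype, as given in that hunk's comment:

	void __pkvm_init_switch_pgd(phys_addr_t pgd, unsigned long sp,
				    void (*fn)(void));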
@@ -28,7 +28,7 @@ static void hyp_prepare_backtrace(unsigned long fp, unsigned long pc)
 	struct kvm_nvhe_stacktrace_info *stacktrace_info = this_cpu_ptr(&kvm_stacktrace_info);
 	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
 
-	stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - PAGE_SIZE);
+	stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - NVHE_STACK_SIZE);
 	stacktrace_info->overflow_stack_base = (unsigned long)this_cpu_ptr(overflow_stack);
 	stacktrace_info->fp = fp;
 	stacktrace_info->pc = pc;
@@ -54,7 +54,7 @@ static struct stack_info stackinfo_get_hyp(void)
 {
 	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
 	unsigned long high = params->stack_hyp_va;
-	unsigned long low = high - PAGE_SIZE;
+	unsigned long low = high - NVHE_STACK_SIZE;
 
 	return (struct stack_info) {
 		.low = low,
@@ -53,7 +53,6 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
 /*
  * Returns the restricted features values of the feature register based on the
  * limitations in restrict_fields.
- * A feature id field value of 0b0000 does not impose any restrictions.
  * Note: Use only for unsigned feature field values.
  */
 static u64 get_restricted_features_unsigned(u64 sys_reg_val,
@@ -86,33 +85,32 @@ static u64 get_restricted_features_unsigned(u64 sys_reg_val,
 
 static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
 {
-	u64 set_mask = 0;
-	u64 allow_mask = PVM_ID_AA64PFR0_ALLOW;
-
-	set_mask |= get_restricted_features_unsigned(id_aa64pfr0_el1_sys_val,
-		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);
+	u64 value = get_restricted_features_unsigned(id_aa64pfr0_el1_sys_val,
+						     PVM_ID_AA64PFR0_ALLOW);
 
 	if (!vcpu_has_sve(vcpu))
-		set_mask &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE);
+		value &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE);
 
-	return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask;
+	return value;
 }
 
 static u64 get_pvm_id_aa64pfr1(const struct kvm_vcpu *vcpu)
 {
 	const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm);
-	u64 allow_mask = PVM_ID_AA64PFR1_ALLOW;
+	u64 value = get_restricted_features_unsigned(id_aa64pfr1_el1_sys_val,
+						     PVM_ID_AA64PFR1_ALLOW);
 
 	if (!kvm_has_mte(kvm))
-		allow_mask &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
+		value &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
 
-	return id_aa64pfr1_el1_sys_val & allow_mask;
+	return value;
 }
 
 static u64 get_pvm_id_aa64zfr0(const struct kvm_vcpu *vcpu)
 {
 	if (vcpu_has_sve(vcpu))
-		return id_aa64zfr0_el1_sys_val & PVM_ID_AA64ZFR0_ALLOW;
+		return get_restricted_features_unsigned(id_aa64zfr0_el1_sys_val,
+							PVM_ID_AA64ZFR0_ALLOW);
 
 	return 0;
 }
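
get_restricted_features_unsigned()'s body is not part of this hunk; as a reading aid, here is a simplified sketch of the usual clamping scheme for unsigned ID register fields (4 bits per field, each capped at the allowed maximum), which is the behaviour its comment describes:

	#define EX_FIELD_BITS	4

	static unsigned long long
	clamp_unsigned_fields(unsigned long long sys_reg_val,
			      unsigned long long allow)
	{
		unsigned long long value = 0;
		unsigned long long mask = (1ULL << EX_FIELD_BITS) - 1;

		while (mask) {
			unsigned long long hw = sys_reg_val & mask;
			unsigned long long max = allow & mask;

			/* Keep the smaller of the two field values. */
			value |= (hw < max) ? hw : max;
			mask <<= EX_FIELD_BITS;
		}
		return value;
	}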
@@ -164,46 +162,46 @@ static u64 get_pvm_id_aa64isar0(const struct kvm_vcpu *vcpu)
 
 static u64 get_pvm_id_aa64isar1(const struct kvm_vcpu *vcpu)
 {
-	u64 allow_mask = PVM_ID_AA64ISAR1_ALLOW;
+	u64 value = get_restricted_features_unsigned(id_aa64isar1_el1_sys_val,
+						     PVM_ID_AA64ISAR1_ALLOW);
 
 	if (!vcpu_has_ptrauth(vcpu))
-		allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
-				ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
-				ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
-				ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
+		value &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
+			   ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
+			   ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
+			   ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
 
-	return id_aa64isar1_el1_sys_val & allow_mask;
+	return value;
 }
 
 static u64 get_pvm_id_aa64isar2(const struct kvm_vcpu *vcpu)
 {
-	u64 allow_mask = PVM_ID_AA64ISAR2_ALLOW;
+	u64 value = get_restricted_features_unsigned(id_aa64isar2_el1_sys_val,
+						     PVM_ID_AA64ISAR2_ALLOW);
 
 	if (!vcpu_has_ptrauth(vcpu))
-		allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
-				ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
+		value &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
+			   ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
 
-	return id_aa64isar2_el1_sys_val & allow_mask;
+	return id_aa64isar2_el1_sys_val & value;
 }
 
 static u64 get_pvm_id_aa64mmfr0(const struct kvm_vcpu *vcpu)
 {
-	u64 set_mask;
-
-	set_mask = get_restricted_features_unsigned(id_aa64mmfr0_el1_sys_val,
-		PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED);
-
-	return (id_aa64mmfr0_el1_sys_val & PVM_ID_AA64MMFR0_ALLOW) | set_mask;
+	return get_restricted_features_unsigned(id_aa64mmfr0_el1_sys_val,
+						PVM_ID_AA64MMFR0_ALLOW);
 }
 
 static u64 get_pvm_id_aa64mmfr1(const struct kvm_vcpu *vcpu)
 {
-	return id_aa64mmfr1_el1_sys_val & PVM_ID_AA64MMFR1_ALLOW;
+	return get_restricted_features_unsigned(id_aa64mmfr1_el1_sys_val,
+						PVM_ID_AA64MMFR1_ALLOW);
 }
 
 static u64 get_pvm_id_aa64mmfr2(const struct kvm_vcpu *vcpu)
 {
-	return id_aa64mmfr2_el1_sys_val & PVM_ID_AA64MMFR2_ALLOW;
+	return get_restricted_features_unsigned(id_aa64mmfr2_el1_sys_val,
+						PVM_ID_AA64MMFR2_ALLOW);
 }
 
 /* Read a sanitized cpufeature ID register by its encoding */
@@ -278,7 +276,7 @@ static bool pvm_access_id_aarch32(struct kvm_vcpu *vcpu,
 	 * of AArch32 feature id registers.
 	 */
 	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
-		     PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
+		     PVM_ID_AA64PFR0_ALLOW) > ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
 
 	return pvm_access_raz_wi(vcpu, p, r);
 }
@@ -278,7 +278,7 @@ struct hyp_event *hyp_trace_find_event(int id)
 	table = rcu_dereference(mod_event_tables.tables);
 
 	for (int i = 0; i < mod_event_tables.nr_tables; i++) {
-		if (table->nr_events < id) {
+		if (table->nr_events <= id) {
 			id -= table->nr_events;
			table++;
			continue;
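
The `<` to `<=` change above fixes an off-by-one: event ids are zero-based within each table, so a table holding nr_events entries covers ids 0 .. nr_events-1 and must be skipped when id equals nr_events. A standalone sketch of the same walk:

	struct ex_table { int nr_events; };

	/* Returns the index of the table owning `id`, or -1 if none does. */
	static int ex_find_table(const struct ex_table *tables, int nr_tables, int id)
	{
		for (int i = 0; i < nr_tables; i++) {
			if (tables[i].nr_events <= id) {
				id -= tables[i].nr_events;	/* skip this table */
				continue;
			}
			return i;	/* `id` is now table-local */
		}
		return -1;
	}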
@@ -802,10 +802,10 @@ int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr)
 
 	mutex_lock(&kvm_hyp_pgd_mutex);
 	/*
-	 * Efficient stack verification using the PAGE_SHIFT bit implies
+	 * Efficient stack verification using the NVHE_STACK_SHIFT bit implies
 	 * an alignment of our allocation on the order of the size.
 	 */
-	size = PAGE_SIZE * 2;
+	size = NVHE_STACK_SIZE * 2;
 	base = ALIGN_DOWN(io_map_base - size, size);
 
 	ret = __hyp_alloc_private_va_range(base);
@@ -822,12 +822,12 @@ int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr)
 		 * at the higher address and leave the lower guard page
 		 * unbacked.
 		 *
-		 * Any valid stack address now has the PAGE_SHIFT bit as 1
+		 * Any valid stack address now has the NVHE_STACK_SHIFT bit as 1
 		 * and addresses corresponding to the guard page have the
-		 * PAGE_SHIFT bit as 0 - this is used for overflow detection.
+		 * NVHE_STACK_SHIFT bit as 0 - this is used for overflow detection.
 		 */
-		ret = __create_hyp_mappings(base + PAGE_SIZE, PAGE_SIZE, phys_addr,
-					    PAGE_HYP);
+		ret = __create_hyp_mappings(base + NVHE_STACK_SIZE, NVHE_STACK_SIZE,
+					    phys_addr, PAGE_HYP);
 		if (ret)
 			kvm_err("Cannot map hyp stack\n");
 
@@ -51,7 +51,7 @@ static struct stack_info stackinfo_get_hyp(void)
 	struct kvm_nvhe_stacktrace_info *stacktrace_info
 		= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
 	unsigned long low = (unsigned long)stacktrace_info->stack_base;
-	unsigned long high = low + PAGE_SIZE;
+	unsigned long high = low + NVHE_STACK_SIZE;
 
 	return (struct stack_info) {
 		.low = low,
@@ -61,8 +61,8 @@ static struct stack_info stackinfo_get_hyp(void)
 
 static struct stack_info stackinfo_get_hyp_kern_va(void)
 {
-	unsigned long low = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page);
-	unsigned long high = low + PAGE_SIZE;
+	unsigned long low = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_base);
+	unsigned long high = low + NVHE_STACK_SIZE;
 
 	return (struct stack_info) {
 		.low = low,
@@ -267,6 +267,9 @@ void ioremap_phys_range_hook(phys_addr_t phys_addr, size_t size, pgprot_t prot)
 
 	VM_BUG_ON(!PAGE_ALIGNED(phys_addr) || !PAGE_ALIGNED(size));
 
+	size = ALIGN(size, guard_granule);
+	phys_addr = ALIGN_DOWN(phys_addr, guard_granule);
+
 	mutex_lock(&ioremap_guard_lock);
 	mas_lock(&mas);
 
@@ -311,6 +314,9 @@ void iounmap_phys_range_hook(phys_addr_t phys_addr, size_t size)
 
 	VM_BUG_ON(!PAGE_ALIGNED(phys_addr) || !PAGE_ALIGNED(size));
 
+	size = ALIGN(size, guard_granule);
+	phys_addr = ALIGN_DOWN(phys_addr, guard_granule);
+
 	mutex_lock(&ioremap_guard_lock);
 	mas_lock(&mas);
 
@@ -52,7 +52,6 @@ CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 # CONFIG_RSEQ is not set
 CONFIG_PROFILING=y
-CONFIG_RUST=y
 CONFIG_SMP=y
 CONFIG_X86_X2APIC=y
 CONFIG_HYPERVISOR_GUEST=y
@@ -538,7 +537,6 @@ CONFIG_POWERCAP=y
 CONFIG_IDLE_INJECT=y
 CONFIG_DTPM=y
 CONFIG_ANDROID_BINDER_IPC=y
-CONFIG_ANDROID_BINDER_IPC_RUST=m
 CONFIG_ANDROID_BINDERFS=y
 CONFIG_ANDROID_DEBUG_SYMBOLS=y
 CONFIG_ANDROID_VENDOR_HOOKS=y
@@ -329,6 +329,374 @@ SYM_TYPED_FUNC_START(sha256_ni_transform)
 	RET
 SYM_FUNC_END(sha256_ni_transform)
 
+#undef DIGEST_PTR
+#undef DATA_PTR
+#undef NUM_BLKS
+#undef SHA256CONSTANTS
+#undef MSG
+#undef STATE0
+#undef STATE1
+#undef MSG0
+#undef MSG1
+#undef MSG2
+#undef MSG3
+#undef TMP
+#undef SHUF_MASK
+#undef ABEF_SAVE
+#undef CDGH_SAVE
+
+// parameters for __sha256_ni_finup2x()
+#define SCTX		%rdi
+#define DATA1		%rsi
+#define DATA2		%rdx
+#define LEN		%ecx
+#define LEN8		%cl
+#define LEN64		%rcx
+#define OUT1		%r8
+#define OUT2		%r9
+
+// other scalar variables
+#define SHA256CONSTANTS	%rax
+#define COUNT		%r10
+#define COUNT32		%r10d
+#define FINAL_STEP	%r11d
+
+// rbx is used as a temporary.
+
+#define MSG		%xmm0	// sha256rnds2 implicit operand
+#define STATE0_A	%xmm1
+#define STATE1_A	%xmm2
+#define STATE0_B	%xmm3
+#define STATE1_B	%xmm4
+#define TMP_A		%xmm5
+#define TMP_B		%xmm6
+#define MSG0_A		%xmm7
+#define MSG1_A		%xmm8
+#define MSG2_A		%xmm9
+#define MSG3_A		%xmm10
+#define MSG0_B		%xmm11
+#define MSG1_B		%xmm12
+#define MSG2_B		%xmm13
+#define MSG3_B		%xmm14
+#define SHUF_MASK	%xmm15
+
+#define OFFSETOF_STATE	0	// offsetof(struct sha256_state, state)
+#define OFFSETOF_COUNT	32	// offsetof(struct sha256_state, count)
+#define OFFSETOF_BUF	40	// offsetof(struct sha256_state, buf)
+
+// Do 4 rounds of SHA-256 for each of two messages (interleaved).  m0_a and m0_b
+// contain the current 4 message schedule words for the first and second message
+// respectively.
+//
+// If not all the message schedule words have been computed yet, then this also
+// computes 4 more message schedule words for each message.  m1_a-m3_a contain
+// the next 3 groups of 4 message schedule words for the first message, and
+// likewise m1_b-m3_b for the second.  After consuming the current value of
+// m0_a, this macro computes the group after m3_a and writes it to m0_a, and
+// likewise for *_b.  This means that the next (m0_a, m1_a, m2_a, m3_a) is the
+// current (m1_a, m2_a, m3_a, m0_a), and likewise for *_b, so the caller must
+// cycle through the registers accordingly.
+.macro	do_4rounds_2x	i, m0_a, m1_a, m2_a, m3_a, m0_b, m1_b, m2_b, m3_b
+	movdqa		(\i-32)*4(SHA256CONSTANTS), TMP_A
+	movdqa		TMP_A, TMP_B
+	paddd		\m0_a, TMP_A
+	paddd		\m0_b, TMP_B
+.if \i < 48
+	sha256msg1	\m1_a, \m0_a
+	sha256msg1	\m1_b, \m0_b
+.endif
+	movdqa		TMP_A, MSG
+	sha256rnds2	STATE0_A, STATE1_A
+	movdqa		TMP_B, MSG
+	sha256rnds2	STATE0_B, STATE1_B
+	pshufd		$0x0E, TMP_A, MSG
+	sha256rnds2	STATE1_A, STATE0_A
+	pshufd		$0x0E, TMP_B, MSG
+	sha256rnds2	STATE1_B, STATE0_B
+.if \i < 48
+	movdqa		\m3_a, TMP_A
+	movdqa		\m3_b, TMP_B
+	palignr		$4, \m2_a, TMP_A
+	palignr		$4, \m2_b, TMP_B
+	paddd		TMP_A, \m0_a
+	paddd		TMP_B, \m0_b
+	sha256msg2	\m3_a, \m0_a
+	sha256msg2	\m3_b, \m0_b
+.endif
+.endm
+
+//
+// void __sha256_ni_finup2x(const struct sha256_state *sctx,
+//			    const u8 *data1, const u8 *data2, int len,
+//			    u8 out1[SHA256_DIGEST_SIZE],
+//			    u8 out2[SHA256_DIGEST_SIZE]);
+//
+// This function computes the SHA-256 digests of two messages |data1| and
+// |data2| that are both |len| bytes long, starting from the initial state
+// |sctx|.  |len| must be at least SHA256_BLOCK_SIZE.
+//
+// The instructions for the two SHA-256 operations are interleaved.  On many
+// CPUs, this is almost twice as fast as hashing each message individually due
+// to taking better advantage of the CPU's SHA-256 and SIMD throughput.
+//
+SYM_FUNC_START(__sha256_ni_finup2x)
+	// Allocate 128 bytes of stack space, 16-byte aligned.
+	push		%rbx
+	push		%rbp
+	mov		%rsp, %rbp
+	sub		$128, %rsp
+	and		$~15, %rsp
+
+	// Load the shuffle mask for swapping the endianness of 32-bit words.
+	movdqa		PSHUFFLE_BYTE_FLIP_MASK(%rip), SHUF_MASK
+
+	// Set up pointer to the round constants.
+	lea		K256+32*4(%rip), SHA256CONSTANTS
+
+	// Initially we're not processing the final blocks.
+	xor		FINAL_STEP, FINAL_STEP
+
+	// Load the initial state from sctx->state.
+	movdqu		OFFSETOF_STATE+0*16(SCTX), STATE0_A	// DCBA
+	movdqu		OFFSETOF_STATE+1*16(SCTX), STATE1_A	// HGFE
+	movdqa		STATE0_A, TMP_A
+	punpcklqdq	STATE1_A, STATE0_A			// FEBA
+	punpckhqdq	TMP_A, STATE1_A				// DCHG
+	pshufd		$0x1B, STATE0_A, STATE0_A		// ABEF
+	pshufd		$0xB1, STATE1_A, STATE1_A		// CDGH
+
+	// Load sctx->count.  Take the mod 64 of it to get the number of bytes
+	// that are buffered in sctx->buf.  Also save it in a register with LEN
+	// added to it.
+	mov		LEN, LEN
+	mov		OFFSETOF_COUNT(SCTX), %rbx
+	lea		(%rbx, LEN64, 1), COUNT
+	and		$63, %ebx
+	jz		.Lfinup2x_enter_loop	// No bytes buffered?
+
+	// %ebx bytes (1 to 63) are currently buffered in sctx->buf.  Load them
+	// followed by the first 64 - %ebx bytes of data.  Since LEN >= 64, we
+	// just load 64 bytes from each of sctx->buf, DATA1, and DATA2
+	// unconditionally and rearrange the data as needed.
+
+	movdqu		OFFSETOF_BUF+0*16(SCTX), MSG0_A
+	movdqu		OFFSETOF_BUF+1*16(SCTX), MSG1_A
+	movdqu		OFFSETOF_BUF+2*16(SCTX), MSG2_A
+	movdqu		OFFSETOF_BUF+3*16(SCTX), MSG3_A
+	movdqa		MSG0_A, 0*16(%rsp)
+	movdqa		MSG1_A, 1*16(%rsp)
+	movdqa		MSG2_A, 2*16(%rsp)
+	movdqa		MSG3_A, 3*16(%rsp)
+
+	movdqu		0*16(DATA1), MSG0_A
+	movdqu		1*16(DATA1), MSG1_A
+	movdqu		2*16(DATA1), MSG2_A
+	movdqu		3*16(DATA1), MSG3_A
+	movdqu		MSG0_A, 0*16(%rsp,%rbx)
+	movdqu		MSG1_A, 1*16(%rsp,%rbx)
+	movdqu		MSG2_A, 2*16(%rsp,%rbx)
+	movdqu		MSG3_A, 3*16(%rsp,%rbx)
+	movdqa		0*16(%rsp), MSG0_A
+	movdqa		1*16(%rsp), MSG1_A
+	movdqa		2*16(%rsp), MSG2_A
+	movdqa		3*16(%rsp), MSG3_A
+
+	movdqu		0*16(DATA2), MSG0_B
+	movdqu		1*16(DATA2), MSG1_B
+	movdqu		2*16(DATA2), MSG2_B
+	movdqu		3*16(DATA2), MSG3_B
+	movdqu		MSG0_B, 0*16(%rsp,%rbx)
+	movdqu		MSG1_B, 1*16(%rsp,%rbx)
+	movdqu		MSG2_B, 2*16(%rsp,%rbx)
+	movdqu		MSG3_B, 3*16(%rsp,%rbx)
+	movdqa		0*16(%rsp), MSG0_B
+	movdqa		1*16(%rsp), MSG1_B
+	movdqa		2*16(%rsp), MSG2_B
+	movdqa		3*16(%rsp), MSG3_B
+
+	sub		$64, %rbx	// rbx = buffered - 64
+	sub		%rbx, DATA1	// DATA1 += 64 - buffered
+	sub		%rbx, DATA2	// DATA2 += 64 - buffered
+	add		%ebx, LEN	// LEN += buffered - 64
+	movdqa		STATE0_A, STATE0_B
+	movdqa		STATE1_A, STATE1_B
+	jmp		.Lfinup2x_loop_have_data
+
+.Lfinup2x_enter_loop:
+	sub		$64, LEN
+	movdqa		STATE0_A, STATE0_B
+	movdqa		STATE1_A, STATE1_B
+.Lfinup2x_loop:
+	// Load the next two data blocks.
+	movdqu		0*16(DATA1), MSG0_A
+	movdqu		0*16(DATA2), MSG0_B
+	movdqu		1*16(DATA1), MSG1_A
+	movdqu		1*16(DATA2), MSG1_B
+	movdqu		2*16(DATA1), MSG2_A
+	movdqu		2*16(DATA2), MSG2_B
+	movdqu		3*16(DATA1), MSG3_A
+	movdqu		3*16(DATA2), MSG3_B
+	add		$64, DATA1
+	add		$64, DATA2
+.Lfinup2x_loop_have_data:
+	// Convert the words of the data blocks from big endian.
+	pshufb		SHUF_MASK, MSG0_A
+	pshufb		SHUF_MASK, MSG0_B
+	pshufb		SHUF_MASK, MSG1_A
+	pshufb		SHUF_MASK, MSG1_B
+	pshufb		SHUF_MASK, MSG2_A
+	pshufb		SHUF_MASK, MSG2_B
+	pshufb		SHUF_MASK, MSG3_A
+	pshufb		SHUF_MASK, MSG3_B
+.Lfinup2x_loop_have_bswapped_data:
+
+	// Save the original state for each block.
+	movdqa		STATE0_A, 0*16(%rsp)
+	movdqa		STATE0_B, 1*16(%rsp)
+	movdqa		STATE1_A, 2*16(%rsp)
+	movdqa		STATE1_B, 3*16(%rsp)
+
+	// Do the SHA-256 rounds on each block.
+.irp i, 0, 16, 32, 48
+	do_4rounds_2x	(\i + 0),  MSG0_A, MSG1_A, MSG2_A, MSG3_A, \
+				   MSG0_B, MSG1_B, MSG2_B, MSG3_B
+	do_4rounds_2x	(\i + 4),  MSG1_A, MSG2_A, MSG3_A, MSG0_A, \
+				   MSG1_B, MSG2_B, MSG3_B, MSG0_B
+	do_4rounds_2x	(\i + 8),  MSG2_A, MSG3_A, MSG0_A, MSG1_A, \
+				   MSG2_B, MSG3_B, MSG0_B, MSG1_B
+	do_4rounds_2x	(\i + 12), MSG3_A, MSG0_A, MSG1_A, MSG2_A, \
+				   MSG3_B, MSG0_B, MSG1_B, MSG2_B
+.endr
+
+	// Add the original state for each block.
+	paddd		0*16(%rsp), STATE0_A
+	paddd		1*16(%rsp), STATE0_B
+	paddd		2*16(%rsp), STATE1_A
+	paddd		3*16(%rsp), STATE1_B
+
+	// Update LEN and loop back if more blocks remain.
+	sub		$64, LEN
+	jge		.Lfinup2x_loop
+
+	// Check if any final blocks need to be handled.
+	// FINAL_STEP = 2: all done
+	// FINAL_STEP = 1: need to do count-only padding block
+	// FINAL_STEP = 0: need to do the block with 0x80 padding byte
+	cmp		$1, FINAL_STEP
+	jg		.Lfinup2x_done
+	je		.Lfinup2x_finalize_countonly
+	add		$64, LEN
+	jz		.Lfinup2x_finalize_blockaligned
+
+	// Not block-aligned; 1 <= LEN <= 63 data bytes remain.  Pad the block.
+	// To do this, write the padding starting with the 0x80 byte to
+	// &sp[64].  Then for each message, copy the last 64 data bytes to sp
+	// and load from &sp[64 - LEN] to get the needed padding block.  This
+	// code relies on the data buffers being >= 64 bytes in length.
+	mov		$64, %ebx
+	sub		LEN, %ebx	// ebx = 64 - LEN
+	sub		%rbx, DATA1	// DATA1 -= 64 - LEN
+	sub		%rbx, DATA2	// DATA2 -= 64 - LEN
+	mov		$0x80, FINAL_STEP // using FINAL_STEP as a temporary
+	movd		FINAL_STEP, MSG0_A
+	pxor		MSG1_A, MSG1_A
+	movdqa		MSG0_A, 4*16(%rsp)
+	movdqa		MSG1_A, 5*16(%rsp)
+	movdqa		MSG1_A, 6*16(%rsp)
+	movdqa		MSG1_A, 7*16(%rsp)
+	cmp		$56, LEN
+	jge		1f	// will COUNT spill into its own block?
+	shl		$3, COUNT
+	bswap		COUNT
+	mov		COUNT, 56(%rsp,%rbx)
+	mov		$2, FINAL_STEP	// won't need count-only block
+	jmp		2f
+1:
+	mov		$1, FINAL_STEP	// will need count-only block
+2:
+	movdqu		0*16(DATA1), MSG0_A
+	movdqu		1*16(DATA1), MSG1_A
+	movdqu		2*16(DATA1), MSG2_A
+	movdqu		3*16(DATA1), MSG3_A
+	movdqa		MSG0_A, 0*16(%rsp)
+	movdqa		MSG1_A, 1*16(%rsp)
+	movdqa		MSG2_A, 2*16(%rsp)
+	movdqa		MSG3_A, 3*16(%rsp)
+	movdqu		0*16(%rsp,%rbx), MSG0_A
+	movdqu		1*16(%rsp,%rbx), MSG1_A
+	movdqu		2*16(%rsp,%rbx), MSG2_A
+	movdqu		3*16(%rsp,%rbx), MSG3_A
+
+	movdqu		0*16(DATA2), MSG0_B
+	movdqu		1*16(DATA2), MSG1_B
+	movdqu		2*16(DATA2), MSG2_B
+	movdqu		3*16(DATA2), MSG3_B
+	movdqa		MSG0_B, 0*16(%rsp)
+	movdqa		MSG1_B, 1*16(%rsp)
+	movdqa		MSG2_B, 2*16(%rsp)
+	movdqa		MSG3_B, 3*16(%rsp)
+	movdqu		0*16(%rsp,%rbx), MSG0_B
+	movdqu		1*16(%rsp,%rbx), MSG1_B
+	movdqu		2*16(%rsp,%rbx), MSG2_B
+	movdqu		3*16(%rsp,%rbx), MSG3_B
+	jmp		.Lfinup2x_loop_have_data
+
+	// Prepare a padding block, either:
+	//
+	//	{0x80, 0, 0, 0, ..., count (as __be64)}
+	//	This is for a block aligned message.
+	//
+	//	{   0, 0, 0, 0, ..., count (as __be64)}
+	//	This is for a message whose length mod 64 is >= 56.
+	//
+	// Pre-swap the endianness of the words.
+.Lfinup2x_finalize_countonly:
+	pxor		MSG0_A, MSG0_A
+	jmp		1f
+
+.Lfinup2x_finalize_blockaligned:
+	mov		$0x80000000, %ebx
+	movd		%ebx, MSG0_A
+1:
+	pxor		MSG1_A, MSG1_A
+	pxor		MSG2_A, MSG2_A
+	ror		$29, COUNT
+	movq		COUNT, MSG3_A
+	pslldq		$8, MSG3_A
+	movdqa		MSG0_A, MSG0_B
+	pxor		MSG1_B, MSG1_B
+	pxor		MSG2_B, MSG2_B
+	movdqa		MSG3_A, MSG3_B
+	mov		$2, FINAL_STEP
+	jmp		.Lfinup2x_loop_have_bswapped_data
+
+.Lfinup2x_done:
+	// Write the two digests with all bytes in the correct order.
+	movdqa		STATE0_A, TMP_A
+	movdqa		STATE0_B, TMP_B
+	punpcklqdq	STATE1_A, STATE0_A		// GHEF
+	punpcklqdq	STATE1_B, STATE0_B
+	punpckhqdq	TMP_A, STATE1_A			// ABCD
+	punpckhqdq	TMP_B, STATE1_B
+	pshufd		$0xB1, STATE0_A, STATE0_A	// HGFE
+	pshufd		$0xB1, STATE0_B, STATE0_B
+	pshufd		$0x1B, STATE1_A, STATE1_A	// DCBA
+	pshufd		$0x1B, STATE1_B, STATE1_B
+	pshufb		SHUF_MASK, STATE0_A
+	pshufb		SHUF_MASK, STATE0_B
+	pshufb		SHUF_MASK, STATE1_A
+	pshufb		SHUF_MASK, STATE1_B
+	movdqu		STATE0_A, 1*16(OUT1)
+	movdqu		STATE0_B, 1*16(OUT2)
+	movdqu		STATE1_A, 0*16(OUT1)
+	movdqu		STATE1_B, 0*16(OUT2)
+
+	mov		%rbp, %rsp
+	pop		%rbp
+	pop		%rbx
+	RET
+SYM_FUNC_END(__sha256_ni_finup2x)
||||||
.section .rodata.cst256.K256, "aM", @progbits, 256
|
.section .rodata.cst256.K256, "aM", @progbits, 256
|
||||||
.align 64
|
.align 64
|
||||||
K256:
|
K256:
|
||||||
|
|
|
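What the interleaved code above computes is easiest to see in scalar form: both messages resume from one shared SHA-256 state and are finalized independently over equal-length buffers; the assembly merely interleaves the two message schedules so both SHA-NI dependency chains stay busy. A minimal C sketch of that contract, using the kernel's SHA-256 library helpers (the function name here is hypothetical, not part of the patch):

#include <crypto/sha2.h>

/*
 * Reference semantics for the 2x finup (illustrative sketch only): both
 * messages continue from the same parent state and have the same length,
 * mirroring the constraints the assembly enforces.
 */
static void sha256_finup2x_ref(const struct sha256_state *sctx,
			       const u8 *data1, const u8 *data2,
			       unsigned int len,
			       u8 out1[SHA256_DIGEST_SIZE],
			       u8 out2[SHA256_DIGEST_SIZE])
{
	struct sha256_state s1 = *sctx;		/* fork the shared state */
	struct sha256_state s2 = *sctx;

	sha256_update(&s1, data1, len);		/* absorb message 1's tail */
	sha256_final(&s1, out1);		/* applies 0x80/length padding */
	sha256_update(&s2, data2, len);
	sha256_final(&s2, out2);
}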
@@ -107,12 +107,20 @@ static int sha256_ssse3_final(struct shash_desc *desc, u8 *out)
 	return sha256_ssse3_finup(desc, NULL, 0, out);
 }
 
+static int sha256_ssse3_digest(struct shash_desc *desc, const u8 *data,
+			       unsigned int len, u8 *out)
+{
+	return sha256_base_init(desc) ?:
+	       sha256_ssse3_finup(desc, data, len, out);
+}
+
 static struct shash_alg sha256_ssse3_algs[] = { {
 	.digestsize	=	SHA256_DIGEST_SIZE,
 	.init		=	sha256_base_init,
 	.update		=	sha256_ssse3_update,
 	.final		=	sha256_ssse3_final,
 	.finup		=	sha256_ssse3_finup,
+	.digest		=	sha256_ssse3_digest,
 	.descsize	=	sizeof(struct sha256_state),
 	.base		=	{
 		.cra_name	=	"sha256",
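The new .digest methods use the GCC/Clang conditional-expression extension: "x ?: y" yields x unless x evaluates to zero. Spelled out without the extension, the ssse3 variant above is equivalent to this sketch (hypothetical name):

static int sha256_ssse3_digest_spelled_out(struct shash_desc *desc,
					   const u8 *data, unsigned int len,
					   u8 *out)
{
	int err = sha256_base_init(desc);	/* 0 on success */

	if (err)
		return err;
	return sha256_ssse3_finup(desc, data, len, out);
}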
@@ -172,12 +180,20 @@ static int sha256_avx_final(struct shash_desc *desc, u8 *out)
 	return sha256_avx_finup(desc, NULL, 0, out);
 }
 
+static int sha256_avx_digest(struct shash_desc *desc, const u8 *data,
+			     unsigned int len, u8 *out)
+{
+	return sha256_base_init(desc) ?:
+	       sha256_avx_finup(desc, data, len, out);
+}
+
 static struct shash_alg sha256_avx_algs[] = { {
 	.digestsize	=	SHA256_DIGEST_SIZE,
 	.init		=	sha256_base_init,
 	.update		=	sha256_avx_update,
 	.final		=	sha256_avx_final,
 	.finup		=	sha256_avx_finup,
+	.digest		=	sha256_avx_digest,
 	.descsize	=	sizeof(struct sha256_state),
 	.base		=	{
 		.cra_name	=	"sha256",
@@ -248,12 +264,20 @@ static int sha256_avx2_final(struct shash_desc *desc, u8 *out)
 	return sha256_avx2_finup(desc, NULL, 0, out);
 }
 
+static int sha256_avx2_digest(struct shash_desc *desc, const u8 *data,
+			      unsigned int len, u8 *out)
+{
+	return sha256_base_init(desc) ?:
+	       sha256_avx2_finup(desc, data, len, out);
+}
+
 static struct shash_alg sha256_avx2_algs[] = { {
 	.digestsize	=	SHA256_DIGEST_SIZE,
 	.init		=	sha256_base_init,
 	.update		=	sha256_avx2_update,
 	.final		=	sha256_avx2_final,
 	.finup		=	sha256_avx2_finup,
+	.digest		=	sha256_avx2_digest,
 	.descsize	=	sizeof(struct sha256_state),
 	.base		=	{
 		.cra_name	=	"sha256",
@@ -306,6 +330,11 @@ static void unregister_sha256_avx2(void)
 asmlinkage void sha256_ni_transform(struct sha256_state *digest,
 				    const u8 *data, int rounds);
 
+asmlinkage void __sha256_ni_finup2x(const struct sha256_state *sctx,
+				    const u8 *data1, const u8 *data2, int len,
+				    u8 out1[SHA256_DIGEST_SIZE],
+				    u8 out2[SHA256_DIGEST_SIZE]);
+
 static int sha256_ni_update(struct shash_desc *desc, const u8 *data,
 			 unsigned int len)
 {
@@ -323,13 +352,55 @@ static int sha256_ni_final(struct shash_desc *desc, u8 *out)
 	return sha256_ni_finup(desc, NULL, 0, out);
 }
 
+static int sha256_ni_digest(struct shash_desc *desc, const u8 *data,
+			    unsigned int len, u8 *out)
+{
+	return sha256_base_init(desc) ?:
+	       sha256_ni_finup(desc, data, len, out);
+}
+
+static int sha256_ni_finup_mb(struct shash_desc *desc,
+			      const u8 * const data[], unsigned int len,
+			      u8 * const outs[], unsigned int num_msgs)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+
+	/*
+	 * num_msgs != 2 should not happen here, since this algorithm sets
+	 * mb_max_msgs=2, and the crypto API handles num_msgs <= 1 before
+	 * calling into the algorithm's finup_mb method.
+	 */
+	if (WARN_ON_ONCE(num_msgs != 2))
+		return -EOPNOTSUPP;
+
+	if (unlikely(!crypto_simd_usable()))
+		return -EOPNOTSUPP;
+
+	/* __sha256_ni_finup2x() assumes SHA256_BLOCK_SIZE <= len <= INT_MAX. */
+	if (unlikely(len < SHA256_BLOCK_SIZE || len > INT_MAX))
+		return -EOPNOTSUPP;
+
+	/* __sha256_ni_finup2x() assumes the following offsets. */
+	BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0);
+	BUILD_BUG_ON(offsetof(struct sha256_state, count) != 32);
+	BUILD_BUG_ON(offsetof(struct sha256_state, buf) != 40);
+
+	kernel_fpu_begin();
+	__sha256_ni_finup2x(sctx, data[0], data[1], len, outs[0], outs[1]);
+	kernel_fpu_end();
+	return 0;
+}
+
 static struct shash_alg sha256_ni_algs[] = { {
 	.digestsize	=	SHA256_DIGEST_SIZE,
 	.init		=	sha256_base_init,
 	.update		=	sha256_ni_update,
 	.final		=	sha256_ni_final,
 	.finup		=	sha256_ni_finup,
+	.digest		=	sha256_ni_digest,
+	.finup_mb	=	sha256_ni_finup_mb,
 	.descsize	=	sizeof(struct sha256_state),
+	.mb_max_msgs	=	2,
 	.base		=	{
 		.cra_name	=	"sha256",
 		.cra_driver_name =	"sha256-ni",
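Taken together with the crypto_shash_finup_mb() entry point added later in this series, a caller that hashes two messages sharing a common prefix could look roughly like the sketch below (hypothetical helper; standard shash tfm setup and error paths trimmed):

static int hash_two_tails(struct crypto_shash *tfm,
			  const u8 *prefix, unsigned int prefix_len,
			  const u8 *tail1, const u8 *tail2,
			  unsigned int tail_len,
			  u8 out1[SHA256_DIGEST_SIZE],
			  u8 out2[SHA256_DIGEST_SIZE])
{
	SHASH_DESC_ON_STACK(desc, tfm);
	const u8 *tails[2] = { tail1, tail2 };
	u8 *outs[2] = { out1, out2 };
	int err;

	desc->tfm = tfm;
	err = crypto_shash_init(desc);
	if (err)
		return err;
	err = crypto_shash_update(desc, prefix, prefix_len); /* shared prefix */
	if (err)
		return err;
	/* Finish both messages from the shared state in one call. */
	return crypto_shash_finup_mb(desc, tails, tail_len, outs, 2);
}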
@@ -57,6 +57,7 @@ config CRYPTO_FIPS_VERSION
 config CRYPTO_FIPS140_MOD
 	tristate "Enable FIPS 140 cryptographic module"
 	depends on ARM64
+	depends on CC_IS_CLANG
 	depends on m
 	select CRYPTO_FIPS140_MERGE_MOD_SECTIONS
 	help
@@ -696,18 +696,8 @@ static bool update_fips140_library_routines(void)
 	return ret == 0;
 }
 
-/*
- * Initialize the FIPS 140 module.
- *
- * Note: this routine iterates over the contents of the initcall section, which
- * consists of an array of function pointers that was emitted by the linker
- * rather than the compiler. This means that these function pointers lack the
- * usual CFI stubs that the compiler emits when CFI codegen is enabled. So
- * let's disable CFI locally when handling the initcall array, to avoid
- * surpises.
- */
-static int __init __attribute__((__no_sanitize__("cfi")))
-fips140_init(void)
+/* Initialize the FIPS 140 module */
+static int __init fips140_init(void)
 {
 	const initcall_entry_t *initcall;
 
@@ -720,7 +710,7 @@ fips140_init(void)
 	for (initcall = fips140_initcalls_start + 1;
 	     initcall < &__fips140_initcalls_end;
 	     initcall++) {
-		int (*init)(void) = offset_to_ptr(initcall);
+		initcall_t init = offset_to_ptr(initcall);
 		int err = init();
 
 		/*
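For context, offset_to_ptr() (from include/linux/compiler.h) decodes the self-relative 32-bit entries that the linker emits into the initcall table, which is why the loop above dereferences offsets rather than storing plain function pointers:

static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}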
@@ -216,6 +216,53 @@ int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
 }
 EXPORT_SYMBOL_GPL(crypto_shash_finup);
 
+static noinline_for_stack int
+shash_finup_mb_fallback(struct shash_desc *desc, const u8 * const data[],
+			unsigned int len, u8 * const outs[],
+			unsigned int num_msgs)
+{
+	struct crypto_shash *tfm = desc->tfm;
+	SHASH_DESC_ON_STACK(desc2, tfm);
+	unsigned int i;
+	int err;
+
+	for (i = 0; i < num_msgs - 1; i++) {
+		desc2->tfm = tfm;
+		memcpy(shash_desc_ctx(desc2), shash_desc_ctx(desc),
+		       crypto_shash_descsize(tfm));
+		err = crypto_shash_finup(desc2, data[i], len, outs[i]);
+		if (err)
+			return err;
+	}
+	return crypto_shash_finup(desc, data[i], len, outs[i]);
+}
+
+int crypto_shash_finup_mb(struct shash_desc *desc, const u8 * const data[],
+			  unsigned int len, u8 * const outs[],
+			  unsigned int num_msgs)
+{
+	struct shash_alg *alg = crypto_shash_alg(desc->tfm);
+	int err;
+
+	if (num_msgs == 1)
+		return crypto_shash_finup(desc, data[0], len, outs[0]);
+
+	if (num_msgs == 0)
+		return 0;
+
+	if (WARN_ON_ONCE(num_msgs > alg->mb_max_msgs))
+		goto fallback;
+
+	err = alg->finup_mb(desc, data, len, outs, num_msgs);
+	if (unlikely(err == -EOPNOTSUPP))
+		goto fallback;
+	return err;
+
+fallback:
+	return shash_finup_mb_fallback(desc, data, len, outs, num_msgs);
+}
+EXPORT_SYMBOL_GPL(crypto_shash_finup_mb);
+
 static int shash_digest_unaligned(struct shash_desc *desc, const u8 *data,
 				  unsigned int len, u8 *out)
 {
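The fallback makes the API contract explicit: finup_mb must produce exactly the digests that num_msgs independent finup() calls from the same descriptor state would. The state-forking idiom it relies on is that an shash descriptor is just { tfm, descsize bytes of opaque context }, so copying the context clones the hash midstream. As a standalone sketch (hypothetical name):

static int fork_and_finup(struct shash_desc *desc, const u8 *data,
			  unsigned int len, u8 *out)
{
	struct crypto_shash *tfm = desc->tfm;
	SHASH_DESC_ON_STACK(desc2, tfm);

	desc2->tfm = tfm;
	memcpy(shash_desc_ctx(desc2), shash_desc_ctx(desc),
	       crypto_shash_descsize(tfm));		/* fork the running state */
	return crypto_shash_finup(desc2, data, len, out); /* desc unchanged */
}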
@@ -648,6 +695,17 @@ static int shash_prepare_alg(struct shash_alg *alg)
 	if ((alg->export && !alg->import) || (alg->import && !alg->export))
 		return -EINVAL;
 
+	if (alg->mb_max_msgs > 1) {
+		if (alg->mb_max_msgs > HASH_MAX_MB_MSGS)
+			return -EINVAL;
+		if (!alg->finup_mb)
+			return -EINVAL;
+	} else {
+		if (alg->finup_mb)
+			return -EINVAL;
+		alg->mb_max_msgs = 1;
+	}
+
 	err = hash_prepare_alg(&alg->halg);
 	if (err)
 		return err;
@@ -229,6 +229,7 @@ enum flush_type {
 enum finalization_type {
 	FINALIZATION_TYPE_FINAL,	/* use final() */
 	FINALIZATION_TYPE_FINUP,	/* use finup() */
+	FINALIZATION_TYPE_FINUP_MB,	/* use finup_mb() */
 	FINALIZATION_TYPE_DIGEST,	/* use digest() */
 };
 
@@ -292,6 +293,10 @@ struct test_sg_division {
  * @key_offset_relative_to_alignmask: if true, add the algorithm's alignmask to
  *				      the @key_offset
  * @finalization_type: what finalization function to use for hashes
+ * @multibuffer_index: random number used to generate the message index to use
+ *		       for finup_mb (when finup_mb is used).
+ * @multibuffer_count: random number used to generate the num_msgs parameter to
+ *		       finup_mb (when finup_mb is used).
  * @nosimd: execute with SIMD disabled?  Requires !CRYPTO_TFM_REQ_MAY_SLEEP.
  */
 struct testvec_config {
@@ -305,6 +310,8 @@ struct testvec_config {
 	bool iv_offset_relative_to_alignmask;
 	bool key_offset_relative_to_alignmask;
 	enum finalization_type finalization_type;
+	unsigned int multibuffer_index;
+	unsigned int multibuffer_count;
 	bool nosimd;
 };
 
@@ -905,14 +912,20 @@ static unsigned int generate_random_length(struct rnd_state *rng,
 
 	switch (prandom_u32_below(rng, 4)) {
 	case 0:
-		return len % 64;
+		len %= 64;
+		break;
 	case 1:
-		return len % 256;
+		len %= 256;
+		break;
 	case 2:
-		return len % 1024;
+		len %= 1024;
+		break;
 	default:
-		return len;
+		break;
 	}
+	if (len && prandom_u32_below(rng, 4) == 0)
+		len = rounddown_pow_of_two(len);
+	return len;
 }
 
 /* Flip a random bit in the given nonempty data buffer */
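The new rounddown_pow_of_two() step biases roughly a quarter of the nonzero generated lengths toward powers of two, which tend to hit block-aligned fast paths. It keeps only the highest set bit of its argument, e.g.:

#include <linux/log2.h>

static unsigned long example(void)
{
	/* 1000 = 0b1111101000 -> 0b1000000000 == 512; 64 stays 64. */
	return rounddown_pow_of_two(1000);
}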
@@ -1008,6 +1021,8 @@ static char *generate_random_sgl_divisions(struct rnd_state *rng,
 
 		if (div == &divs[max_divs - 1] || prandom_bool(rng))
 			this_len = remaining;
+		else if (prandom_u32_below(rng, 4) == 0)
+			this_len = (remaining + 1) / 2;
 		else
 			this_len = prandom_u32_inclusive(rng, 1, remaining);
 		div->proportion_of_total = this_len;
@@ -1105,15 +1120,23 @@ static void generate_random_testvec_config(struct rnd_state *rng,
 		p += scnprintf(p, end - p, " may_sleep");
 	}
 
-	switch (prandom_u32_below(rng, 4)) {
+	switch (prandom_u32_below(rng, 8)) {
 	case 0:
+	case 1:
 		cfg->finalization_type = FINALIZATION_TYPE_FINAL;
 		p += scnprintf(p, end - p, " use_final");
 		break;
-	case 1:
+	case 2:
 		cfg->finalization_type = FINALIZATION_TYPE_FINUP;
 		p += scnprintf(p, end - p, " use_finup");
 		break;
+	case 3:
+	case 4:
+		cfg->finalization_type = FINALIZATION_TYPE_FINUP_MB;
+		cfg->multibuffer_index = prandom_u32_state(rng);
+		cfg->multibuffer_count = prandom_u32_state(rng);
+		p += scnprintf(p, end - p, " use_finup_mb");
+		break;
 	default:
 		cfg->finalization_type = FINALIZATION_TYPE_DIGEST;
 		p += scnprintf(p, end - p, " use_digest");
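Widening the switch from 4 to 8 buckets re-weights the finalization mix: final() is picked for cases 0-1 (2/8), finup() for case 2 (1/8), the new finup_mb() for cases 3-4 (2/8), and digest() for the remaining cases 5-7 (3/8), so the multibuffer path is exercised as often as final().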
@@ -1266,6 +1289,33 @@ static inline int check_shash_op(const char *op, int err,
 	return err;
 }
 
+static int do_finup_mb(struct shash_desc *desc,
+		       const u8 *data, unsigned int len, u8 *result,
+		       const struct testvec_config *cfg,
+		       const struct test_sglist *tsgl)
+{
+	struct crypto_shash *tfm = desc->tfm;
+	const u8 *unused_data = tsgl->bufs[XBUFSIZE - 1];
+	u8 unused_result[HASH_MAX_DIGESTSIZE];
+	const u8 *datas[HASH_MAX_MB_MSGS];
+	u8 *outs[HASH_MAX_MB_MSGS];
+	unsigned int num_msgs;
+	unsigned int msg_idx;
+	unsigned int i;
+
+	num_msgs = 1 + (cfg->multibuffer_count % crypto_shash_mb_max_msgs(tfm));
+	if (WARN_ON_ONCE(num_msgs > HASH_MAX_MB_MSGS))
+		return -EINVAL;
+	msg_idx = cfg->multibuffer_index % num_msgs;
+	for (i = 0; i < num_msgs; i++) {
+		datas[i] = unused_data;
+		outs[i] = unused_result;
+	}
+	datas[msg_idx] = data;
+	outs[msg_idx] = result;
+	return crypto_shash_finup_mb(desc, datas, len, outs, num_msgs);
+}
+
 /* Test one hash test vector in one configuration, using the shash API */
 static int test_shash_vec_cfg(const struct hash_testvec *vec,
 			      const char *vec_name,
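A worked example of the mapping used above, for an algorithm with mb_max_msgs = 2: multibuffer_count = 7 gives num_msgs = 1 + (7 % 2) = 2, and multibuffer_index = 5 gives msg_idx = 5 % 2 = 1, so the vector under test lands in slot 1 while slot 0 hashes the decoy buffer and writes a scratch digest.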
@@ -1343,7 +1393,10 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec,
 		goto result_ready;
 	}
 
-	/* Using init(), zero or more update(), then final() or finup() */
+	/*
+	 * Using init(), zero or more update(), then either final(), finup(), or
+	 * finup_mb().
+	 */
 
 	if (cfg->nosimd)
 		crypto_disable_simd_for_test();
@@ -1355,12 +1408,14 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec,
 		return err;
 
 	for (i = 0; i < tsgl->nents; i++) {
+		const u8 *data = sg_virt(&tsgl->sgl[i]);
+		unsigned int len = tsgl->sgl[i].length;
+
 		if (i + 1 == tsgl->nents &&
 		    cfg->finalization_type == FINALIZATION_TYPE_FINUP) {
 			if (divs[i]->nosimd)
 				crypto_disable_simd_for_test();
-			err = crypto_shash_finup(desc, sg_virt(&tsgl->sgl[i]),
-						 tsgl->sgl[i].length, result);
+			err = crypto_shash_finup(desc, data, len, result);
 			if (divs[i]->nosimd)
 				crypto_reenable_simd_for_test();
 			err = check_shash_op("finup", err, driver, vec_name,
@@ -1369,10 +1424,22 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec,
 				return err;
 			goto result_ready;
 		}
+		if (i + 1 == tsgl->nents &&
+		    cfg->finalization_type == FINALIZATION_TYPE_FINUP_MB) {
+			if (divs[i]->nosimd)
+				crypto_disable_simd_for_test();
+			err = do_finup_mb(desc, data, len, result, cfg, tsgl);
+			if (divs[i]->nosimd)
+				crypto_reenable_simd_for_test();
+			err = check_shash_op("finup_mb", err, driver, vec_name,
+					     cfg);
+			if (err)
+				return err;
+			goto result_ready;
+		}
 		if (divs[i]->nosimd)
 			crypto_disable_simd_for_test();
-		err = crypto_shash_update(desc, sg_virt(&tsgl->sgl[i]),
-					  tsgl->sgl[i].length);
+		err = crypto_shash_update(desc, data, len);
 		if (divs[i]->nosimd)
 			crypto_reenable_simd_for_test();
 		err = check_shash_op("update", err, driver, vec_name, cfg);
@@ -18,6 +18,7 @@
 #include <linux/swap.h>
 #include <linux/compaction.h>
 #include <linux/cma.h>
+#include "../mm/slab.h"
 
 struct ads_entry {
 	char *name;

@@ -50,6 +51,8 @@ static const struct ads_entry ads_entries[ADS_END] = {
 	ADS_ENTRY(ADS_COMPACT_PAGES, try_to_compact_pages),
 	ADS_ENTRY(ADS_SHOW_MEM, __show_mem),
 	ADS_ENTRY(ADS_TOTAL_CMA, &totalcma_pages),
+	ADS_ENTRY(ADS_SLAB_CACHES, &slab_caches),
+	ADS_ENTRY(ADS_SLAB_MUTEX, &slab_mutex),
 };
 
 /*

@@ -139,8 +139,6 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
 	int minor, ret;
 	struct dentry *dentry, *root;
 	rust_binder_device device = NULL;
-	char *name = NULL;
-	size_t name_len;
 	struct inode *inode = NULL;
 	struct super_block *sb = ref_inode->i_sb;
 	struct binderfs_info *info = sb->s_fs_info;

@@ -168,13 +166,8 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
 
 	ret = -ENOMEM;
 	req->name[BINDERFS_MAX_NAME] = '\0'; /* NUL-terminate */
-	name_len = strlen(req->name);
-	/* Make sure to include terminating NUL byte */
-	name = kmemdup(req->name, name_len + 1, GFP_KERNEL);
-	if (!name)
-		goto err;
 
-	device = rust_binder_new_device(name);
+	device = rust_binder_new_device(req->name);
 	if (!device)
 		goto err;

@@ -202,7 +195,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
 	inode_lock(d_inode(root));
 
 	/* look it up */
-	dentry = lookup_one_len(name, root, name_len);
+	dentry = lookup_one_len(req->name, root, strlen(req->name));
 	if (IS_ERR(dentry)) {
 		inode_unlock(d_inode(root));
 		ret = PTR_ERR(dentry);

@@ -225,7 +218,6 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
 	return 0;
 
 err:
-	kfree(name);
 	rust_binder_remove_device(device);
 	mutex_lock(&binderfs_minors_mutex);
 	--info->device_count;

@@ -848,9 +848,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 
 	alloc->buffer = vma->vm_start;
 
-	alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
-			       sizeof(alloc->pages[0]),
-			       GFP_KERNEL);
+	alloc->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE,
+				sizeof(alloc->pages[0]),
+				GFP_KERNEL);
 	if (alloc->pages == NULL) {
 		ret = -ENOMEM;
 		failure_string = "alloc page array";

@@ -881,7 +881,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 	return 0;
 
 err_alloc_buf_struct_failed:
-	kfree(alloc->pages);
+	kvfree(alloc->pages);
 	alloc->pages = NULL;
 err_alloc_pages_failed:
 	alloc->buffer = 0;

@@ -953,7 +953,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 			__free_page(alloc->pages[i].page_ptr);
 			page_count++;
 		}
-		kfree(alloc->pages);
+		kvfree(alloc->pages);
 	}
 	spin_unlock(&alloc->lock);
 	if (alloc->mm)
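The kcalloc -> kvcalloc switch matters because the page array scales with the mapped VMA: a 4 MiB binder mapping needs 1024 entries, and a physically-contiguous multi-page kmalloc of that array can fail on a fragmented system even when plenty of memory is free. kvcalloc() attempts kmalloc first and falls back to vmalloc, and the only contract change is that the buffer must be freed with kvfree(), as the hunks above do. A minimal sketch of the pairing (hypothetical helper names):

#include <linux/mm.h>
#include <linux/slab.h>

static void *alloc_page_array(size_t mapping_size, size_t entry_size)
{
	/* Physically contiguous when small, vmalloc-backed when large. */
	return kvcalloc(mapping_size / PAGE_SIZE, entry_size, GFP_KERNEL);
}

static void free_page_array(void *pages)
{
	kvfree(pages);	/* correct for both kmalloc- and vmalloc-backed memory */
}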
@@ -31,7 +31,6 @@
 #include <trace/hooks/madvise.h>
 #include <trace/hooks/iommu.h>
 #include <trace/hooks/net.h>
-#include <trace/hooks/pm_domain.h>
 #include <trace/hooks/cpuidle_psci.h>
 #include <trace/hooks/vmscan.h>
 #include <trace/hooks/avc.h>

@@ -75,13 +74,12 @@
 #include <trace/hooks/psi.h>
 #include <trace/hooks/blk.h>
 #include <trace/hooks/suspend.h>
+#include <trace/hooks/fsnotify.h>
 
 /*
  * Export tracepoints that act as a bare tracehook (ie: have no trace event
  * associated with them) to allow external modules to probe them.
  */
-EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sk_alloc);
-EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sk_free);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_tcp_sendmsg);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_tcp_recvmsg);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_udp_sendmsg);

@@ -92,6 +90,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_tcp_select_window);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_inet_sock_create);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_inet_sock_release);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_bpf_skb_load_bytes);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_tcp_rcv_spurious_retrans);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tcp_rtt_estimator);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_udp_enqueue_schedule_skb);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_build_skb_around);

@@ -138,6 +137,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_freq_table_limits);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpufreq_resolve_freq);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpufreq_fast_switch);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpufreq_target);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpufreq_online);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpu_cgroup_attach);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpu_cgroup_online);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpuinfo_c_show);

@@ -146,6 +146,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_si_mem_available_adjust);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_si_meminfo_adjust);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_thermal_pm_notify_suspend);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_fill_prdt);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_ufs_reprogram_all_keys);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_ufs_complete_init);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_prepare_command);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_update_sysfs);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_send_command);

@@ -161,7 +163,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_send_uic_command);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_send_tm_command);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_check_int_errors);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_update_sdev);
-EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_clock_scaling);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cgroup_attach);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_iommu_setup_dma_ops);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_iommu_alloc_insert_iova);

@@ -173,7 +174,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_psci_cpu_suspend);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_iommu_iovad_init_alloc_algo);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_iommu_limit_align_shift);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ptype_head);
-EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_allow_domain_state);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpuidle_psci_enter);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpuidle_psci_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpufreq_transition);

@@ -264,7 +264,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sha256);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_aes_expandkey);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_aes_encrypt);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_aes_decrypt);
-EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mmc_update_mmc_queue);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_downgrade_wake_finish);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_wake_finish);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_meminfo_proc_show);

@@ -345,6 +344,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_new_ref);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_del_ref);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_logbuf_pr_cont);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_pages_slowpath);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_pages_slowpath_start);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_pages_slowpath_end);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mmap_region);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tcp_write_timeout_estab_retrans);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tcp_connect);

@@ -360,6 +361,9 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_direct_reclaim_enter);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_direct_reclaim_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_may_oom_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_vmscan_kswapd_done);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_shrink_folio_list);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_inode_lru_isolate);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_invalidate_mapping_pagevec);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_customize_alloc_gfp);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpu_capacity_show);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_shmem_swapin_folio);

@@ -376,12 +380,15 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_percpu_rwsem_down_read);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_percpu_rwsem_up_write);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_percpu_rwsem_wait_complete);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_read_trylock_failed);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sk_alloc);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sk_free);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sd_init_unmap_multi_segment);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sd_setup_unmap_multi_segment);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_scan_type);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_queue_request_and_unlock);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_fuse_request_end);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpuset_fork);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_madvise_pageout_swap_entry);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_madvise_swapin_walk_pmd_entry);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_process_madvise);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_smaps_pte_entry);

@@ -415,14 +422,26 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mark_page_accessed);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_madvise_cold_pageout_skip);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_psi_event);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_psi_group);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_psi_update_triggers);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mmc_sdio_pm_flag_set);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_read_lazy_flag);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_tsk_need_resched_lazy);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_usb_dev_suspend);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_usb_dev_resume);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_bd_link_disk_holder);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_new_mount_fc);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_fill_rwbs);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_f2fs_ra_op_flags);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_use_amu_fie);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_resume_begin);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_resume_end);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_early_resume_begin);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_enable_thermal_genl_check);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_pr_set_vma_name_bypass);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rebalance_anon_lru_bypass);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cma_debug_show_areas);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_contig_range_not_isolated);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_warn_alloc_tune_ratelimit);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_warn_alloc_show_mem_bypass);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_use_vm_swappiness);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_fsnotify_open);
@@ -12,8 +12,6 @@
 #include <linux/cpumask.h>
 #include <linux/ktime.h>
 
-#include <trace/hooks/pm_domain.h>
-
 static int dev_update_qos_constraint(struct device *dev, void *data)
 {
 	s64 *constraint_ns_p = data;

@@ -181,11 +179,6 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
 	struct pm_domain_data *pdd;
 	s64 min_off_time_ns;
 	s64 off_on_time_ns;
-	bool allow = true;
-
-	trace_android_vh_allow_domain_state(genpd, state, &allow);
-	if (!allow)
-		return false;
 
 	off_on_time_ns = genpd->states[state].power_off_latency_ns +
 		genpd->states[state].power_on_latency_ns;
@@ -1497,6 +1497,8 @@ static int cpufreq_online(unsigned int cpu)
 			goto out_destroy_policy;
 		}
 
+		trace_android_vh_cpufreq_online(policy);
+
 		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 				CPUFREQ_CREATE_POLICY, policy);
 	}
@@ -390,6 +390,24 @@ static char *dma_heap_devnode(const struct device *dev, umode_t *mode)
 	return kasprintf(GFP_KERNEL, "dma_heap/%s", dev_name(dev));
 }
 
+long dma_heap_try_get_pool_size_kb(void)
+{
+	struct dma_heap *heap;
+	u64 total_pool_size = 0;
+
+	if (!mutex_trylock(&heap_list_lock))
+		return -1;
+
+	list_for_each_entry(heap, &heap_list, list) {
+		if (heap->ops->get_pool_size)
+			total_pool_size += heap->ops->get_pool_size(heap);
+	}
+	mutex_unlock(&heap_list_lock);
+
+	return (long)(total_pool_size / 1024);
+}
+EXPORT_SYMBOL_GPL(dma_heap_try_get_pool_size_kb);
+
 static ssize_t total_pools_kb_show(struct kobject *kobj,
 				   struct kobj_attribute *attr, char *buf)
 {
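dma_heap_try_get_pool_size_kb() deliberately uses mutex_trylock(): it is meant to be callable from paths that must not stall on the heap list, so callers have to tolerate a -1 "busy" result. A hypothetical consumer:

static void report_heap_pools(void)
{
	long kb = dma_heap_try_get_pool_size_kb();

	if (kb < 0)
		return;		/* heap_list_lock was contended; skip this sample */
	pr_info("dma-heap pools: %ld kB\n", kb);
}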
@@ -41,6 +41,7 @@ extern const struct pkvm_module_ops *mod_ops;
 #define kvm_iommu_donate_pages_atomic(x)	CALL_FROM_OPS(iommu_donate_pages_atomic, x)
 #define kvm_iommu_reclaim_pages_atomic(x, y)	CALL_FROM_OPS(iommu_reclaim_pages_atomic, x, y)
 #define kvm_iommu_snapshot_host_stage2(x)	CALL_FROM_OPS(iommu_snapshot_host_stage2, x)
+#define kvm_iommu_flush_unmap_cache(x)		CALL_FROM_OPS(iommu_flush_unmap_cache, x)
 #endif
 
 #endif /* __ARM_SMMU_V3_MODULE__ */
@@ -551,13 +551,13 @@ static void smmu_tlb_flush_all(void *cookie)
 	hyp_read_lock(&smmu_domain->lock);
 	list_for_each_entry(iommu_node, &smmu_domain->iommu_list, list) {
 		smmu = to_smmu(iommu_node->iommu);
-		hyp_spin_lock(&smmu->iommu.lock);
+		kvm_iommu_lock(&smmu->iommu);
 		if (smmu->iommu.power_is_off && smmu->caches_clean_on_power_on) {
-			hyp_spin_unlock(&smmu->iommu.lock);
+			kvm_iommu_unlock(&smmu->iommu);
 			continue;
 		}
 		WARN_ON(smmu_send_cmd(smmu, &cmd));
-		hyp_spin_unlock(&smmu->iommu.lock);
+		kvm_iommu_unlock(&smmu->iommu);
 	}
 	hyp_read_unlock(&smmu_domain->lock);
 }

@@ -572,7 +572,7 @@ static int smmu_tlb_inv_range_smmu(struct hyp_arm_smmu_v3_device *smmu,
 	size_t inv_range = granule;
 	struct hyp_arm_smmu_v3_domain *smmu_domain = domain->priv;
 
-	hyp_spin_lock(&smmu->iommu.lock);
+	kvm_iommu_lock(&smmu->iommu);
 	if (smmu->iommu.power_is_off && smmu->caches_clean_on_power_on)
 		goto out_ret;
 
@@ -633,7 +633,7 @@ static int smmu_tlb_inv_range_smmu(struct hyp_arm_smmu_v3_device *smmu,
 
 	ret = smmu_sync_cmd(smmu);
 out_ret:
-	hyp_spin_unlock(&smmu->iommu.lock);
+	kvm_iommu_unlock(&smmu->iommu);
 	return ret;
 }
 
@@ -997,7 +997,7 @@ static int smmu_attach_dev(struct kvm_hyp_iommu *iommu, struct kvm_hyp_iommu_dom
 	struct domain_iommu_node *iommu_node = NULL;
 
 	hyp_write_lock(&smmu_domain->lock);
-	hyp_spin_lock(&iommu->lock);
+	kvm_iommu_lock(iommu);
 	dst = smmu_get_ste_ptr(smmu, sid);
 	if (!dst)
 		goto out_unlock;

@@ -1087,7 +1087,7 @@ static int smmu_attach_dev(struct kvm_hyp_iommu *iommu, struct kvm_hyp_iommu_dom
 out_unlock:
 	if (ret && iommu_node)
 		hyp_free(iommu_node);
-	hyp_spin_unlock(&iommu->lock);
+	kvm_iommu_unlock(iommu);
 	hyp_write_unlock(&smmu_domain->lock);
 	return ret;
 }

@@ -1103,7 +1103,7 @@ static int smmu_detach_dev(struct kvm_hyp_iommu *iommu, struct kvm_hyp_iommu_dom
 	u64 *cd_table, *cd;
 
 	hyp_write_lock(&smmu_domain->lock);
-	hyp_spin_lock(&iommu->lock);
+	kvm_iommu_lock(iommu);
 	dst = smmu_get_ste_ptr(smmu, sid);
 	if (!dst)
 		goto out_unlock;

@@ -1145,7 +1145,7 @@ static int smmu_detach_dev(struct kvm_hyp_iommu *iommu, struct kvm_hyp_iommu_dom
 
 	smmu_put_ref_domain(smmu, smmu_domain);
 out_unlock:
-	hyp_spin_unlock(&iommu->lock);
+	kvm_iommu_unlock(iommu);
 	hyp_write_unlock(&smmu_domain->lock);
 	return ret;
 }

@@ -1300,14 +1300,20 @@ static void kvm_iommu_unmap_walker(struct io_pgtable_ctxt *ctxt)
 	struct kvm_iommu_walk_data *data = (struct kvm_iommu_walk_data *)ctxt->arg;
 	struct kvm_iommu_paddr_cache *cache = data->cache;
 
-	cache->paddr[cache->ptr] = ctxt->addr;
-	cache->pgsize[cache->ptr++] = ctxt->size;
-
 	/*
 	 * It is guaranteed unmap is called with max of the cache size,
 	 * see kvm_iommu_unmap_pages()
 	 */
-	WARN_ON(cache->ptr == KVM_IOMMU_PADDR_CACHE_MAX);
+	cache->paddr[cache->ptr] = ctxt->addr;
+	cache->pgsize[cache->ptr++] = ctxt->size;
+
+	/* Make more space. */
+	if (cache->ptr == KVM_IOMMU_PADDR_CACHE_MAX) {
+		/* Must invalidate TLB first. */
+		smmu_iotlb_sync(data->cookie, data->iotlb_gather);
+		iommu_iotlb_gather_init(data->iotlb_gather);
+		kvm_iommu_flush_unmap_cache(cache);
+	}
 }
 
 static size_t smmu_unmap_pages(struct kvm_hyp_iommu_domain *domain, unsigned long iova,
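The hyp_spin_lock -> kvm_iommu_lock conversion routes every IOMMU lock acquisition through one wrapper, giving the pKVM IOMMU core a single choke point for future instrumentation or a different locking primitive. The wrapper definitions are outside this diff; presumably they are thin, along the lines of this assumed sketch:

/* Assumed shape of the wrappers; their real definitions are not in this diff. */
static inline void kvm_iommu_lock(struct kvm_hyp_iommu *iommu)
{
	hyp_spin_lock(&iommu->lock);
}

static inline void kvm_iommu_unlock(struct kvm_hyp_iommu *iommu)
{
	hyp_spin_unlock(&iommu->lock);
}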
@ -186,13 +186,11 @@ error:
|
||||||
static int fec_is_erasure(struct dm_verity *v, struct dm_verity_io *io,
|
static int fec_is_erasure(struct dm_verity *v, struct dm_verity_io *io,
|
||||||
u8 *want_digest, u8 *data)
|
u8 *want_digest, u8 *data)
|
||||||
{
|
{
|
||||||
if (unlikely(verity_hash(v, verity_io_hash_req(v, io),
|
if (unlikely(verity_hash(v, io, data, 1 << v->data_dev_block_bits,
|
||||||
data, 1 << v->data_dev_block_bits,
|
io->tmp_digest, true)))
|
||||||
verity_io_real_digest(v, io), true)))
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
return memcmp(verity_io_real_digest(v, io), want_digest,
|
return memcmp(io->tmp_digest, want_digest, v->digest_size) != 0;
|
||||||
v->digest_size) != 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -363,7 +361,7 @@ static void fec_init_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
|
||||||
*/
|
*/
|
||||||
static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io,
|
static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io,
|
||||||
struct dm_verity_fec_io *fio, u64 rsb, u64 offset,
|
struct dm_verity_fec_io *fio, u64 rsb, u64 offset,
|
||||||
bool use_erasures)
|
const u8 *want_digest, bool use_erasures)
|
||||||
{
|
{
|
||||||
int r, neras = 0;
|
int r, neras = 0;
|
||||||
unsigned int pos;
|
unsigned int pos;
|
||||||
|
@ -388,14 +386,12 @@ static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io,
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Always re-validate the corrected block against the expected hash */
|
/* Always re-validate the corrected block against the expected hash */
|
||||||
r = verity_hash(v, verity_io_hash_req(v, io), fio->output,
|
r = verity_hash(v, io, fio->output, 1 << v->data_dev_block_bits,
|
||||||
1 << v->data_dev_block_bits,
|
io->tmp_digest, true);
|
||||||
verity_io_real_digest(v, io), true);
|
|
||||||
if (unlikely(r < 0))
|
if (unlikely(r < 0))
|
||||||
return r;
|
return r;
|
||||||
|
|
||||||
if (memcmp(verity_io_real_digest(v, io), verity_io_want_digest(v, io),
|
if (memcmp(io->tmp_digest, want_digest, v->digest_size)) {
|
||||||
v->digest_size)) {
|
|
||||||
DMERR_LIMIT("%s: FEC %llu: failed to correct (%d erasures)",
|
DMERR_LIMIT("%s: FEC %llu: failed to correct (%d erasures)",
|
||||||
v->data_dev->name, (unsigned long long)rsb, neras);
|
v->data_dev->name, (unsigned long long)rsb, neras);
|
||||||
return -EILSEQ;
|
return -EILSEQ;
|
||||||
|
@ -404,24 +400,10 @@ static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int fec_bv_copy(struct dm_verity *v, struct dm_verity_io *io, u8 *data,
|
/* Correct errors in a block. Copies corrected block to dest. */
|
||||||
size_t len)
|
|
||||||
{
|
|
||||||
struct dm_verity_fec_io *fio = fec_io(io);
|
|
||||||
|
|
||||||
memcpy(data, &fio->output[fio->output_pos], len);
|
|
||||||
fio->output_pos += len;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Correct errors in a block. Copies corrected block to dest if non-NULL,
|
|
||||||
* otherwise to a bio_vec starting from iter.
|
|
||||||
*/
|
|
||||||
int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
|
int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
|
||||||
enum verity_block_type type, sector_t block, u8 *dest,
|
enum verity_block_type type, const u8 *want_digest,
|
||||||
struct bvec_iter *iter)
|
sector_t block, u8 *dest)
|
||||||
{
|
{
|
||||||
int r;
|
int r;
|
||||||
struct dm_verity_fec_io *fio = fec_io(io);
|
struct dm_verity_fec_io *fio = fec_io(io);
|
||||||
|
@ -464,19 +446,14 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
|
||||||
* them first. Do a second attempt with erasures if the corruption is
|
* them first. Do a second attempt with erasures if the corruption is
|
||||||
* bad enough.
|
* bad enough.
|
||||||
*/
|
*/
|
||||||
r = fec_decode_rsb(v, io, fio, rsb, offset, false);
|
r = fec_decode_rsb(v, io, fio, rsb, offset, want_digest, false);
|
||||||
if (r < 0) {
|
if (r < 0) {
|
||||||
r = fec_decode_rsb(v, io, fio, rsb, offset, true);
|
r = fec_decode_rsb(v, io, fio, rsb, offset, want_digest, true);
|
||||||
if (r < 0)
|
if (r < 0)
|
||||||
goto done;
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (dest)
|
memcpy(dest, fio->output, 1 << v->data_dev_block_bits);
|
||||||
memcpy(dest, fio->output, 1 << v->data_dev_block_bits);
|
|
||||||
else if (iter) {
|
|
||||||
fio->output_pos = 0;
|
|
||||||
r = verity_for_bv_block(v, io, iter, fec_bv_copy);
|
|
||||||
}
|
|
||||||
|
|
||||||
done:
|
done:
|
||||||
fio->level--;
|
fio->level--;
|
||||||
|
|
|
@@ -57,7 +57,6 @@ struct dm_verity_fec_io {
 	u8 *bufs[DM_VERITY_FEC_BUF_MAX]; /* bufs for deinterleaving */
 	unsigned int nbufs;		/* number of buffers allocated */
 	u8 *output;			/* buffer for corrected output */
-	size_t output_pos;
 	unsigned int level;		/* recursion level */
 };
 
@@ -69,8 +68,8 @@ struct dm_verity_fec_io {
 extern bool verity_fec_is_enabled(struct dm_verity *v);
 
 extern int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
-			     enum verity_block_type type, sector_t block,
-			     u8 *dest, struct bvec_iter *iter);
+			     enum verity_block_type type, const u8 *want_digest,
+			     sector_t block, u8 *dest);
 
 extern unsigned int verity_fec_status_table(struct dm_verity *v, unsigned int sz,
 					    char *result, unsigned int maxlen);
@@ -100,8 +99,8 @@ static inline bool verity_fec_is_enabled(struct dm_verity *v)
 static inline int verity_fec_decode(struct dm_verity *v,
 				    struct dm_verity_io *io,
 				    enum verity_block_type type,
-				    sector_t block, u8 *dest,
-				    struct bvec_iter *iter)
+				    const u8 *want_digest,
+				    sector_t block, u8 *dest)
 {
 	return -EOPNOTSUPP;
 }
@@ -48,6 +48,9 @@ module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, 0644);
 
 static DEFINE_STATIC_KEY_FALSE(use_tasklet_enabled);
 
+/* Is at least one dm-verity instance using ahash_tfm instead of shash_tfm? */
+static DEFINE_STATIC_KEY_FALSE(ahash_enabled);
+
 struct dm_verity_prefetch_work {
 	struct work_struct work;
 	struct dm_verity *v;
@@ -102,7 +105,7 @@ static sector_t verity_position_at_level(struct dm_verity *v, sector_t block,
 	return block >> (level * v->hash_per_block_bits);
 }
 
-static int verity_hash_update(struct dm_verity *v, struct ahash_request *req,
+static int verity_ahash_update(struct dm_verity *v, struct ahash_request *req,
 			      const u8 *data, size_t len,
 			      struct crypto_wait *wait)
 {
@@ -135,12 +138,12 @@ static int verity_hash_update(struct dm_verity *v, struct ahash_request *req,
 /*
  * Wrapper for crypto_ahash_init, which handles verity salting.
  */
-static int verity_hash_init(struct dm_verity *v, struct ahash_request *req,
+static int verity_ahash_init(struct dm_verity *v, struct ahash_request *req,
 			    struct crypto_wait *wait, bool may_sleep)
 {
 	int r;
 
-	ahash_request_set_tfm(req, v->tfm);
+	ahash_request_set_tfm(req, v->ahash_tfm);
 	ahash_request_set_callback(req,
 		may_sleep ? CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG : 0,
 		crypto_req_done, (void *)wait);
@@ -155,18 +158,18 @@ static int verity_hash_init(struct dm_verity *v, struct ahash_request *req,
 	}
 
 	if (likely(v->salt_size && (v->version >= 1)))
-		r = verity_hash_update(v, req, v->salt, v->salt_size, wait);
+		r = verity_ahash_update(v, req, v->salt, v->salt_size, wait);
 
 	return r;
 }
 
-static int verity_hash_final(struct dm_verity *v, struct ahash_request *req,
+static int verity_ahash_final(struct dm_verity *v, struct ahash_request *req,
 			     u8 *digest, struct crypto_wait *wait)
 {
 	int r;
 
 	if (unlikely(v->salt_size && (!v->version))) {
-		r = verity_hash_update(v, req, v->salt, v->salt_size, wait);
+		r = verity_ahash_update(v, req, v->salt, v->salt_size, wait);
 
 		if (r < 0) {
 			DMERR("%s failed updating salt: %d", __func__, r);
@@ -180,23 +183,61 @@ out:
 	return r;
 }
 
-int verity_hash(struct dm_verity *v, struct ahash_request *req,
+static int verity_ahash(struct dm_verity *v, struct dm_verity_io *io,
+			const u8 *data, size_t len, u8 *digest, bool may_sleep)
+{
+	struct ahash_request *req = verity_io_hash_req(v, io);
+	struct crypto_wait wait;
+
+	return verity_ahash_init(v, req, &wait, may_sleep) ?:
+	       verity_ahash_update(v, req, data, len, &wait) ?:
+	       verity_ahash_final(v, req, digest, &wait);
+}
+
+int verity_hash(struct dm_verity *v, struct dm_verity_io *io,
 		const u8 *data, size_t len, u8 *digest, bool may_sleep)
 {
 	int r;
-	struct crypto_wait wait;
 
-	r = verity_hash_init(v, req, &wait, may_sleep);
-	if (unlikely(r < 0))
-		goto out;
+	if (static_branch_unlikely(&ahash_enabled) && !v->shash_tfm) {
+		r = verity_ahash(v, io, data, len, digest, may_sleep);
+	} else {
+		struct shash_desc *desc = verity_io_hash_req(v, io);
 
-	r = verity_hash_update(v, req, data, len, &wait);
-	if (unlikely(r < 0))
-		goto out;
+		desc->tfm = v->shash_tfm;
+		r = crypto_shash_import(desc, v->initial_hashstate) ?:
+		    crypto_shash_finup(desc, data, len, digest);
+	}
+	if (unlikely(r))
+		DMERR("Error hashing block: %d", r);
+	return r;
+}
 
-	r = verity_hash_final(v, req, digest, &wait);
+static int verity_hash_mb(struct dm_verity *v, struct dm_verity_io *io,
+			  const u8 *data[], size_t len, u8 *digests[],
+			  int num_blocks)
+{
+	int r = 0;
 
-out:
+	if (static_branch_unlikely(&ahash_enabled) && !v->shash_tfm) {
+		int i;
+
+		/* Note: in practice num_blocks is always 1 in this case. */
+		for (i = 0; i < num_blocks; i++) {
+			r = verity_ahash(v, io, data[i], len, digests[i],
+					 !io->in_tasklet);
+			if (r)
+				break;
+		}
+	} else {
+		struct shash_desc *desc = verity_io_hash_req(v, io);
+
+		desc->tfm = v->shash_tfm;
+		r = crypto_shash_import(desc, v->initial_hashstate) ?:
+		    crypto_shash_finup_mb(desc, data, len, digests, num_blocks);
+	}
+	if (unlikely(r))
+		DMERR("Error hashing blocks: %d", r);
 	return r;
 }
 
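The import+finup pattern above is what lets the shash path avoid re-hashing the salt for every block: the state exported after absorbing the salt is simply resumed. A minimal standalone sketch of that pattern, assuming <crypto/hash.h>; the function name and the "salted_state" parameter are illustrative, not part of the patch:

```c
/*
 * Illustrative sketch only: one-shot salted hashing via
 * crypto_shash_import() + crypto_shash_finup(), mirroring the shash
 * branch of verity_hash() above. "salted_state" stands for a state
 * previously exported after hashing the salt (cf. v->initial_hashstate).
 */
static int example_salted_hash(struct crypto_shash *tfm,
			       const void *salted_state,
			       const u8 *block, size_t len, u8 *digest)
{
	SHASH_DESC_ON_STACK(desc, tfm);

	desc->tfm = tfm;
	/* Resume from the pre-salted state instead of init + update(salt). */
	return crypto_shash_import(desc, salted_state) ?:
	       crypto_shash_finup(desc, block, len, digest);
}
```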
@@ -278,12 +319,12 @@ out:
  * Verify hash of a metadata block pertaining to the specified data block
  * ("block" argument) at a specified level ("level" argument).
  *
- * On successful return, verity_io_want_digest(v, io) contains the hash value
- * for a lower tree level or for the data block (if we're at the lowest level).
+ * On successful return, want_digest contains the hash value for a lower tree
+ * level or for the data block (if we're at the lowest level).
  *
  * If "skip_unverified" is true, unverified buffer is skipped and 1 is returned.
  * If "skip_unverified" is false, unverified buffer is hashed and verified
- * against current value of verity_io_want_digest(v, io).
+ * against current value of want_digest.
  */
 static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
 			       sector_t block, int level, bool skip_unverified,
@@ -325,13 +366,12 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
 			goto release_ret_r;
 		}
 
-		r = verity_hash(v, verity_io_hash_req(v, io),
-				data, 1 << v->hash_dev_block_bits,
-				verity_io_real_digest(v, io), !io->in_tasklet);
+		r = verity_hash(v, io, data, 1 << v->hash_dev_block_bits,
+				io->tmp_digest, !io->in_tasklet);
 		if (unlikely(r < 0))
 			goto release_ret_r;
 
-		if (likely(memcmp(verity_io_real_digest(v, io), want_digest,
+		if (likely(memcmp(io->tmp_digest, want_digest,
 				  v->digest_size) == 0))
 			aux->hash_verified = 1;
 		else if (static_branch_unlikely(&use_tasklet_enabled) &&
@@ -343,7 +383,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
 			r = -EAGAIN;
 			goto release_ret_r;
 		} else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_METADATA,
-					     hash_block, data, NULL) == 0)
+					     want_digest, hash_block, data) == 0)
 			aux->hash_verified = 1;
 		else if (verity_handle_err(v,
 					   DM_VERITY_BLOCK_TYPE_METADATA,
@@ -405,98 +445,9 @@ out:
 	return r;
 }
 
-/*
- * Calculates the digest for the given bio
- */
-static int verity_for_io_block(struct dm_verity *v, struct dm_verity_io *io,
-			       struct bvec_iter *iter, struct crypto_wait *wait)
-{
-	unsigned int todo = 1 << v->data_dev_block_bits;
-	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
-	struct scatterlist sg;
-	struct ahash_request *req = verity_io_hash_req(v, io);
-
-	do {
-		int r;
-		unsigned int len;
-		struct bio_vec bv = bio_iter_iovec(bio, *iter);
-
-		sg_init_table(&sg, 1);
-
-		len = bv.bv_len;
-
-		if (likely(len >= todo))
-			len = todo;
-		/*
-		 * Operating on a single page at a time looks suboptimal
-		 * until you consider the typical block size is 4,096B.
-		 * Going through this loops twice should be very rare.
-		 */
-		sg_set_page(&sg, bv.bv_page, len, bv.bv_offset);
-		ahash_request_set_crypt(req, &sg, NULL, len);
-		r = crypto_wait_req(crypto_ahash_update(req), wait);
-
-		if (unlikely(r < 0)) {
-			DMERR("%s crypto op failed: %d", __func__, r);
-			return r;
-		}
-
-		bio_advance_iter(bio, iter, len);
-		todo -= len;
-	} while (todo);
-
-	return 0;
-}
-
-/*
- * Calls function process for 1 << v->data_dev_block_bits bytes in the bio_vec
- * starting from iter.
- */
-int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
-			struct bvec_iter *iter,
-			int (*process)(struct dm_verity *v,
-				       struct dm_verity_io *io, u8 *data,
-				       size_t len))
-{
-	unsigned int todo = 1 << v->data_dev_block_bits;
-	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
-
-	do {
-		int r;
-		u8 *page;
-		unsigned int len;
-		struct bio_vec bv = bio_iter_iovec(bio, *iter);
-
-		page = bvec_kmap_local(&bv);
-		len = bv.bv_len;
-
-		if (likely(len >= todo))
-			len = todo;
-
-		r = process(v, io, page, len);
-		kunmap_local(page);
-
-		if (r < 0)
-			return r;
-
-		bio_advance_iter(bio, iter, len);
-		todo -= len;
-	} while (todo);
-
-	return 0;
-}
-
-static int verity_recheck_copy(struct dm_verity *v, struct dm_verity_io *io,
-			       u8 *data, size_t len)
-{
-	memcpy(data, io->recheck_buffer, len);
-	io->recheck_buffer += len;
-
-	return 0;
-}
-
 static noinline int verity_recheck(struct dm_verity *v, struct dm_verity_io *io,
-				   struct bvec_iter start, sector_t cur_block)
+				   const u8 *want_digest, sector_t cur_block,
+				   u8 *dest)
 {
 	struct page *page;
 	void *buffer;
@@ -519,23 +470,17 @@ static noinline int verity_recheck(struct dm_verity *v, struct dm_verity_io *io,
 	if (unlikely(r))
 		goto free_ret;
 
-	r = verity_hash(v, verity_io_hash_req(v, io), buffer,
-			1 << v->data_dev_block_bits,
-			verity_io_real_digest(v, io), true);
+	r = verity_hash(v, io, buffer, 1 << v->data_dev_block_bits,
+			io->tmp_digest, true);
 	if (unlikely(r))
 		goto free_ret;
 
-	if (memcmp(verity_io_real_digest(v, io),
-		   verity_io_want_digest(v, io), v->digest_size)) {
+	if (memcmp(io->tmp_digest, want_digest, v->digest_size)) {
 		r = -EIO;
 		goto free_ret;
 	}
 
-	io->recheck_buffer = buffer;
-	r = verity_for_bv_block(v, io, &start, verity_recheck_copy);
-	if (unlikely(r))
-		goto free_ret;
-
+	memcpy(dest, buffer, 1 << v->data_dev_block_bits);
 	r = 0;
 free_ret:
 	mempool_free(page, &v->recheck_pool);
@@ -543,23 +488,87 @@ free_ret:
 	return r;
 }
 
-static int verity_bv_zero(struct dm_verity *v, struct dm_verity_io *io,
-			  u8 *data, size_t len)
+static int verity_handle_data_hash_mismatch(struct dm_verity *v,
+					    struct dm_verity_io *io,
+					    struct bio *bio,
+					    struct pending_block *block)
 {
-	memset(data, 0, len);
+	const u8 *want_digest = block->want_digest;
+	sector_t blkno = block->blkno;
+	u8 *data = block->data;
+
+	if (static_branch_unlikely(&use_tasklet_enabled) && io->in_tasklet) {
+		/*
+		 * Error handling code (FEC included) cannot be run in a
+		 * tasklet since it may sleep, so fallback to work-queue.
+		 */
+		return -EAGAIN;
+	}
+	if (verity_recheck(v, io, want_digest, blkno, data) == 0) {
+		if (v->validated_blocks)
+			set_bit(blkno, v->validated_blocks);
+		return 0;
+	}
+#if defined(CONFIG_DM_VERITY_FEC)
+	if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA, want_digest,
+			      blkno, data) == 0)
+		return 0;
+#endif
+	if (bio->bi_status)
+		return -EIO; /* Error correction failed; Just return error */
+
+	if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA, blkno)) {
+		dm_audit_log_bio(DM_MSG_PREFIX, "verify-data", bio, blkno, 0);
+		return -EIO;
+	}
 	return 0;
 }
 
-/*
- * Moves the bio iter one data block forward.
- */
-static inline void verity_bv_skip_block(struct dm_verity *v,
-					struct dm_verity_io *io,
-					struct bvec_iter *iter)
+static void verity_clear_pending_blocks(struct dm_verity_io *io)
 {
-	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
+	int i;
 
-	bio_advance_iter(bio, iter, 1 << v->data_dev_block_bits);
+	for (i = io->num_pending - 1; i >= 0; i--) {
+		kunmap_local(io->pending_blocks[i].data);
+		io->pending_blocks[i].data = NULL;
+	}
+	io->num_pending = 0;
+}
+
+static int verity_verify_pending_blocks(struct dm_verity *v,
+					struct dm_verity_io *io,
+					struct bio *bio)
+{
+	const u8 *data[DM_VERITY_MAX_PENDING_DATA_BLOCKS];
+	u8 *real_digests[DM_VERITY_MAX_PENDING_DATA_BLOCKS];
+	int i;
+	int r;
+
+	for (i = 0; i < io->num_pending; i++) {
+		data[i] = io->pending_blocks[i].data;
+		real_digests[i] = io->pending_blocks[i].real_digest;
+	}
+
+	r = verity_hash_mb(v, io, data, 1 << v->data_dev_block_bits,
+			   real_digests, io->num_pending);
+	if (unlikely(r))
+		return r;
+
+	for (i = 0; i < io->num_pending; i++) {
+		struct pending_block *block = &io->pending_blocks[i];
+
+		if (likely(memcmp(block->real_digest, block->want_digest,
+				  v->digest_size) == 0)) {
+			if (v->validated_blocks)
+				set_bit(block->blkno, v->validated_blocks);
+		} else {
+			r = verity_handle_data_hash_mismatch(v, io, bio, block);
+			if (unlikely(r))
+				return r;
+		}
+	}
+	verity_clear_pending_blocks(io);
+	return 0;
 }
 
 /*
@@ -567,14 +576,15 @@ static inline void verity_bv_skip_block(struct dm_verity *v,
  */
 static int verity_verify_io(struct dm_verity_io *io)
 {
-	bool is_zero;
 	struct dm_verity *v = io->v;
-	struct bvec_iter start;
+	const unsigned int block_size = 1 << v->data_dev_block_bits;
 	struct bvec_iter iter_copy;
 	struct bvec_iter *iter;
-	struct crypto_wait wait;
 	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
 	unsigned int b;
+	int r;
+
+	io->num_pending = 0;
 
 	if (static_branch_unlikely(&use_tasklet_enabled) && io->in_tasklet) {
 		/*
@@ -586,88 +596,69 @@ static int verity_verify_io(struct dm_verity_io *io)
 	} else
 		iter = &io->iter;
 
-	for (b = 0; b < io->n_blocks; b++) {
-		int r;
-		sector_t cur_block = io->block + b;
-		struct ahash_request *req = verity_io_hash_req(v, io);
+	for (b = 0; b < io->n_blocks;
+	     b++, bio_advance_iter(bio, iter, block_size)) {
+		sector_t blkno = io->block + b;
+		struct pending_block *block;
+		bool is_zero;
+		struct bio_vec bv;
+		void *data;
 
 		if (v->validated_blocks && bio->bi_status == BLK_STS_OK &&
-		    likely(test_bit(cur_block, v->validated_blocks))) {
-			verity_bv_skip_block(v, io, iter);
+		    likely(test_bit(blkno, v->validated_blocks)))
 			continue;
-		}
 
-		r = verity_hash_for_block(v, io, cur_block,
-					  verity_io_want_digest(v, io),
+		block = &io->pending_blocks[io->num_pending];
+
+		r = verity_hash_for_block(v, io, blkno, block->want_digest,
 					  &is_zero);
 		if (unlikely(r < 0))
-			return r;
+			goto error;
+
+		bv = bio_iter_iovec(bio, *iter);
+		if (unlikely(bv.bv_len < block_size)) {
+			/*
+			 * Data block spans pages. This should not happen,
+			 * since dm-verity sets dma_alignment to the data block
+			 * size minus 1, and dm-verity also doesn't allow the
+			 * data block size to be greater than PAGE_SIZE.
+			 */
+			DMERR_LIMIT("unaligned io (data block spans pages)");
+			r = -EIO;
+			goto error;
+		}
+
+		data = bvec_kmap_local(&bv);
 
 		if (is_zero) {
 			/*
 			 * If we expect a zero block, don't validate, just
 			 * return zeros.
 			 */
-			r = verity_for_bv_block(v, io, iter,
-						verity_bv_zero);
-			if (unlikely(r < 0))
-				return r;
-
+			memset(data, 0, block_size);
+			kunmap_local(data);
 			continue;
 		}
 
-		r = verity_hash_init(v, req, &wait, !io->in_tasklet);
-		if (unlikely(r < 0))
-			return r;
-
-		start = *iter;
-		r = verity_for_io_block(v, io, iter, &wait);
-		if (unlikely(r < 0))
-			return r;
-
-		r = verity_hash_final(v, req, verity_io_real_digest(v, io),
-				      &wait);
-		if (unlikely(r < 0))
-			return r;
-
-		if (likely(memcmp(verity_io_real_digest(v, io),
-				  verity_io_want_digest(v, io), v->digest_size) == 0)) {
-			if (v->validated_blocks)
-				set_bit(cur_block, v->validated_blocks);
-			continue;
-		} else if (static_branch_unlikely(&use_tasklet_enabled) &&
-			   io->in_tasklet) {
-			/*
-			 * Error handling code (FEC included) cannot be run in a
-			 * tasklet since it may sleep, so fallback to work-queue.
-			 */
-			return -EAGAIN;
-		} else if (verity_recheck(v, io, start, cur_block) == 0) {
-			if (v->validated_blocks)
-				set_bit(cur_block, v->validated_blocks);
-			continue;
-#if defined(CONFIG_DM_VERITY_FEC)
-		} else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
-					     cur_block, NULL, &start) == 0) {
-			continue;
-#endif
-		} else {
-			if (bio->bi_status) {
-				/*
-				 * Error correction failed; Just return error
-				 */
-				return -EIO;
-			}
-			if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
-					      cur_block)) {
-				dm_audit_log_bio(DM_MSG_PREFIX, "verify-data",
-						 bio, cur_block, 0);
-				return -EIO;
-			}
+		block->data = data;
+		block->blkno = blkno;
+		if (++io->num_pending == v->mb_max_msgs) {
+			r = verity_verify_pending_blocks(v, io, bio);
+			if (unlikely(r))
+				goto error;
 		}
 	}
 
+	if (io->num_pending) {
+		r = verity_verify_pending_blocks(v, io, bio);
+		if (unlikely(r))
+			goto error;
+	}
+
 	return 0;
+
+error:
+	verity_clear_pending_blocks(io);
+	return r;
 }
 
 /*
@@ -995,6 +986,8 @@ static void verity_io_hints(struct dm_target *ti, struct queue_limits *limits)
 	limits->physical_block_size = 1 << v->data_dev_block_bits;
 
 	blk_limits_io_min(limits, limits->logical_block_size);
+
+	limits->dma_alignment = limits->logical_block_size - 1;
 }
 
 static void verity_dtr(struct dm_target *ti)
@@ -1013,11 +1006,16 @@ static void verity_dtr(struct dm_target *ti)
 
 	kvfree(v->validated_blocks);
 	kfree(v->salt);
+	kfree(v->initial_hashstate);
 	kfree(v->root_digest);
 	kfree(v->zero_digest);
 
-	if (v->tfm)
-		crypto_free_ahash(v->tfm);
+	if (v->ahash_tfm) {
+		static_branch_dec(&ahash_enabled);
+		crypto_free_ahash(v->ahash_tfm);
+	} else {
+		crypto_free_shash(v->shash_tfm);
+	}
 
 	kfree(v->alg_name);
 
@@ -1063,7 +1061,7 @@ static int verity_alloc_most_once(struct dm_verity *v)
 static int verity_alloc_zero_digest(struct dm_verity *v)
 {
 	int r = -ENOMEM;
-	struct ahash_request *req;
+	struct dm_verity_io *io;
 	u8 *zero_data;
 
 	v->zero_digest = kmalloc(v->digest_size, GFP_KERNEL);
@@ -1071,9 +1069,9 @@ static int verity_alloc_zero_digest(struct dm_verity *v)
 	if (!v->zero_digest)
 		return r;
 
-	req = kmalloc(v->ahash_reqsize, GFP_KERNEL);
+	io = kmalloc(sizeof(*io) + v->hash_reqsize, GFP_KERNEL);
 
-	if (!req)
+	if (!io)
 		return r; /* verity_dtr will free zero_digest */
 
 	zero_data = kzalloc(1 << v->data_dev_block_bits, GFP_KERNEL);
@@ -1081,11 +1079,11 @@ static int verity_alloc_zero_digest(struct dm_verity *v)
 	if (!zero_data)
 		goto out;
 
-	r = verity_hash(v, req, zero_data, 1 << v->data_dev_block_bits,
+	r = verity_hash(v, io, zero_data, 1 << v->data_dev_block_bits,
 			v->zero_digest, true);
 
 out:
-	kfree(req);
+	kfree(io);
 	kfree(zero_data);
 
 	return r;
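verity_verify_io() above batches data blocks into io->pending_blocks and flushes them through verity_hash_mb() whenever the queue reaches v->mb_max_msgs, plus once more at the end for any remainder. The same accumulate-then-flush idiom in isolation; every name below is made up for illustration:

```c
/*
 * Illustrative sketch of the batching idiom used by verity_verify_io():
 * queue items up to a batch limit, flush on full, then flush the partial
 * batch once the loop is done. All names are hypothetical.
 */
#define EXAMPLE_BATCH_MAX 2

struct example_batch {
	int num_pending;
	const void *items[EXAMPLE_BATCH_MAX];
};

static int example_process(struct example_batch *b)
{
	/* e.g. hash all queued items in one multibuffer call */
	b->num_pending = 0;
	return 0;
}

static int example_queue(struct example_batch *b, const void *item)
{
	b->items[b->num_pending] = item;
	if (++b->num_pending == EXAMPLE_BATCH_MAX)
		return example_process(b);	/* flush when full */
	return 0;
}

static int example_run(struct example_batch *b, const void **all, int n)
{
	int i, r;

	for (i = 0; i < n; i++) {
		r = example_queue(b, all[i]);
		if (r)
			return r;
	}
	return b->num_pending ? example_process(b) : 0; /* flush remainder */
}
```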
@@ -1206,6 +1204,118 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
 	return r;
 }
 
+static int verity_setup_hash_alg(struct dm_verity *v, const char *alg_name)
+{
+	struct dm_target *ti = v->ti;
+	struct crypto_ahash *ahash;
+	struct crypto_shash *shash = NULL;
+	const char *driver_name;
+
+	v->alg_name = kstrdup(alg_name, GFP_KERNEL);
+	if (!v->alg_name) {
+		ti->error = "Cannot allocate algorithm name";
+		return -ENOMEM;
+	}
+
+	/*
+	 * Allocate the hash transformation object that this dm-verity instance
+	 * will use.  The vast majority of dm-verity users use CPU-based
+	 * hashing, so when possible use the shash API to minimize the crypto
+	 * API overhead, especially when multibuffer hashing is used.  If the
+	 * ahash API resolves to a different driver (likely an off-CPU hardware
+	 * offload), use ahash instead.  Also use ahash if the obsolete
+	 * dm-verity format with the appended salt is being used, so that quirk
+	 * only needs to be handled in one place.
+	 */
+	ahash = crypto_alloc_ahash(alg_name, 0,
+				   v->use_tasklet ? CRYPTO_ALG_ASYNC : 0);
+	if (IS_ERR(ahash)) {
+		ti->error = "Cannot initialize hash function";
+		return PTR_ERR(ahash);
+	}
+	driver_name = crypto_ahash_driver_name(ahash);
+	if (v->version >= 1 /* salt prepended, not appended? */) {
+		shash = crypto_alloc_shash(alg_name, 0, 0);
+		if (!IS_ERR(shash) &&
+		    strcmp(crypto_shash_driver_name(shash), driver_name) != 0) {
+			/*
+			 * ahash gave a different driver than shash, so probably
+			 * this is a case of real hardware offload.  Use ahash.
+			 */
+			crypto_free_shash(shash);
+			shash = NULL;
+		}
+	}
+	if (!IS_ERR_OR_NULL(shash)) {
+		crypto_free_ahash(ahash);
+		ahash = NULL;
+		v->shash_tfm = shash;
+		v->digest_size = crypto_shash_digestsize(shash);
+		v->hash_reqsize = sizeof(struct shash_desc) +
+				  crypto_shash_descsize(shash);
+		v->mb_max_msgs = min(crypto_shash_mb_max_msgs(shash),
+				     DM_VERITY_MAX_PENDING_DATA_BLOCKS);
+		DMINFO("%s using shash \"%s\"%s", alg_name, driver_name,
+		       v->mb_max_msgs > 1 ? " (multibuffer)" : "");
+	} else {
+		v->ahash_tfm = ahash;
+		static_branch_inc(&ahash_enabled);
+		v->digest_size = crypto_ahash_digestsize(ahash);
+		v->hash_reqsize = sizeof(struct ahash_request) +
+				  crypto_ahash_reqsize(ahash);
+		v->mb_max_msgs = 1;
+		DMINFO("%s using ahash \"%s\"", alg_name, driver_name);
+	}
+	if ((1 << v->hash_dev_block_bits) < v->digest_size * 2) {
+		ti->error = "Digest size too big";
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int verity_setup_salt_and_hashstate(struct dm_verity *v, const char *arg)
+{
+	struct dm_target *ti = v->ti;
+
+	if (strcmp(arg, "-") != 0) {
+		v->salt_size = strlen(arg) / 2;
+		v->salt = kmalloc(v->salt_size, GFP_KERNEL);
+		if (!v->salt) {
+			ti->error = "Cannot allocate salt";
+			return -ENOMEM;
+		}
+		if (strlen(arg) != v->salt_size * 2 ||
+		    hex2bin(v->salt, arg, v->salt_size)) {
+			ti->error = "Invalid salt";
+			return -EINVAL;
+		}
+	}
+	if (v->shash_tfm) {
+		SHASH_DESC_ON_STACK(desc, v->shash_tfm);
+		int r;
+
+		/*
+		 * Compute the pre-salted hash state that can be passed to
+		 * crypto_shash_import() for each block later.
+		 */
+		v->initial_hashstate = kmalloc(
+			crypto_shash_statesize(v->shash_tfm), GFP_KERNEL);
+		if (!v->initial_hashstate) {
+			ti->error = "Cannot allocate initial hash state";
+			return -ENOMEM;
+		}
+		desc->tfm = v->shash_tfm;
+		r = crypto_shash_init(desc) ?:
+		    crypto_shash_update(desc, v->salt, v->salt_size) ?:
+		    crypto_shash_export(desc, v->initial_hashstate);
+		if (r) {
+			ti->error = "Cannot set up initial hash state";
+			return r;
+		}
+	}
+	return 0;
+}
+
 /*
  * Target parameters:
  *	<version>	The current format is version 1.
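verity_setup_salt_and_hashstate() above is the export half of the import+finup optimization: the state is exported once after absorbing the salt, then resumed per block. That half in isolation, assuming <crypto/hash.h>; the helper name is hypothetical:

```c
/*
 * Illustrative sketch: precompute the exported "salted" state that
 * crypto_shash_import() can later resume from, as the setup function
 * above does for v->initial_hashstate.
 */
static void *example_make_salted_state(struct crypto_shash *tfm,
				       const u8 *salt, size_t salt_size)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	void *state = kmalloc(crypto_shash_statesize(tfm), GFP_KERNEL);

	if (!state)
		return NULL;
	desc->tfm = tfm;
	if (crypto_shash_init(desc) ?:
	    crypto_shash_update(desc, salt, salt_size) ?:
	    crypto_shash_export(desc, state)) {
		kfree(state);
		return NULL;
	}
	return state;	/* import this before each per-block finup */
}
```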
@@ -1330,38 +1440,9 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	}
 	v->hash_start = num_ll;
 
-	v->alg_name = kstrdup(argv[7], GFP_KERNEL);
-	if (!v->alg_name) {
-		ti->error = "Cannot allocate algorithm name";
-		r = -ENOMEM;
-		goto bad;
-	}
-
-	v->tfm = crypto_alloc_ahash(v->alg_name, 0,
-				    v->use_tasklet ? CRYPTO_ALG_ASYNC : 0);
-	if (IS_ERR(v->tfm)) {
-		ti->error = "Cannot initialize hash function";
-		r = PTR_ERR(v->tfm);
-		v->tfm = NULL;
-		goto bad;
-	}
-
-	/*
-	 * dm-verity performance can vary greatly depending on which hash
-	 * algorithm implementation is used.  Help people debug performance
-	 * problems by logging the ->cra_driver_name.
-	 */
-	DMINFO("%s using implementation \"%s\"", v->alg_name,
-	       crypto_hash_alg_common(v->tfm)->base.cra_driver_name);
-
-	v->digest_size = crypto_ahash_digestsize(v->tfm);
-	if ((1 << v->hash_dev_block_bits) < v->digest_size * 2) {
-		ti->error = "Digest size too big";
-		r = -EINVAL;
+	r = verity_setup_hash_alg(v, argv[7]);
+	if (r)
 		goto bad;
-	}
-	v->ahash_reqsize = sizeof(struct ahash_request) +
-		crypto_ahash_reqsize(v->tfm);
 
 	v->root_digest = kmalloc(v->digest_size, GFP_KERNEL);
 	if (!v->root_digest) {
@@ -1377,21 +1458,9 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	}
 	root_hash_digest_to_validate = argv[8];
 
-	if (strcmp(argv[9], "-")) {
-		v->salt_size = strlen(argv[9]) / 2;
-		v->salt = kmalloc(v->salt_size, GFP_KERNEL);
-		if (!v->salt) {
-			ti->error = "Cannot allocate salt";
-			r = -ENOMEM;
-			goto bad;
-		}
-		if (strlen(argv[9]) != v->salt_size * 2 ||
-		    hex2bin(v->salt, argv[9], v->salt_size)) {
-			ti->error = "Invalid salt";
-			r = -EINVAL;
-			goto bad;
-		}
-	}
+	r = verity_setup_salt_and_hashstate(v, argv[9]);
+	if (r)
+		goto bad;
 
 	argv += 10;
 	argc -= 10;
@@ -1493,8 +1562,7 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad;
 	}
 
-	ti->per_io_data_size = sizeof(struct dm_verity_io) +
-		v->ahash_reqsize + v->digest_size * 2;
+	ti->per_io_data_size = sizeof(struct dm_verity_io) + v->hash_reqsize;
 
 	r = verity_fec_ctr(v);
 	if (r)
@@ -39,9 +39,11 @@ struct dm_verity {
 	struct dm_target *ti;
 	struct dm_bufio_client *bufio;
 	char *alg_name;
-	struct crypto_ahash *tfm;
+	struct crypto_ahash *ahash_tfm;	/* either this or shash_tfm is set */
+	struct crypto_shash *shash_tfm;	/* either this or ahash_tfm is set */
 	u8 *root_digest;	/* digest of the root block */
 	u8 *salt;		/* salt: its size is salt_size */
+	u8 *initial_hashstate;	/* salted initial state, if shash_tfm is set */
 	u8 *zero_digest;	/* digest for a zero block */
 	unsigned int salt_size;
 	sector_t data_start;	/* data offset in 512-byte sectors */
@@ -55,8 +57,9 @@ struct dm_verity {
 	unsigned char version;
 	bool hash_failed:1;	/* set if hash of any block failed */
 	bool use_tasklet:1;	/* try to verify in tasklet before work-queue */
+	unsigned char mb_max_msgs; /* max multibuffer hashing interleaving factor */
 	unsigned int digest_size;	/* digest size for the current hash algorithm */
-	unsigned int ahash_reqsize;/* the size of temporary space for crypto */
+	unsigned int hash_reqsize; /* the size of temporary space for crypto */
 	enum verity_mode mode;	/* mode for handling verification errors */
 	unsigned int corrupted_errs;/* Number of errors for corrupted blocks */
 
@@ -74,6 +77,15 @@ struct dm_verity {
 	mempool_t recheck_pool;
 };
 
+#define DM_VERITY_MAX_PENDING_DATA_BLOCKS	HASH_MAX_MB_MSGS
+
+struct pending_block {
+	void *data;
+	sector_t blkno;
+	u8 want_digest[HASH_MAX_DIGESTSIZE];
+	u8 real_digest[HASH_MAX_DIGESTSIZE];
+};
+
 struct dm_verity_io {
 	struct dm_verity *v;
 
@@ -88,45 +100,32 @@ struct dm_verity_io {
 
 	struct work_struct work;
 
-	char *recheck_buffer;
+	u8 tmp_digest[HASH_MAX_DIGESTSIZE];
 
 	/*
-	 * Three variably-size fields follow this struct:
-	 *
-	 * u8 hash_req[v->ahash_reqsize];
-	 * u8 real_digest[v->digest_size];
-	 * u8 want_digest[v->digest_size];
-	 *
-	 * To access them use: verity_io_hash_req(), verity_io_real_digest()
-	 * and verity_io_want_digest().
+	 * This is the queue of data blocks that are pending verification.  We
+	 * allow multiple blocks to be queued up in order to support multibuffer
+	 * hashing, i.e. interleaving the hashing of multiple messages.  On many
+	 * CPUs this improves performance significantly.
+	 */
+	int num_pending;
+	struct pending_block pending_blocks[DM_VERITY_MAX_PENDING_DATA_BLOCKS];
+
+	/*
+	 * This struct is followed by a variable-sized hash request of size
+	 * v->hash_reqsize, either a struct ahash_request or a struct shash_desc
+	 * (depending on whether ahash_tfm or shash_tfm is being used).  To
+	 * access it, use verity_io_hash_req().
 	 */
 };
 
-static inline struct ahash_request *verity_io_hash_req(struct dm_verity *v,
-						       struct dm_verity_io *io)
+static inline void *verity_io_hash_req(struct dm_verity *v,
+				       struct dm_verity_io *io)
 {
-	return (struct ahash_request *)(io + 1);
+	return io + 1;
}
 
-static inline u8 *verity_io_real_digest(struct dm_verity *v,
-					struct dm_verity_io *io)
-{
-	return (u8 *)(io + 1) + v->ahash_reqsize;
-}
-
-static inline u8 *verity_io_want_digest(struct dm_verity *v,
-					struct dm_verity_io *io)
-{
-	return (u8 *)(io + 1) + v->ahash_reqsize + v->digest_size;
-}
-
-extern int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
-			       struct bvec_iter *iter,
-			       int (*process)(struct dm_verity *v,
-					      struct dm_verity_io *io,
-					      u8 *data, size_t len));
-
-extern int verity_hash(struct dm_verity *v, struct ahash_request *req,
+extern int verity_hash(struct dm_verity *v, struct dm_verity_io *io,
 		       const u8 *data, size_t len, u8 *digest, bool may_sleep);
 
 extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
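verity_io_hash_req() above returns "io + 1": the hash request lives in unnamed storage allocated directly after the fixed-size struct (per_io_data_size is sizeof(struct dm_verity_io) + v->hash_reqsize). The trailing-storage idiom in isolation, with hypothetical names:

```c
/*
 * Illustrative sketch of the trailing-storage idiom behind
 * verity_io_hash_req(): allocate the fixed struct plus a variable-sized
 * tail in one block, and address the tail as "one past" the struct.
 */
struct example_io {
	int fixed_fields;
	/* followed by reqsize bytes of hash-request storage */
};

static struct example_io *example_io_alloc(size_t reqsize)
{
	return kmalloc(sizeof(struct example_io) + reqsize, GFP_KERNEL);
}

static void *example_io_hash_req(struct example_io *io)
{
	return io + 1;	/* first byte after the fixed-size header */
}
```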
@@ -195,7 +195,6 @@ static int vdec_op_s_ctrl(struct v4l2_ctrl *ctrl)
 	struct vdec_t *vdec = inst->priv;
 	int ret = 0;
 
-	vpu_inst_lock(inst);
 	switch (ctrl->id) {
 	case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE:
 		vdec->params.display_delay_enable = ctrl->val;
@@ -207,7 +206,6 @@ static int vdec_op_s_ctrl(struct v4l2_ctrl *ctrl)
 		ret = -EINVAL;
 		break;
 	}
-	vpu_inst_unlock(inst);
 
 	return ret;
 }
@@ -518,7 +518,6 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
 	struct venc_t *venc = inst->priv;
 	int ret = 0;
 
-	vpu_inst_lock(inst);
 	switch (ctrl->id) {
 	case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
 		venc->params.profile = ctrl->val;
@@ -579,7 +578,6 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
 		ret = -EINVAL;
 		break;
 	}
-	vpu_inst_unlock(inst);
 
 	return ret;
 }
@@ -680,6 +678,9 @@ static int venc_ctrl_init(struct vpu_inst *inst)
 			  ~(1 << V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME),
 			  V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME);
 
+	v4l2_ctrl_new_std(&inst->ctrl_handler, NULL,
+			  V4L2_CID_MPEG_VIDEO_AVERAGE_QP, 0, 51, 1, 0);
+
 	if (inst->ctrl_handler.error) {
 		ret = inst->ctrl_handler.error;
 		v4l2_ctrl_handler_free(&inst->ctrl_handler);
@@ -819,6 +820,7 @@ static int venc_get_one_encoded_frame(struct vpu_inst *inst,
 	vbuf->field = inst->cap_format.field;
 	vbuf->flags |= frame->info.pic_type;
 	vpu_set_buffer_state(vbuf, VPU_BUF_STATE_IDLE);
+	vpu_set_buffer_average_qp(vbuf, frame->info.average_qp);
 	dev_dbg(inst->dev, "[%d][OUTPUT TS]%32lld\n", inst->id, vbuf->vb2_buf.timestamp);
 	v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
 	venc->ready_count++;
@@ -306,6 +306,7 @@ struct vpu_vb2_buffer {
 	dma_addr_t chroma_v;
 	unsigned int state;
 	u32 tag;
+	u32 average_qp;
 };
 
 void vpu_writel(struct vpu_dev *vpu, u32 reg, u32 val);
@@ -114,6 +114,7 @@ struct vpu_enc_pic_info {
 	u32 wptr;
 	u32 crc;
 	s64 timestamp;
+	u32 average_qp;
 };
 
 struct vpu_dec_codec_info {
@@ -63,6 +63,13 @@ unsigned int vpu_get_buffer_state(struct vb2_v4l2_buffer *vbuf)
 	return vpu_buf->state;
 }
 
+void vpu_set_buffer_average_qp(struct vb2_v4l2_buffer *vbuf, u32 qp)
+{
+	struct vpu_vb2_buffer *vpu_buf = to_vpu_vb2_buffer(vbuf);
+
+	vpu_buf->average_qp = qp;
+}
+
 void vpu_v4l2_set_error(struct vpu_inst *inst)
 {
 	vpu_inst_lock(inst);
@@ -539,6 +546,15 @@ static void vpu_vb2_buf_finish(struct vb2_buffer *vb)
 	struct vpu_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
 	struct vb2_queue *q = vb->vb2_queue;
 
+	if (V4L2_TYPE_IS_CAPTURE(vb->type)) {
+		struct vpu_vb2_buffer *vpu_buf = to_vpu_vb2_buffer(vbuf);
+		struct v4l2_ctrl *ctrl = v4l2_ctrl_find(&inst->ctrl_handler,
+							V4L2_CID_MPEG_VIDEO_AVERAGE_QP);
+
+		if (ctrl)
+			v4l2_ctrl_s_ctrl(ctrl, vpu_buf->average_qp);
+	}
+
 	if (vbuf->flags & V4L2_BUF_FLAG_LAST)
 		vpu_notify_eos(inst);
 
@@ -12,6 +12,7 @@ void vpu_inst_lock(struct vpu_inst *inst);
 void vpu_inst_unlock(struct vpu_inst *inst);
 void vpu_set_buffer_state(struct vb2_v4l2_buffer *vbuf, unsigned int state);
 unsigned int vpu_get_buffer_state(struct vb2_v4l2_buffer *vbuf);
+void vpu_set_buffer_average_qp(struct vb2_v4l2_buffer *vbuf, u32 qp);
 
 int vpu_v4l2_open(struct file *file, struct vpu_inst *inst);
 int vpu_v4l2_close(struct file *file);
@@ -499,6 +499,7 @@ struct windsor_pic_info {
 	u32 proc_dacc_rng_wr_cnt;
 	s32 tv_s;
 	u32 tv_ns;
+	u32 average_qp;
 };
 
 u32 vpu_windsor_get_data_size(void)
@@ -734,6 +735,7 @@ static void vpu_windsor_unpack_pic_info(struct vpu_rpc_event *pkt, void *data)
 	info->wptr = get_ptr(windsor->str_buff_wptr);
 	info->crc = windsor->frame_crc;
 	info->timestamp = timespec64_to_ns(&ts);
+	info->average_qp = windsor->average_qp;
 }
 
 static void vpu_windsor_unpack_mem_req(struct vpu_rpc_event *pkt, void *data)
@@ -970,6 +970,7 @@ const char *v4l2_ctrl_get_name(u32 id)
 	case V4L2_CID_MPEG_VIDEO_LTR_COUNT:	return "LTR Count";
 	case V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX:	return "Frame LTR Index";
 	case V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES:	return "Use LTR Frames";
+	case V4L2_CID_MPEG_VIDEO_AVERAGE_QP:	return "Average QP Value";
 	case V4L2_CID_FWHT_I_FRAME_QP:	return "FWHT I-Frame QP Value";
 	case V4L2_CID_FWHT_P_FRAME_QP:	return "FWHT P-Frame QP Value";
 
@@ -1507,6 +1508,10 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
 		*max = 0xffffffffffffLL;
 		*step = 1;
 		break;
+	case V4L2_CID_MPEG_VIDEO_AVERAGE_QP:
+		*type = V4L2_CTRL_TYPE_INTEGER;
+		*flags |= V4L2_CTRL_FLAG_READ_ONLY;
+		break;
 	case V4L2_CID_PIXEL_RATE:
 		*type = V4L2_CTRL_TYPE_INTEGER64;
 		*flags |= V4L2_CTRL_FLAG_READ_ONLY;
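With the control registered read-only and refreshed at buffer-finish time, userspace can poll it after each dequeued capture buffer. A minimal userspace sketch of the assumed usage (not part of the patch); it requires a kernel carrying the V4L2_CID_MPEG_VIDEO_AVERAGE_QP control added above:

```c
/*
 * Userspace sketch (assumed usage): query the per-frame average QP after
 * dequeueing an encoded capture buffer.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int example_read_average_qp(int fd)
{
	struct v4l2_control ctrl = { .id = V4L2_CID_MPEG_VIDEO_AVERAGE_QP };

	if (ioctl(fd, VIDIOC_G_CTRL, &ctrl) < 0) {
		perror("VIDIOC_G_CTRL");
		return -1;
	}
	printf("average QP of last encoded frame: %d\n", ctrl.value);
	return ctrl.value;
}
```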
@@ -46,7 +46,6 @@
 #include <linux/mmc/host.h>
 #include <linux/mmc/mmc.h>
 #include <linux/mmc/sd.h>
-#include <trace/hooks/mmc.h>
 
 #include <linux/uaccess.h>
 
@@ -3030,7 +3029,6 @@ static int mmc_blk_probe(struct mmc_card *card)
 		ret = PTR_ERR(md);
 		goto out_free;
 	}
-	trace_android_vh_mmc_update_mmc_queue(card, &md->queue);
 
 	ret = mmc_blk_alloc_parts(card, md);
 	if (ret)
@@ -1538,6 +1538,12 @@ static int thermal_pm_notify(struct notifier_block *nb,
 		list_for_each_entry(tz, &thermal_tz_list, node) {
 			mutex_lock(&tz->lock);
 
+			trace_android_vh_thermal_pm_notify_suspend(tz, &irq_wakeable);
+			if (irq_wakeable) {
+				mutex_unlock(&tz->lock);
+				continue;
+			}
+
 			tz->suspended = true;
 
 			mutex_unlock(&tz->lock);
@@ -1553,11 +1559,13 @@ static int thermal_pm_notify(struct notifier_block *nb,
 		list_for_each_entry(tz, &thermal_tz_list, node) {
 			mutex_lock(&tz->lock);
 
-			tz->suspended = false;
-
 			trace_android_vh_thermal_pm_notify_suspend(tz, &irq_wakeable);
-			if (irq_wakeable)
+			if (irq_wakeable) {
+				mutex_unlock(&tz->lock);
 				continue;
+			}
+
+			tz->suspended = false;
 
 			thermal_zone_device_init(tz);
 			__thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
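Both thermal hunks above hinge on the same detail: once a per-zone mutex is taken at the top of the loop body, every early "continue" must drop it first, otherwise that zone's lock is left held forever. The pattern in isolation; should_skip() and do_update() are hypothetical stand-ins:

```c
/*
 * Illustrative sketch of the locking pattern used in the hunks above:
 * release the per-item lock on every path out of the loop body.
 */
list_for_each_entry(tz, &example_list, node) {
	mutex_lock(&tz->lock);
	if (should_skip(tz)) {
		mutex_unlock(&tz->lock);	/* do not leak the lock */
		continue;
	}
	do_update(tz);
	mutex_unlock(&tz->lock);
}
```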
@@ -6,6 +6,9 @@
 #include <ufs/ufshcd.h>
 #include "ufshcd-crypto.h"
 
+#undef CREATE_TRACE_POINTS
+#include <trace/hooks/ufshcd.h>
+
 /* Blk-crypto modes supported by UFS crypto */
 static const struct ufs_crypto_alg_entry {
 	enum ufs_crypto_alg ufs_alg;
@@ -122,7 +125,13 @@ bool ufshcd_crypto_enable(struct ufs_hba *hba)
 		return false;
 
 	/* Reset might clear all keys, so reprogram all the keys. */
-	blk_crypto_reprogram_all_keys(&hba->crypto_profile);
+	if (hba->crypto_profile.num_slots) {
+		int err = -EOPNOTSUPP;
+
+		trace_android_rvh_ufs_reprogram_all_keys(hba, &err);
+		if (err == -EOPNOTSUPP)
+			blk_crypto_reprogram_all_keys(&hba->crypto_profile);
+	}
 
 	if (hba->android_quirks & UFSHCD_ANDROID_QUIRK_BROKEN_CRYPTO_ENABLE)
 		return false;
@@ -1396,8 +1396,6 @@ static int ufshcd_devfreq_target(struct device *dev,
 	struct list_head *clk_list = &hba->clk_list_head;
 	struct ufs_clk_info *clki;
 	unsigned long irq_flags;
-	bool force_out = false;
-	bool force_scaling = false;
 
 	if (!ufshcd_is_clkscaling_supported(hba))
 		return -EINVAL;
@@ -1430,11 +1428,8 @@ static int ufshcd_devfreq_target(struct device *dev,
 	scale_up = *freq == clki->max_freq;
 	if (!scale_up)
 		*freq = clki->min_freq;
-
-	trace_android_vh_ufs_clock_scaling(hba, &force_out, &force_scaling, &scale_up);
-
 	/* Update the frequency */
-	if (force_out || (!force_scaling && !ufshcd_is_devfreq_scaling_required(hba, scale_up))) {
+	if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
 		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 		ret = 0;
 		goto out; /* no state change required */
@@ -8850,6 +8845,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
 	/* Enable Auto-Hibernate if configured */
 	ufshcd_auto_hibern8_enable(hba);
 
+	trace_android_rvh_ufs_complete_init(hba);
 out:
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	if (ret)
@@ -228,6 +228,27 @@ out:
 	return ret;
 }
 
+/*
+ * We must have interrupts enabled when making the hypercall to switch to
+ * guest vcpu, else guest vcpu runs until end of hypervisor scheduling time
+ * slice and also increases interrupt latency. Native vtime accounting
+ * requires that interrupts are disabled, so we can't do accounting.
+ */
+#if IS_ENABLED(CONFIG_TICK_CPU_ACCOUNTING)
+static inline void gh_guest_accounting_enter(void)
+{
+	vtime_account_guest_enter();
+}
+
+static inline void gh_guest_accounting_exit(void)
+{
+	vtime_account_guest_exit();
+}
+#else /* !CONFIG_TICK_CPU_ACCOUNTING */
+static inline void gh_guest_accounting_enter(void) { }
+static inline void gh_guest_accounting_exit(void) { }
+#endif /* CONFIG_TICK_CPU_ACCOUNTING */
+
 /**
  * gunyah_vcpu_run() - Request Gunyah to begin scheduling this vCPU.
  * @vcpu: The client descriptor that was obtained via gunyah_vcpu_alloc()
@@ -287,8 +308,10 @@ static int gunyah_vcpu_run(struct gunyah_vcpu *vcpu)
 			goto out;
 	}
 
+	gh_guest_accounting_enter();
 	gunyah_error = gunyah_hypercall_vcpu_run(
 		vcpu->rsc->capid, resume_data, &vcpu_run_resp);
+	gh_guest_accounting_exit();
 	if (gunyah_error == GUNYAH_ERROR_OK) {
 		memset(resume_data, 0, sizeof(resume_data));
 		switch (vcpu_run_resp.state) {
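The helpers above bracket only the hypercall itself, so the CPU time a vCPU spends inside the guest is charged as guest time when CONFIG_TICK_CPU_ACCOUNTING is enabled, and compile away to nothing otherwise. The same bracketing in a generic run loop; the vcpu type and hypercall below are hypothetical, not Gunyah API:

```c
/*
 * Illustrative sketch (hypothetical driver): charge only the
 * "run the guest" window as guest time, per the accounting helpers above.
 */
static int example_vcpu_loop(struct example_vcpu *vcpu)
{
	int ret;

	do {
		gh_guest_accounting_enter(); /* interrupts must be enabled */
		ret = example_hypercall_run(vcpu);	/* hypothetical */
		gh_guest_accounting_exit();
	} while (ret == 0 && !vcpu->exit_requested);

	return ret;
}
```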
@@ -6,7 +6,11 @@
 # Rewritten to use lists instead of if-statements.
 #
 
+ifdef CONFIG_GKI_HACKS_TO_FIX
 subdir-ccflags-y += -DANDROID_GKI_VFS_EXPORT_ONLY=VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
+else
+subdir-ccflags-y += -DANDROID_GKI_VFS_EXPORT_ONLY=""
+endif
 
 obj-y :=	open.o read_write.o file_table.o super.o \
 		char_dev.o stat.o exec.o pipe.o namei.o fcntl.o \
@@ -1,9 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
 obj-$(CONFIG_EROFS_FS) += erofs.o
-erofs-objs := super.o inode.o data.o namei.o dir.o utils.o sysfs.o
+erofs-objs := super.o inode.o data.o namei.o dir.o sysfs.o
 erofs-$(CONFIG_EROFS_FS_XATTR) += xattr.o
-erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o pcpubuf.o
+erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o zutil.o
 erofs-$(CONFIG_EROFS_FS_ZIP_LZMA) += decompressor_lzma.o
 erofs-$(CONFIG_EROFS_FS_ZIP_DEFLATE) += decompressor_deflate.o
 erofs-$(CONFIG_EROFS_FS_ONDEMAND) += fscache.o
--- a/fs/erofs/compress.h
+++ b/fs/erofs/compress.h
@@ -11,13 +11,12 @@
 struct z_erofs_decompress_req {
 	struct super_block *sb;
 	struct page **in, **out;
-
 	unsigned short pageofs_in, pageofs_out;
 	unsigned int inputsize, outputsize;
 
-	/* indicate the algorithm will be used for decompression */
-	unsigned int alg;
+	unsigned int alg;	/* the algorithm for decompression */
 	bool inplace_io, partial_decoding, fillgaps;
+	gfp_t gfp;	/* allocation flags for extra temporary buffers */
 };
 
 struct z_erofs_decompressor {
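
The new gfp field lets each decompression request carry its own allocation policy for temporary pages; the zdata.c hunk further down fills it with GFP_KERNEL | __GFP_NOFAIL for must-succeed reads and GFP_NOWAIT | __GFP_NORETRY for best-effort readahead. A user-space model of the split (names and flags here are illustrative, not kernel values):

    #include <stdlib.h>

    enum alloc_policy { MUST_SUCCEED, BEST_EFFORT };

    /* Temporary-buffer allocation under a per-request policy: the
     * best-effort path may return NULL and the caller must cope, which
     * is exactly what the new -ENOMEM unwinding in the decompressors does. */
    static void *alloc_tmp(enum alloc_policy policy, size_t size)
    {
        void *buf = malloc(size);

        if (!buf && policy == MUST_SUCCEED)
            abort();    /* models __GFP_NOFAIL: no failure path exists */
        return buf;     /* BEST_EFFORT: NULL is a valid outcome */
    }
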
--- a/fs/erofs/decompressor.c
+++ b/fs/erofs/decompressor.c
@@ -55,7 +55,7 @@ static int z_erofs_load_lz4_config(struct super_block *sb,
 	sbi->lz4.max_distance_pages = distance ?
 					DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
 					LZ4_MAX_DISTANCE_PAGES;
-	return erofs_pcpubuf_growsize(sbi->lz4.max_pclusterblks);
+	return z_erofs_gbuf_growsize(sbi->lz4.max_pclusterblks);
 }
 
 /*
@@ -112,8 +112,9 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
 			victim = availables[--top];
 			get_page(victim);
 		} else {
-			victim = erofs_allocpage(pagepool,
-						 GFP_KERNEL | __GFP_NOFAIL);
+			victim = __erofs_allocpage(pagepool, rq->gfp, true);
+			if (!victim)
+				return -ENOMEM;
 			set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
 		}
 		rq->out[i] = victim;
@@ -159,7 +160,7 @@ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
 docopy:
 	/* Or copy compressed data which can be overlapped to per-CPU buffer */
 	in = rq->in;
-	src = erofs_get_pcpubuf(ctx->inpages);
+	src = z_erofs_get_gbuf(ctx->inpages);
 	if (!src) {
 		DBG_BUGON(1);
 		kunmap_local(inpage);
@@ -266,7 +267,7 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
 	} else if (maptype == 1) {
 		vm_unmap_ram(src, ctx->inpages);
 	} else if (maptype == 2) {
-		erofs_put_pcpubuf(src);
+		z_erofs_put_gbuf(src);
 	} else if (maptype != 3) {
 		DBG_BUGON(1);
 		return -EFAULT;
@@ -329,7 +330,8 @@ static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
 	unsigned int cur = 0, ni = 0, no, pi, po, insz, cnt;
 	u8 *kin;
 
-	DBG_BUGON(rq->outputsize > rq->inputsize);
+	if (rq->outputsize > rq->inputsize)
+		return -EOPNOTSUPP;
 	if (rq->alg == Z_EROFS_COMPRESSION_INTERLACED) {
 		cur = bs - (rq->pageofs_out & (bs - 1));
 		pi = (rq->pageofs_in + rq->inputsize - cur) & ~PAGE_MASK;
--- a/fs/erofs/decompressor_deflate.c
+++ b/fs/erofs/decompressor_deflate.c
@@ -99,7 +99,7 @@ failed:
 }
 
 int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
-			       struct page **pagepool)
+			       struct page **pgpl)
 {
 	const unsigned int nrpages_out =
 		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
@@ -162,8 +162,12 @@ again:
 		strm->z.avail_out = min_t(u32, outsz, PAGE_SIZE - pofs);
 		outsz -= strm->z.avail_out;
 		if (!rq->out[no]) {
-			rq->out[no] = erofs_allocpage(pagepool,
-					GFP_KERNEL | __GFP_NOFAIL);
+			rq->out[no] = erofs_allocpage(pgpl, rq->gfp);
+			if (!rq->out[no]) {
+				kout = NULL;
+				err = -ENOMEM;
+				break;
+			}
 			set_page_private(rq->out[no],
 					 Z_EROFS_SHORTLIVED_PAGE);
 		}
@@ -215,8 +219,11 @@ again:
 
 			DBG_BUGON(erofs_page_is_managed(EROFS_SB(sb),
 							rq->in[j]));
-			tmppage = erofs_allocpage(pagepool,
-						  GFP_KERNEL | __GFP_NOFAIL);
+			tmppage = erofs_allocpage(pgpl, rq->gfp);
+			if (!tmppage) {
+				err = -ENOMEM;
+				goto failed;
+			}
 			set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
 			copy_highpage(tmppage, rq->in[j]);
 			rq->in[j] = tmppage;
@@ -234,7 +241,7 @@ again:
 			break;
 		}
 	}
-
+failed:
 	if (zlib_inflateEnd(&strm->z) != Z_OK && !err)
 		err = -EIO;
 	if (kout)
--- a/fs/erofs/decompressor_lzma.c
+++ b/fs/erofs/decompressor_lzma.c
@@ -151,7 +151,7 @@ again:
 }
 
 int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
-			    struct page **pagepool)
+			    struct page **pgpl)
 {
 	const unsigned int nrpages_out =
 		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
@@ -218,8 +218,11 @@ again:
 					      PAGE_SIZE - pageofs);
 			outlen -= strm->buf.out_size;
 			if (!rq->out[no] && rq->fillgaps) {	/* deduped */
-				rq->out[no] = erofs_allocpage(pagepool,
-						GFP_KERNEL | __GFP_NOFAIL);
+				rq->out[no] = erofs_allocpage(pgpl, rq->gfp);
+				if (!rq->out[no]) {
+					err = -ENOMEM;
+					break;
+				}
 				set_page_private(rq->out[no],
 						 Z_EROFS_SHORTLIVED_PAGE);
 			}
@@ -261,8 +264,11 @@ again:
 
 			DBG_BUGON(erofs_page_is_managed(EROFS_SB(rq->sb),
 							rq->in[j]));
-			tmppage = erofs_allocpage(pagepool,
-						  GFP_KERNEL | __GFP_NOFAIL);
+			tmppage = erofs_allocpage(pgpl, rq->gfp);
+			if (!tmppage) {
+				err = -ENOMEM;
+				goto failed;
+			}
 			set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
 			copy_highpage(tmppage, rq->in[j]);
 			rq->in[j] = tmppage;
@@ -280,6 +286,7 @@ again:
 			break;
 		}
 	}
+failed:
 	if (no < nrpages_out && strm->buf.out)
 		kunmap(rq->out[no]);
 	if (ni < nrpages_in)
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -438,7 +438,11 @@ void erofs_unregister_sysfs(struct super_block *sb);
 int __init erofs_init_sysfs(void);
 void erofs_exit_sysfs(void);
 
-struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp);
+struct page *__erofs_allocpage(struct page **pagepool, gfp_t gfp, bool tryrsv);
+static inline struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp)
+{
+	return __erofs_allocpage(pagepool, gfp, false);
+}
 static inline void erofs_pagepool_add(struct page **pagepool, struct page *page)
 {
 	set_page_private(page, (unsigned long)*pagepool);
@@ -463,11 +467,11 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
 					  struct erofs_workgroup *egrp);
 int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
 			    int flags);
-void *erofs_get_pcpubuf(unsigned int requiredpages);
-void erofs_put_pcpubuf(void *ptr);
-int erofs_pcpubuf_growsize(unsigned int nrpages);
-void __init erofs_pcpubuf_init(void);
-void erofs_pcpubuf_exit(void);
+void *z_erofs_get_gbuf(unsigned int requiredpages);
+void z_erofs_put_gbuf(void *ptr);
+int z_erofs_gbuf_growsize(unsigned int nrpages);
+int __init z_erofs_gbuf_init(void);
+void z_erofs_gbuf_exit(void);
 int erofs_init_managed_cache(struct super_block *sb);
 int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb);
 #else
@@ -477,8 +481,8 @@ static inline int erofs_init_shrinker(void) { return 0; }
 static inline void erofs_exit_shrinker(void) {}
 static inline int z_erofs_init_zip_subsystem(void) { return 0; }
 static inline void z_erofs_exit_zip_subsystem(void) {}
-static inline void erofs_pcpubuf_init(void) {}
-static inline void erofs_pcpubuf_exit(void) {}
+static inline int z_erofs_gbuf_init(void) { return 0; }
+static inline void z_erofs_gbuf_exit(void) {}
 static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; }
 #endif /* !CONFIG_EROFS_FS_ZIP */
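
internal.h keeps every existing erofs_allocpage() call site working by turning it into an inline wrapper over the extended __erofs_allocpage(); only callers that opt in pass tryrsv=true to dip into the reserved pool. The same back-compat wrapper pattern in miniature (user-space sketch with stub types):

    #include <stdbool.h>
    #include <stdlib.h>

    /* extended entry point: one extra capability flag */
    static void *xalloc(size_t size, bool try_reserved)
    {
        (void)try_reserved;  /* a reserved-pool lookup would go here */
        return malloc(size);
    }

    /* the old name becomes a thin wrapper preserving the old behavior */
    static inline void *old_alloc(size_t size)
    {
        return xalloc(size, false);
    }
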
--- a/fs/erofs/pcpubuf.c
+++ /dev/null
@@ -1,148 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) Gao Xiang <xiang@kernel.org>
- *
- * For low-latency decompression algorithms (e.g. lz4), reserve consecutive
- * per-CPU virtual memory (in pages) in advance to store such inplace I/O
- * data if inplace decompression is failed (due to unmet inplace margin for
- * example).
- */
-#include "internal.h"
-
-struct erofs_pcpubuf {
-	raw_spinlock_t lock;
-	void *ptr;
-	struct page **pages;
-	unsigned int nrpages;
-};
-
-static DEFINE_PER_CPU(struct erofs_pcpubuf, erofs_pcb);
-
-void *erofs_get_pcpubuf(unsigned int requiredpages)
-	__acquires(pcb->lock)
-{
-	struct erofs_pcpubuf *pcb = &get_cpu_var(erofs_pcb);
-
-	raw_spin_lock(&pcb->lock);
-	/* check if the per-CPU buffer is too small */
-	if (requiredpages > pcb->nrpages) {
-		raw_spin_unlock(&pcb->lock);
-		put_cpu_var(erofs_pcb);
-		/* (for sparse checker) pretend pcb->lock is still taken */
-		__acquire(pcb->lock);
-		return NULL;
-	}
-	return pcb->ptr;
-}
-
-void erofs_put_pcpubuf(void *ptr) __releases(pcb->lock)
-{
-	struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, smp_processor_id());
-
-	DBG_BUGON(pcb->ptr != ptr);
-	raw_spin_unlock(&pcb->lock);
-	put_cpu_var(erofs_pcb);
-}
-
-/* the next step: support per-CPU page buffers hotplug */
-int erofs_pcpubuf_growsize(unsigned int nrpages)
-{
-	static DEFINE_MUTEX(pcb_resize_mutex);
-	static unsigned int pcb_nrpages;
-	struct page *pagepool = NULL;
-	int delta, cpu, ret, i;
-
-	mutex_lock(&pcb_resize_mutex);
-	delta = nrpages - pcb_nrpages;
-	ret = 0;
-	/* avoid shrinking pcpubuf, since no idea how many fses rely on */
-	if (delta <= 0)
-		goto out;
-
-	for_each_possible_cpu(cpu) {
-		struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu);
-		struct page **pages, **oldpages;
-		void *ptr, *old_ptr;
-
-		pages = kmalloc_array(nrpages, sizeof(*pages), GFP_KERNEL);
-		if (!pages) {
-			ret = -ENOMEM;
-			break;
-		}
-
-		for (i = 0; i < nrpages; ++i) {
-			pages[i] = erofs_allocpage(&pagepool, GFP_KERNEL);
-			if (!pages[i]) {
-				ret = -ENOMEM;
-				oldpages = pages;
-				goto free_pagearray;
-			}
-		}
-		ptr = vmap(pages, nrpages, VM_MAP, PAGE_KERNEL);
-		if (!ptr) {
-			ret = -ENOMEM;
-			oldpages = pages;
-			goto free_pagearray;
-		}
-		raw_spin_lock(&pcb->lock);
-		old_ptr = pcb->ptr;
-		pcb->ptr = ptr;
-		oldpages = pcb->pages;
-		pcb->pages = pages;
-		i = pcb->nrpages;
-		pcb->nrpages = nrpages;
-		raw_spin_unlock(&pcb->lock);
-
-		if (!oldpages) {
-			DBG_BUGON(old_ptr);
-			continue;
-		}
-
-		if (old_ptr)
-			vunmap(old_ptr);
-free_pagearray:
-		while (i)
-			erofs_pagepool_add(&pagepool, oldpages[--i]);
-		kfree(oldpages);
-		if (ret)
-			break;
-	}
-	pcb_nrpages = nrpages;
-	erofs_release_pages(&pagepool);
-out:
-	mutex_unlock(&pcb_resize_mutex);
-	return ret;
-}
-
-void __init erofs_pcpubuf_init(void)
-{
-	int cpu;
-
-	for_each_possible_cpu(cpu) {
-		struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu);
-
-		raw_spin_lock_init(&pcb->lock);
-	}
-}
-
-void erofs_pcpubuf_exit(void)
-{
-	int cpu, i;
-
-	for_each_possible_cpu(cpu) {
-		struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu);
-
-		if (pcb->ptr) {
-			vunmap(pcb->ptr);
-			pcb->ptr = NULL;
-		}
-		if (!pcb->pages)
-			continue;
-
-		for (i = 0; i < pcb->nrpages; ++i)
-			if (pcb->pages[i])
-				put_page(pcb->pages[i]);
-		kfree(pcb->pages);
-		pcb->pages = NULL;
-	}
-}
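
fs/erofs/pcpubuf.c disappears entirely: one vmapped buffer per possible CPU is replaced by a bounded pool of global buffers (see the zutil.c hunks below), sized by the global_buffers module parameter and picked by hashing the running CPU into the pool. The selection itself is just:

    /* round-robin-by-CPU pick into a fixed-size pool; with
     * gbuf_count == num_possible_cpus() this degenerates to the old
     * one-buffer-per-CPU layout */
    static unsigned int gbuf_id(unsigned int cpu, unsigned int gbuf_count)
    {
        return cpu % gbuf_count;
    }
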
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -856,7 +856,10 @@ static int __init erofs_module_init(void)
 	if (err)
 		goto deflate_err;
 
-	erofs_pcpubuf_init();
+	err = z_erofs_gbuf_init();
+	if (err)
+		goto gbuf_err;
+
 	err = z_erofs_init_zip_subsystem();
 	if (err)
 		goto zip_err;
@@ -876,6 +879,8 @@ fs_err:
 sysfs_err:
 	z_erofs_exit_zip_subsystem();
 zip_err:
+	z_erofs_gbuf_exit();
+gbuf_err:
 	z_erofs_deflate_exit();
 deflate_err:
 	z_erofs_lzma_exit();
@@ -899,7 +904,7 @@ static void __exit erofs_module_exit(void)
 	z_erofs_lzma_exit();
 	erofs_exit_shrinker();
 	kmem_cache_destroy(erofs_inode_cachep);
-	erofs_pcpubuf_exit();
+	z_erofs_gbuf_exit();
 }
 
 static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
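
Since z_erofs_gbuf_init() can now fail (it allocates the pool up front), module init gains a matching unwind label; each subsystem's cleanup runs in exactly the reverse order of its init. The idiom in isolation (user-space sketch):

    #include <stdio.h>

    static int init_a(void) { return 0; }
    static int init_b(void) { return -1; }  /* pretend this one fails */
    static void exit_a(void) { puts("undo a"); }

    int module_init_model(void)
    {
        int err = init_a();

        if (err)
            goto a_err;
        err = init_b();
        if (err)
            goto b_err;
        return 0;

    b_err:
        exit_a();   /* unwind only what succeeded, in reverse order */
    a_err:
        return err;
    }
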
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -82,6 +82,9 @@ struct z_erofs_pcluster {
 	/* L: indicate several pageofs_outs or not */
 	bool multibases;
 
+	/* L: whether extra buffer allocations are best-effort */
+	bool besteffort;
+
 	/* A: compressed bvecs (can be cached or inplaced pages) */
 	struct z_erofs_bvec compressed_bvecs[];
 };
@@ -960,7 +963,7 @@ static int z_erofs_read_fragment(struct super_block *sb, struct page *page,
 }
 
 static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
-				struct page *page)
+				struct page *page, bool ra)
 {
 	struct inode *const inode = fe->inode;
 	struct erofs_map_blocks *const map = &fe->map;
@@ -1010,6 +1013,7 @@ repeat:
 		err = z_erofs_pcluster_begin(fe);
 		if (err)
 			goto out;
+		fe->pcl->besteffort |= !ra;
 	}
 
 	/*
@@ -1285,6 +1289,9 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
 					.inplace_io = overlapped,
 					.partial_decoding = pcl->partial,
 					.fillgaps = pcl->multibases,
+					.gfp = pcl->besteffort ?
+						GFP_KERNEL | __GFP_NOFAIL :
+						GFP_NOWAIT | __GFP_NORETRY
 				 }, be->pagepool);
 
 out:
@@ -1328,6 +1335,7 @@ out:
 	pcl->length = 0;
 	pcl->partial = true;
 	pcl->multibases = false;
+	pcl->besteffort = false;
 	pcl->bvset.nextpage = NULL;
 	pcl->vcnt = 0;
 
@@ -1797,7 +1805,7 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
 		if (PageUptodate(page))
 			unlock_page(page);
 		else
-			(void)z_erofs_do_read_page(f, page);
+			(void)z_erofs_do_read_page(f, page, !!rac);
 		put_page(page);
 	}
 
@@ -1818,7 +1826,7 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio)
 	f.headoffset = (erofs_off_t)folio->index << PAGE_SHIFT;
 
 	z_erofs_pcluster_readmore(&f, NULL, true);
-	err = z_erofs_do_read_page(&f, &folio->page);
+	err = z_erofs_do_read_page(&f, &folio->page, false);
 	z_erofs_pcluster_readmore(&f, NULL, false);
 	z_erofs_pcluster_end(&f);
 
@@ -1859,7 +1867,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
 		folio = head;
 		head = folio_get_private(folio);
 
-		err = z_erofs_do_read_page(&f, &folio->page);
+		err = z_erofs_do_read_page(&f, &folio->page, true);
 		if (err && err != -EINTR)
 			erofs_err(inode->i_sb, "readahead error at folio %lu @ nid %llu",
 				  folio->index, EROFS_I(inode)->nid);
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -723,6 +723,8 @@ int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
 
 	err = z_erofs_do_map_blocks(inode, map, flags);
 out:
+	if (err)
+		map->m_llen = 0;
 	trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);
 	return err;
 }
--- a/fs/erofs/utils.c
+++ b/fs/erofs/zutil.c
@@ -5,16 +5,185 @@
  */
 #include "internal.h"
 
-struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp)
+struct z_erofs_gbuf {
+	spinlock_t lock;
+	void *ptr;
+	struct page **pages;
+	unsigned int nrpages;
+};
+
+static struct z_erofs_gbuf *z_erofs_gbufpool, *z_erofs_rsvbuf;
+static unsigned int z_erofs_gbuf_count, z_erofs_gbuf_nrpages,
+		z_erofs_rsv_nrpages;
+
+module_param_named(global_buffers, z_erofs_gbuf_count, uint, 0444);
+module_param_named(reserved_pages, z_erofs_rsv_nrpages, uint, 0444);
+
+static atomic_long_t erofs_global_shrink_cnt;	/* for all mounted instances */
+/* protected by 'erofs_sb_list_lock' */
+static unsigned int shrinker_run_no;
+
+/* protects the mounted 'erofs_sb_list' */
+static DEFINE_SPINLOCK(erofs_sb_list_lock);
+static LIST_HEAD(erofs_sb_list);
+
+static unsigned int z_erofs_gbuf_id(void)
+{
+	return raw_smp_processor_id() % z_erofs_gbuf_count;
+}
+
+void *z_erofs_get_gbuf(unsigned int requiredpages)
+	__acquires(gbuf->lock)
+{
+	struct z_erofs_gbuf *gbuf;
+
+	gbuf = &z_erofs_gbufpool[z_erofs_gbuf_id()];
+	spin_lock(&gbuf->lock);
+	/* check if the buffer is too small */
+	if (requiredpages > gbuf->nrpages) {
+		spin_unlock(&gbuf->lock);
+		/* (for sparse checker) pretend gbuf->lock is still taken */
+		__acquire(gbuf->lock);
+		return NULL;
+	}
+	return gbuf->ptr;
+}
+
+void z_erofs_put_gbuf(void *ptr) __releases(gbuf->lock)
+{
+	struct z_erofs_gbuf *gbuf;
+
+	gbuf = &z_erofs_gbufpool[z_erofs_gbuf_id()];
+	DBG_BUGON(gbuf->ptr != ptr);
+	spin_unlock(&gbuf->lock);
+}
+
+int z_erofs_gbuf_growsize(unsigned int nrpages)
+{
+	static DEFINE_MUTEX(gbuf_resize_mutex);
+	struct page **tmp_pages = NULL;
+	struct z_erofs_gbuf *gbuf;
+	void *ptr, *old_ptr;
+	int last, i, j;
+
+	mutex_lock(&gbuf_resize_mutex);
+	/* avoid shrinking gbufs, since no idea how many fses rely on */
+	if (nrpages <= z_erofs_gbuf_nrpages) {
+		mutex_unlock(&gbuf_resize_mutex);
+		return 0;
+	}
+
+	for (i = 0; i < z_erofs_gbuf_count; ++i) {
+		gbuf = &z_erofs_gbufpool[i];
+		tmp_pages = kcalloc(nrpages, sizeof(*tmp_pages), GFP_KERNEL);
+		if (!tmp_pages)
+			goto out;
+
+		for (j = 0; j < gbuf->nrpages; ++j)
+			tmp_pages[j] = gbuf->pages[j];
+		do {
+			last = j;
+			j = alloc_pages_bulk_array(GFP_KERNEL, nrpages,
+						   tmp_pages);
+			if (last == j)
+				goto out;
+		} while (j != nrpages);
+
+		ptr = vmap(tmp_pages, nrpages, VM_MAP, PAGE_KERNEL);
+		if (!ptr)
+			goto out;
+
+		spin_lock(&gbuf->lock);
+		kfree(gbuf->pages);
+		gbuf->pages = tmp_pages;
+		old_ptr = gbuf->ptr;
+		gbuf->ptr = ptr;
+		gbuf->nrpages = nrpages;
+		spin_unlock(&gbuf->lock);
+		if (old_ptr)
+			vunmap(old_ptr);
+	}
+	z_erofs_gbuf_nrpages = nrpages;
+out:
+	if (i < z_erofs_gbuf_count && tmp_pages) {
+		for (j = 0; j < nrpages; ++j)
+			if (tmp_pages[j] && tmp_pages[j] != gbuf->pages[j])
+				__free_page(tmp_pages[j]);
+		kfree(tmp_pages);
+	}
+	mutex_unlock(&gbuf_resize_mutex);
+	return i < z_erofs_gbuf_count ? -ENOMEM : 0;
+}
+
+int __init z_erofs_gbuf_init(void)
+{
+	unsigned int i, total = num_possible_cpus();
+
+	if (z_erofs_gbuf_count)
+		total = min(z_erofs_gbuf_count, total);
+	z_erofs_gbuf_count = total;
+
+	/* The last (special) global buffer is the reserved buffer */
+	total += !!z_erofs_rsv_nrpages;
+
+	z_erofs_gbufpool = kcalloc(total, sizeof(*z_erofs_gbufpool),
+				   GFP_KERNEL);
+	if (!z_erofs_gbufpool)
+		return -ENOMEM;
+
+	if (z_erofs_rsv_nrpages) {
+		z_erofs_rsvbuf = &z_erofs_gbufpool[total - 1];
+		z_erofs_rsvbuf->pages = kcalloc(z_erofs_rsv_nrpages,
+				sizeof(*z_erofs_rsvbuf->pages), GFP_KERNEL);
+		if (!z_erofs_rsvbuf->pages) {
+			z_erofs_rsvbuf = NULL;
+			z_erofs_rsv_nrpages = 0;
+		}
+	}
+	for (i = 0; i < total; ++i)
+		spin_lock_init(&z_erofs_gbufpool[i].lock);
+	return 0;
+}
+
+void z_erofs_gbuf_exit(void)
+{
+	int i, j;
+
+	for (i = 0; i < z_erofs_gbuf_count + (!!z_erofs_rsvbuf); ++i) {
+		struct z_erofs_gbuf *gbuf = &z_erofs_gbufpool[i];
+
+		if (gbuf->ptr) {
+			vunmap(gbuf->ptr);
+			gbuf->ptr = NULL;
+		}
+
+		if (!gbuf->pages)
+			continue;
+
+		for (j = 0; j < gbuf->nrpages; ++j)
+			if (gbuf->pages[j])
+				put_page(gbuf->pages[j]);
+		kfree(gbuf->pages);
+		gbuf->pages = NULL;
+	}
+	kfree(z_erofs_gbufpool);
+}
+
+struct page *__erofs_allocpage(struct page **pagepool, gfp_t gfp, bool tryrsv)
 {
 	struct page *page = *pagepool;
 
 	if (page) {
-		DBG_BUGON(page_ref_count(page) != 1);
 		*pagepool = (struct page *)page_private(page);
-	} else {
-		page = alloc_page(gfp);
+	} else if (tryrsv && z_erofs_rsvbuf && z_erofs_rsvbuf->nrpages) {
+		spin_lock(&z_erofs_rsvbuf->lock);
+		if (z_erofs_rsvbuf->nrpages)
+			page = z_erofs_rsvbuf->pages[--z_erofs_rsvbuf->nrpages];
+		spin_unlock(&z_erofs_rsvbuf->lock);
 	}
+	if (!page)
+		page = alloc_page(gfp);
+	DBG_BUGON(page && page_ref_count(page) != 1);
 	return page;
 }
@@ -24,14 +193,22 @@ void erofs_release_pages(struct page **pagepool)
 		struct page *page = *pagepool;
 
 		*pagepool = (struct page *)page_private(page);
+		/* try to fill reserved global pool first */
+		if (z_erofs_rsvbuf && z_erofs_rsvbuf->nrpages <
+				z_erofs_rsv_nrpages) {
+			spin_lock(&z_erofs_rsvbuf->lock);
+			if (z_erofs_rsvbuf->nrpages < z_erofs_rsv_nrpages) {
+				z_erofs_rsvbuf->pages[z_erofs_rsvbuf->nrpages++]
+						= page;
+				spin_unlock(&z_erofs_rsvbuf->lock);
+				continue;
+			}
+			spin_unlock(&z_erofs_rsvbuf->lock);
+		}
 		put_page(page);
 	}
 }
 
-#ifdef CONFIG_EROFS_FS_ZIP
-/* global shrink count (for all mounted EROFS instances) */
-static atomic_long_t erofs_global_shrink_cnt;
-
 static bool erofs_workgroup_get(struct erofs_workgroup *grp)
 {
 	if (lockref_get_not_zero(&grp->lockref))
@@ -171,13 +348,6 @@ static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
 	return freed;
 }
 
-/* protected by 'erofs_sb_list_lock' */
-static unsigned int shrinker_run_no;
-
-/* protects the mounted 'erofs_sb_list' */
-static DEFINE_SPINLOCK(erofs_sb_list_lock);
-static LIST_HEAD(erofs_sb_list);
-
 void erofs_shrinker_register(struct super_block *sb)
 {
 	struct erofs_sb_info *sbi = EROFS_SB(sb);
@@ -279,4 +449,3 @@ void erofs_exit_shrinker(void)
 {
 	unregister_shrinker(&erofs_shrinker_info);
 }
-#endif /* !CONFIG_EROFS_FS_ZIP */
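
The reserved buffer (reserved_pages module parameter) behaves as a bounded stack: erofs_release_pages() tops it up before freeing pages back to the system, and __erofs_allocpage() pops from it before falling back to alloc_page(), giving best-effort readahead allocations a last-resort supply under memory pressure. A user-space model of that exchange:

    #include <stdlib.h>

    #define RSV_MAX 16              /* models z_erofs_rsv_nrpages */
    static void *rsv[RSV_MAX];
    static unsigned int rsv_n;

    static void *page_alloc_model(void)
    {
        if (rsv_n)
            return rsv[--rsv_n];    /* pop the reserve first */
        return malloc(4096);        /* then the system allocator */
    }

    static void page_free_model(void *p)
    {
        if (rsv_n < RSV_MAX)
            rsv[rsv_n++] = p;       /* refill the reserve */
        else
            free(p);
    }
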
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -1100,7 +1100,7 @@ retry:
 		struct bio *bio = NULL;
 
 		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
-					&last_block_in_bio, false, true);
+					&last_block_in_bio, NULL, true);
 		f2fs_put_rpages(cc);
 		f2fs_destroy_compress_ctx(cc, true);
 		if (ret)
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -28,6 +28,7 @@
 #include "segment.h"
 #include "iostat.h"
 #include <trace/events/f2fs.h>
+#include <trace/hooks/blk.h>
 
 #define NUM_PREALLOC_POST_READ_CTXS	128
 
@@ -2072,12 +2073,20 @@ static inline loff_t f2fs_readpage_limit(struct inode *inode)
 	return i_size_read(inode);
 }
 
+static inline blk_opf_t f2fs_ra_op_flags(struct readahead_control *rac)
+{
+	blk_opf_t op_flag = rac ? REQ_RAHEAD : 0;
+
+	trace_android_vh_f2fs_ra_op_flags(&op_flag, rac);
+	return op_flag;
+}
+
 static int f2fs_read_single_page(struct inode *inode, struct folio *folio,
 					unsigned nr_pages,
 					struct f2fs_map_blocks *map,
 					struct bio **bio_ret,
 					sector_t *last_block_in_bio,
-					bool is_readahead)
+					struct readahead_control *rac)
 {
 	struct bio *bio = *bio_ret;
 	const unsigned blocksize = blks_to_bytes(inode, 1);
@@ -2159,7 +2168,7 @@ submit_and_realloc:
 	}
 	if (bio == NULL) {
 		bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
-				is_readahead ? REQ_RAHEAD : 0, index,
+				f2fs_ra_op_flags(rac), index,
 				false);
 		if (IS_ERR(bio)) {
 			ret = PTR_ERR(bio);
@@ -2196,7 +2205,7 @@ out:
 #ifdef CONFIG_F2FS_FS_COMPRESSION
 int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
 				unsigned nr_pages, sector_t *last_block_in_bio,
-				bool is_readahead, bool for_write)
+				struct readahead_control *rac, bool for_write)
 {
 	struct dnode_of_data dn;
 	struct inode *inode = cc->inode;
@@ -2319,7 +2328,7 @@ submit_and_realloc:
 
 		if (!bio) {
 			bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
-					is_readahead ? REQ_RAHEAD : 0,
+					f2fs_ra_op_flags(rac),
 					page->index, for_write);
 			if (IS_ERR(bio)) {
 				ret = PTR_ERR(bio);
@@ -2417,7 +2426,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
 				ret = f2fs_read_multi_pages(&cc, &bio,
 							max_nr_pages,
 							&last_block_in_bio,
-							rac != NULL, false);
+							rac, false);
 				f2fs_destroy_compress_ctx(&cc, false);
 				if (ret)
 					goto set_error_page;
@@ -2467,7 +2476,7 @@ next_page:
 				ret = f2fs_read_multi_pages(&cc, &bio,
 							max_nr_pages,
 							&last_block_in_bio,
-							rac != NULL, false);
+							rac, false);
 				f2fs_destroy_compress_ctx(&cc, false);
 			}
 		}
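
Threading the readahead_control pointer down in place of a bare is_readahead flag preserves the old behavior (rac != NULL still yields REQ_RAHEAD) while giving the new trace_android_vh_f2fs_ra_op_flags vendor hook the full readahead context to adjust request flags. The shape of the refactor, with stand-in types and flag values:

    struct ra_state { unsigned long nr_to_read; };  /* models readahead_control */

    #define REQ_RAHEAD_MODEL 0x1u

    static unsigned int ra_op_flags(struct ra_state *rac)
    {
        unsigned int op_flag = rac ? REQ_RAHEAD_MODEL : 0;

        /* a vendor hook may inspect rac and tweak op_flag here */
        return op_flag;
    }
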
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -4304,7 +4304,7 @@ void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
 				unsigned int llen, unsigned int c_len);
 int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
 				unsigned nr_pages, sector_t *last_block_in_bio,
-				bool is_readahead, bool for_write);
+				struct readahead_control *rac, bool for_write);
 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
 void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
 				bool in_task);
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -830,6 +830,8 @@ static bool f2fs_force_buffered_io(struct inode *inode, int rw)
 		return true;
 	if (f2fs_compressed_file(inode))
 		return true;
+	if (f2fs_has_inline_data(inode))
+		return true;
 
 	/* disallow direct IO if any of devices has unaligned blksize */
 	if (f2fs_is_multi_device(sbi) && !sbi->aligned_blksize)
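
Inline-data files keep their payload inside the inode block itself, so there is no block-addressable extent for direct I/O to target; falling back to buffered I/O is the only correct behavior. The predicate now reads as a chain of early returns (user-space model with a stand-in inode type):

    #include <stdbool.h>

    struct inode_model {
        bool compressed;
        bool inline_data;
        bool aligned_blksize;
    };

    static bool force_buffered_io(const struct inode_model *i)
    {
        if (i->compressed)
            return true;
        if (i->inline_data)     /* new: data lives in the inode block */
            return true;
        if (!i->aligned_blksize)
            return true;
        return false;
    }
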
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -28,6 +28,7 @@
 #include <linux/part_stat.h>
 #include <linux/zstd.h>
 #include <linux/lz4.h>
+#include <linux/cleancache.h>
 
 #include "f2fs.h"
 #include "node.h"
@@ -4760,6 +4761,8 @@ reset_checkpoint:
 	f2fs_update_time(sbi, CP_TIME);
 	f2fs_update_time(sbi, REQ_TIME);
 	clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
+
+	cleancache_init_fs(sb);
 	return 0;
 
 sync_free_meta: