Merge branch 'android15-6.6' into branch 'android15-6.6-lts'

Catch the -lts branch up with the recent symbol additions and other
changes added in the non-lts branch.  Included here is the resolution
for the "tricky" bluetooth changes being merged together. Commits
included here are:

dc65436adf ANDROID: abi fixups for HCI_AMP removal changes
01acf6e91d UPSTREAM: Bluetooth: HCI: Remove HCI_AMP support
4ac04ce3f6 ANDROID: Update the ABI symbol list
f9770feb3c ANDROID: softirq: Add EXPORT_SYMBOL_GPL for softirq and tasklet
ca917b1dd1 ANDROID: fix the --entry linker flag for kselftest
b8be0f74ab ANDROID: abi_gki_aarch64_qcom: update abi symbols
bd6cceb88b Revert^2 "ANDROID: Enable Rust Binder Module"
9af84a38cf ANDROID: GKI: Update qcom symbol list
bd4cad301a ANDROID: ABI: update symbol list for honor
73e6ecb484 ANDROID: vendor_hooks: add vendor hook for bpf jit
1cfb164127 ANDROID: GKI: update symbol list file for xiaomi
c0dd2a5c2b ANDROID: abi_gki_aarch64_vivo: Update symbol list
f437bda7c4 ANDROID: vendor_hooks: add hook in alloc_and_link_pwqs()
55b691a3a1 ANDROID: GKI: export symbols
92635be7b7 BACKPORT: FROMLIST: usb: host: xhci-plat: Add support for XHCI_WRITE_64_HI_LO
6afea34dbb BACKPORT: FROMLIST: xhci: Add a quirk for writing ERST in high-low order
a12901ecd0 FROMLIST: usb: dwc3: Support quirk for writing high-low order
350c2157ba FROMGIT: KVM: arm64: Use FF-A 1.1 with pKVM
1ae3c27af3 UPSTREAM: firmware: arm_ffa: Update memory descriptor to support v1.1 format
88e3c003e7 UPSTREAM: firmware: arm_ffa: Switch to using ffa_mem_desc_offset() accessor
79b4cf9c57 UPSTREAM: KVM: arm64: FFA: Remove access of endpoint memory access descriptor array
5649c512e3 UPSTREAM: firmware: arm_ffa: Simplify the computation of transmit and fragment length
8e2a94f58a UPSTREAM: firmware: arm_ffa: Add notification handling mechanism
c78f532206 UPSTREAM: firmware: arm_ffa: Add interface to send a notification to a given partition
3000dcf09e UPSTREAM: firmware: arm_ffa: Add interfaces to request notification callbacks
42a0b4c7d9 UPSTREAM: firmware: arm_ffa: Add schedule receiver callback mechanism
31523f6f5f UPSTREAM: firmware: arm_ffa: Initial support for scheduler receiver interrupt
d0db159b74 UPSTREAM: firmware: arm_ffa: Implement the NOTIFICATION_INFO_GET interface
f97ac1ec38 UPSTREAM: firmware: arm_ffa: Implement the FFA_NOTIFICATION_GET interface
c59a1c3b15 UPSTREAM: firmware: arm_ffa: Implement the FFA_NOTIFICATION_SET interface
88ddcfb599 UPSTREAM: firmware: arm_ffa: Implement the FFA_RUN interface
94ec19ed75 UPSTREAM: firmware: arm_ffa: Implement the notification bind and unbind interface
b11c3a7143 UPSTREAM: firmware: arm_ffa: Implement notification bitmap create and destroy interfaces
6cd749aa84 UPSTREAM: firmware: arm_ffa: Update the FF-A command list with v1.1 additions
3de401e96d UPSTREAM: KVM: arm64: Remove FFA_MSG_SEND_DIRECT_REQ from the denylist
94a0645c7e FROMGIT: KVM: arm64: Update the identification range for the FF-A smcs
13ab11c83d BACKPORT: FROMGIT: KVM: arm64: Add support for FFA_PARTITION_INFO_GET
5930b56908 BACKPORT: FROMGIT: KVM: arm64: Trap FFA_VERSION host call in pKVM
3342ce711d ANDROID: fix kernelci build break
3e92cc50b7 ANDROID: Update the ABI symbol list
f84811b9a6 ANDROID: Export functions to be used with dma_map_ops in modules
dc90f62570 ANDROID: GKI: Add initial symbol list for Tuxera
7be06ac9e1 ANDROID: Update the ABI symbol list
30800b0a33 ANDROID: thermal: Add vendor hook for thermal_genl_check
aecd80888c ANDROID: GKI: Add initial lenovo symbol list
06007d8930 ANDROID: KVM: arm64: Drop level from struct hyp_fixmap_slot
7d5179c2a1 ANDROID: GKI: Update Honor abi symbol list
8284931c09 ANDROID: ABI: update Unisoc symbol list for printk_cpuid
08c63cde8c ANDROID: vendor_hooks: add hook to show more vendor cpu info.
5eb138730a ANDROID: Update the ABI symbol list
358f4bcd33 ANDROID: KVM: arm64: Guest page CMOs with PMD_SIZE fixmap at EL2
b6a4560a75 Revert "ANDROID: Enable Rust Binder Module"
3fced63a7b ANDROID: GKI: Add initial sunxi symbol list
47f5b5731e ANDROID: Update the ABI symbol list
6ff1732938 ANDROID: Export sysctl_sched_base_slice
27ef0d81a1 ANDROID: sched: export update_misfit_status symbol
ed558fd9d8 ANDROID: sched: Export symbols needed for vendor hooks
38fdc88203 ANDROID: power: Add vendor hook for suspend
9064ca9a18 ANDROID: topology: Add vendor hook for use_amu_fie
22656cc290 ANDROID: sched: Add trace_android_rvh_set_user_nice_locked
5cb12b53e1 ANDROID: sched: Add vendor hook for update_load_sum
cfb8030501 ANDROID: sched: Add vendor hook for util_fits_cpu
9f6cb7cdd8 ANDROID: fs/proc: Perform priority inheritance around access_remote_vm()
9ad961ed05 ANDROID: sched: Add vendor hook for rt util update
1a7550f7c9 ANDROID: sched: Add vendor hooks for override sugov behavior
b7f2df29a2 ANDROID: Add new hook to enable overriding uclamp_validate()
712f192370 ANDROID: GKI: Update symbol list for vivo
dbe0e69e89 ANDROID: vendor_hooks: add vendor hooks for bd_link_disk_holder
173fe3da85 UPSTREAM: iomap: fault in smaller chunks for non-large folio mappings
dd9c02ccfe ANDROID: 16K: Avoid mmap lock assertions for padding VMAs
2c5e07cea3 ANDROID: ABI: update symbol list for honor
9842b4145b ANDROID: GKI: net: add vendor hook for network quality estimation
df9e426fab ANDROID: abi_gki_aarch64_qcom: update abi symbol list
93fd111566 ANDROID: GKI: Update symbol list for Amlogic
d072b31467 UPSTREAM: filemap: add helper mapping_max_folio_size()
15fc1b760f ANDROID: android: Add symbols to debug_symbols driver
42eb612b25 ANDROID: Enable Rust Binder Module
cb0bef6d8e ANDROID: rust_binder: Switch Rust Binder to GKI Module
b511e79bc5 ANDROID: Only warn on long symbols
4a571c32cb ANDROID: Re-enable Rust symbol export
9b771fcf57 ANDROID: ABI: Update symbol list for Exynos SoC.
7ee86e5b15 ANDROID: vendor_hooks: Add hooks for util-update related functions
e484bff0c4 ANDROID: ABI: update symbol list for honor
047f3eded8 ANDROID: mm: create vendor hooks for memory reclaim
3ebe062ee3 ANDROID: ABI: update symbol list for honor
d61134668c ANDROID: Allow vendor modules perform operationson on memleak detect

Change-Id: Ib183a4e2c7bb39d68831eb9f5f7325a9823ed784
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
Greg Kroah-Hartman 2024-06-21 09:27:35 +00:00
commit 2dbf1ccc27
91 changed files with 7784 additions and 800 deletions

View File

@ -102,12 +102,15 @@ filegroup(
"android/abi_gki_aarch64_galaxy", "android/abi_gki_aarch64_galaxy",
"android/abi_gki_aarch64_honor", "android/abi_gki_aarch64_honor",
"android/abi_gki_aarch64_imx", "android/abi_gki_aarch64_imx",
"android/abi_gki_aarch64_lenovo",
"android/abi_gki_aarch64_mtk", "android/abi_gki_aarch64_mtk",
"android/abi_gki_aarch64_nothing", "android/abi_gki_aarch64_nothing",
"android/abi_gki_aarch64_oplus", "android/abi_gki_aarch64_oplus",
"android/abi_gki_aarch64_pixel", "android/abi_gki_aarch64_pixel",
"android/abi_gki_aarch64_qcom", "android/abi_gki_aarch64_qcom",
"android/abi_gki_aarch64_sunxi",
"android/abi_gki_aarch64_tcl", "android/abi_gki_aarch64_tcl",
"android/abi_gki_aarch64_tuxera",
"android/abi_gki_aarch64_type_visibility", "android/abi_gki_aarch64_type_visibility",
"android/abi_gki_aarch64_unisoc", "android/abi_gki_aarch64_unisoc",
"android/abi_gki_aarch64_virtual_device", "android/abi_gki_aarch64_virtual_device",
@ -1942,8 +1945,7 @@ cc_binary_with_abi(
includes = ["tools/testing/selftests"], includes = ["tools/testing/selftests"],
linkopts = [ linkopts = [
"-static", "-static",
"-Wl", "-Wl,-ereal_start",
"-ereal_start",
], ],
path_prefix = _KSELFTEST_DIR, path_prefix = _KSELFTEST_DIR,
target_compatible_with = ["@platforms//os:android"], target_compatible_with = ["@platforms//os:android"],

File diff suppressed because it is too large Load Diff

View File

@ -2,6 +2,7 @@
add_cpu add_cpu
add_device_randomness add_device_randomness
add_timer add_timer
add_to_page_cache_lru
add_uevent_var add_uevent_var
add_wait_queue add_wait_queue
adjust_managed_page_count adjust_managed_page_count
@ -64,9 +65,12 @@
__bitmap_weight __bitmap_weight
__bitmap_xor __bitmap_xor
bitmap_zalloc bitmap_zalloc
bit_wait
__blk_alloc_disk __blk_alloc_disk
blkdev_get_by_path
blkdev_issue_discard blkdev_issue_discard
blkdev_issue_zeroout blkdev_issue_zeroout
blkdev_put
blk_finish_plug blk_finish_plug
blk_queue_flag_clear blk_queue_flag_clear
blk_queue_flag_set blk_queue_flag_set
@ -163,6 +167,7 @@
clk_hw_is_enabled clk_hw_is_enabled
clk_hw_round_rate clk_hw_round_rate
clk_hw_set_parent clk_hw_set_parent
clk_hw_set_rate_range
__clk_is_enabled __clk_is_enabled
clk_mux_determine_rate_flags clk_mux_determine_rate_flags
clk_mux_index_to_val clk_mux_index_to_val
@ -228,6 +233,7 @@
cpu_subsys cpu_subsys
cpu_topology cpu_topology
crc32_be crc32_be
crc32c
crc32_le crc32_le
create_empty_buffers create_empty_buffers
crypto_aead_decrypt crypto_aead_decrypt
@ -252,6 +258,7 @@
crypto_init_queue crypto_init_queue
__crypto_memneq __crypto_memneq
crypto_register_ahash crypto_register_ahash
crypto_register_alg
crypto_register_shash crypto_register_shash
crypto_register_skcipher crypto_register_skcipher
crypto_sha1_finup crypto_sha1_finup
@ -266,6 +273,7 @@
crypto_skcipher_setkey crypto_skcipher_setkey
crypto_unregister_ahash crypto_unregister_ahash
crypto_unregister_ahashes crypto_unregister_ahashes
crypto_unregister_alg
crypto_unregister_shash crypto_unregister_shash
crypto_unregister_skcipher crypto_unregister_skcipher
__crypto_xor __crypto_xor
@ -395,6 +403,7 @@
devm_platform_ioremap_resource_byname devm_platform_ioremap_resource_byname
devm_pwm_get devm_pwm_get
devm_regmap_field_alloc devm_regmap_field_alloc
__devm_regmap_init
__devm_regmap_init_i2c __devm_regmap_init_i2c
__devm_regmap_init_mmio_clk __devm_regmap_init_mmio_clk
devm_regulator_bulk_get devm_regulator_bulk_get
@ -776,6 +785,7 @@
filemap_fdatawrite filemap_fdatawrite
filemap_fdatawrite_range filemap_fdatawrite_range
filemap_flush filemap_flush
filemap_read
__filemap_set_wb_err __filemap_set_wb_err
filemap_splice_read filemap_splice_read
filemap_write_and_wait_range filemap_write_and_wait_range
@ -824,6 +834,9 @@
freezer_active freezer_active
freezing_slow_path freezing_slow_path
fs_bio_set fs_bio_set
fs_ftype_to_dtype
fs_kobj
fs_param_is_enum
fs_param_is_string fs_param_is_string
fs_param_is_u32 fs_param_is_u32
__fs_parse __fs_parse
@ -852,11 +865,13 @@
generic_file_mmap generic_file_mmap
generic_file_open generic_file_open
generic_file_read_iter generic_file_read_iter
generic_file_readonly_mmap
__generic_file_write_iter __generic_file_write_iter
generic_fillattr generic_fillattr
generic_handle_irq generic_handle_irq
generic_permission generic_permission
generic_read_dir generic_read_dir
generic_ro_fops
generic_shutdown_super generic_shutdown_super
generic_write_checks generic_write_checks
generic_write_end generic_write_end
@ -896,6 +911,7 @@
get_device get_device
get_device_system_crosststamp get_device_system_crosststamp
__get_free_pages __get_free_pages
get_fs_type
get_net_ns_by_fd get_net_ns_by_fd
get_net_ns_by_pid get_net_ns_by_pid
get_pfnblock_flags_mask get_pfnblock_flags_mask
@ -1026,6 +1042,7 @@
__init_swait_queue_head __init_swait_queue_head
init_task init_task
init_timer_key init_timer_key
init_user_ns
init_uts_ns init_uts_ns
init_wait_entry init_wait_entry
__init_waitqueue_head __init_waitqueue_head
@ -1047,6 +1064,11 @@
insert_inode_locked insert_inode_locked
invalidate_bdev invalidate_bdev
invalidate_inode_buffers invalidate_inode_buffers
iomap_bmap
iomap_dio_rw
iomap_fiemap
iomap_readahead
iomap_read_folio
iomem_resource iomem_resource
iommu_device_register iommu_device_register
iommu_device_sysfs_add iommu_device_sysfs_add
@ -1055,13 +1077,18 @@
iommu_fwspec_init iommu_fwspec_init
iommu_get_domain_for_dev iommu_get_domain_for_dev
ioremap_prot ioremap_prot
io_schedule
iounmap iounmap
iov_iter_alignment
iov_iter_init iov_iter_init
iov_iter_revert iov_iter_revert
iov_iter_zero iov_iter_zero
iput iput
__ipv6_addr_type __ipv6_addr_type
__irq_apply_affinity_hint __irq_apply_affinity_hint
irq_chip_ack_parent
irq_chip_disable_parent
irq_chip_enable_parent
irq_chip_eoi_parent irq_chip_eoi_parent
irq_chip_mask_parent irq_chip_mask_parent
irq_chip_retrigger_hierarchy irq_chip_retrigger_hierarchy
@ -1073,11 +1100,13 @@
__irq_domain_add __irq_domain_add
irq_domain_alloc_irqs_parent irq_domain_alloc_irqs_parent
irq_domain_create_hierarchy irq_domain_create_hierarchy
irq_domain_free_irqs_common
irq_domain_free_irqs_parent irq_domain_free_irqs_parent
irq_domain_get_irq_data irq_domain_get_irq_data
irq_domain_remove irq_domain_remove
irq_domain_set_hwirq_and_chip irq_domain_set_hwirq_and_chip
irq_domain_set_info irq_domain_set_info
irq_domain_xlate_onecell
irq_find_matching_fwspec irq_find_matching_fwspec
irq_get_irq_data irq_get_irq_data
irq_of_parse_and_map irq_of_parse_and_map
@ -1151,11 +1180,15 @@
kmem_cache_free kmem_cache_free
kmemdup kmemdup
kobject_create_and_add kobject_create_and_add
kobject_del
kobject_init_and_add kobject_init_and_add
kobject_put kobject_put
kobject_set_name
kobject_uevent kobject_uevent
kobject_uevent_env kobject_uevent_env
krealloc krealloc
kset_register
kset_unregister
kstrdup kstrdup
kstrdup_const kstrdup_const
kstrtobool kstrtobool
@ -1173,6 +1206,8 @@
kthread_bind kthread_bind
kthread_complete_and_exit kthread_complete_and_exit
kthread_create_on_node kthread_create_on_node
kthread_create_worker_on_cpu
kthread_destroy_worker
kthread_flush_worker kthread_flush_worker
__kthread_init_worker __kthread_init_worker
kthread_queue_work kthread_queue_work
@ -1195,6 +1230,7 @@
led_classdev_register_ext led_classdev_register_ext
led_classdev_unregister led_classdev_unregister
led_init_default_state_get led_init_default_state_get
led_set_brightness
led_trigger_blink_oneshot led_trigger_blink_oneshot
led_trigger_event led_trigger_event
led_trigger_register led_trigger_register
@ -1216,6 +1252,8 @@
log_write_mmio log_write_mmio
lookup_bdev lookup_bdev
loops_per_jiffy loops_per_jiffy
LZ4_decompress_safe
LZ4_decompress_safe_partial
mac_pton mac_pton
make_bad_inode make_bad_inode
mangle_path mangle_path
@ -1372,7 +1410,10 @@
__nlmsg_put __nlmsg_put
nonseekable_open nonseekable_open
noop_backing_dev_info noop_backing_dev_info
noop_direct_IO
noop_llseek noop_llseek
nop_posix_acl_access
nop_posix_acl_default
nr_cpu_ids nr_cpu_ids
ns_capable ns_capable
ns_to_timespec64 ns_to_timespec64
@ -1454,9 +1495,11 @@
of_usb_get_phy_mode of_usb_get_phy_mode
of_usb_host_tpl_support of_usb_host_tpl_support
oops_in_progress oops_in_progress
out_of_line_wait_on_bit_lock
overflowgid overflowgid
overflowuid overflowuid
pagecache_get_page pagecache_get_page
page_get_link
page_mapping page_mapping
page_pinner_inited page_pinner_inited
__page_pinner_put_page __page_pinner_put_page
@ -1525,11 +1568,14 @@
perf_trace_run_bpf_submit perf_trace_run_bpf_submit
pfn_is_map_memory pfn_is_map_memory
phy_attached_info phy_attached_info
phy_basic_features
phy_basic_t1_features phy_basic_t1_features
phy_drivers_register phy_drivers_register
phy_drivers_unregister phy_drivers_unregister
phy_error phy_error
phy_exit phy_exit
phy_gbit_features
phy_get_internal_delay
phy_init phy_init
phy_init_eee phy_init_eee
phylink_connect_phy phylink_connect_phy
@ -1577,6 +1623,7 @@
phy_select_page phy_select_page
phy_set_mode_ext phy_set_mode_ext
phy_trigger_machine phy_trigger_machine
phy_write_mmd
phy_write_paged phy_write_paged
pid_task pid_task
pinconf_generic_dt_free_map pinconf_generic_dt_free_map
@ -1634,6 +1681,7 @@
pm_wakeup_ws_event pm_wakeup_ws_event
pm_wq pm_wq
posix_acl_chmod posix_acl_chmod
posix_acl_from_xattr
power_supply_get_by_name power_supply_get_by_name
power_supply_put power_supply_put
power_supply_set_property power_supply_set_property
@ -1714,7 +1762,10 @@
__rcu_read_lock __rcu_read_lock
__rcu_read_unlock __rcu_read_unlock
rdev_get_drvdata rdev_get_drvdata
readahead_expand
readahead_gfp_mask
read_cache_page read_cache_page
read_cache_page_gfp
read_sanitised_ftr_reg read_sanitised_ftr_reg
rebuild_sched_domains rebuild_sched_domains
refcount_warn_saturate refcount_warn_saturate
@ -1744,6 +1795,7 @@
register_shrinker register_shrinker
register_syscore_ops register_syscore_ops
register_wide_hw_breakpoint register_wide_hw_breakpoint
regmap_bulk_write
regmap_field_read regmap_field_read
regmap_field_update_bits_base regmap_field_update_bits_base
regmap_irq_chip_get_base regmap_irq_chip_get_base
@ -1830,6 +1882,8 @@
schedule_timeout_interruptible schedule_timeout_interruptible
schedule_timeout_killable schedule_timeout_killable
schedule_timeout_uninterruptible schedule_timeout_uninterruptible
scmi_driver_register
scmi_driver_unregister
scnprintf scnprintf
sdio_align_size sdio_align_size
sdio_claim_host sdio_claim_host
@ -1908,6 +1962,7 @@
simple_attr_read simple_attr_read
simple_attr_release simple_attr_release
simple_attr_write simple_attr_write
simple_get_link
simple_open simple_open
simple_read_from_buffer simple_read_from_buffer
simple_rename_timestamp simple_rename_timestamp
@ -2445,8 +2500,10 @@
vmf_insert_pfn_prot vmf_insert_pfn_prot
vm_get_page_prot vm_get_page_prot
vm_insert_page vm_insert_page
vm_map_ram
vm_node_stat vm_node_stat
vm_unmap_aliases vm_unmap_aliases
vm_unmap_ram
vm_zone_stat vm_zone_stat
vprintk vprintk
vscnprintf vscnprintf
@ -2462,6 +2519,8 @@
__wait_on_buffer __wait_on_buffer
wait_woken wait_woken
__wake_up __wake_up
wake_up_bit
__wake_up_locked
wake_up_process wake_up_process
wakeup_source_register wakeup_source_register
wakeup_source_unregister wakeup_source_unregister
@ -2475,6 +2534,11 @@
write_inode_now write_inode_now
__write_overflow_field __write_overflow_field
x509_load_certificate_list x509_load_certificate_list
__xa_cmpxchg
__xa_erase
xa_find
xa_find_after
xa_load
xdp_convert_zc_to_xdp_frame xdp_convert_zc_to_xdp_frame
xdp_do_flush xdp_do_flush
xdp_do_redirect xdp_do_redirect

View File

@ -1228,10 +1228,12 @@
sysctl_sched_features sysctl_sched_features
system_32bit_el0_cpumask system_32bit_el0_cpumask
tick_nohz_get_sleep_length tick_nohz_get_sleep_length
__traceiter_android_rvh_attach_entity_load_avg
__traceiter_android_rvh_can_migrate_task __traceiter_android_rvh_can_migrate_task
__traceiter_android_rvh_check_preempt_wakeup __traceiter_android_rvh_check_preempt_wakeup
__traceiter_android_rvh_cpu_cgroup_attach __traceiter_android_rvh_cpu_cgroup_attach
__traceiter_android_rvh_dequeue_task __traceiter_android_rvh_dequeue_task
__traceiter_android_rvh_detach_entity_load_avg
__traceiter_android_rvh_do_sched_yield __traceiter_android_rvh_do_sched_yield
__traceiter_android_rvh_enqueue_task __traceiter_android_rvh_enqueue_task
__traceiter_android_rvh_find_busiest_queue __traceiter_android_rvh_find_busiest_queue
@ -1239,6 +1241,7 @@
__traceiter_android_rvh_find_new_ilb __traceiter_android_rvh_find_new_ilb
__traceiter_android_rvh_flush_task __traceiter_android_rvh_flush_task
__traceiter_android_rvh_new_task_stats __traceiter_android_rvh_new_task_stats
__traceiter_android_rvh_remove_entity_load_avg
__traceiter_android_rvh_replace_next_task_fair __traceiter_android_rvh_replace_next_task_fair
__traceiter_android_rvh_sched_newidle_balance __traceiter_android_rvh_sched_newidle_balance
__traceiter_android_rvh_sched_nohz_balancer_kick __traceiter_android_rvh_sched_nohz_balancer_kick
@ -1249,6 +1252,8 @@
__traceiter_android_rvh_select_task_rq_rt __traceiter_android_rvh_select_task_rq_rt
__traceiter_android_rvh_try_to_wake_up __traceiter_android_rvh_try_to_wake_up
__traceiter_android_rvh_uclamp_eff_get __traceiter_android_rvh_uclamp_eff_get
__traceiter_android_rvh_update_blocked_fair
__traceiter_android_rvh_update_load_avg
__traceiter_android_rvh_update_misfit_status __traceiter_android_rvh_update_misfit_status
__traceiter_android_rvh_wake_up_new_task __traceiter_android_rvh_wake_up_new_task
__traceiter_android_vh_binder_restore_priority __traceiter_android_vh_binder_restore_priority
@ -1257,10 +1262,12 @@
__traceiter_android_vh_scheduler_tick __traceiter_android_vh_scheduler_tick
__traceiter_android_vh_syscall_prctl_finished __traceiter_android_vh_syscall_prctl_finished
__traceiter_binder_transaction_received __traceiter_binder_transaction_received
__tracepoint_android_rvh_attach_entity_load_avg
__tracepoint_android_rvh_can_migrate_task __tracepoint_android_rvh_can_migrate_task
__tracepoint_android_rvh_check_preempt_wakeup __tracepoint_android_rvh_check_preempt_wakeup
__tracepoint_android_rvh_cpu_cgroup_attach __tracepoint_android_rvh_cpu_cgroup_attach
__tracepoint_android_rvh_dequeue_task __tracepoint_android_rvh_dequeue_task
__tracepoint_android_rvh_detach_entity_load_avg
__tracepoint_android_rvh_do_sched_yield __tracepoint_android_rvh_do_sched_yield
__tracepoint_android_rvh_enqueue_task __tracepoint_android_rvh_enqueue_task
__tracepoint_android_rvh_find_busiest_queue __tracepoint_android_rvh_find_busiest_queue
@ -1268,6 +1275,7 @@
__tracepoint_android_rvh_find_new_ilb __tracepoint_android_rvh_find_new_ilb
__tracepoint_android_rvh_flush_task __tracepoint_android_rvh_flush_task
__tracepoint_android_rvh_new_task_stats __tracepoint_android_rvh_new_task_stats
__tracepoint_android_rvh_remove_entity_load_avg
__tracepoint_android_rvh_replace_next_task_fair __tracepoint_android_rvh_replace_next_task_fair
__tracepoint_android_rvh_sched_newidle_balance __tracepoint_android_rvh_sched_newidle_balance
__tracepoint_android_rvh_sched_nohz_balancer_kick __tracepoint_android_rvh_sched_nohz_balancer_kick
@ -1278,6 +1286,8 @@
__tracepoint_android_rvh_select_task_rq_rt __tracepoint_android_rvh_select_task_rq_rt
__tracepoint_android_rvh_try_to_wake_up __tracepoint_android_rvh_try_to_wake_up
__tracepoint_android_rvh_uclamp_eff_get __tracepoint_android_rvh_uclamp_eff_get
__tracepoint_android_rvh_update_blocked_fair
__tracepoint_android_rvh_update_load_avg
__tracepoint_android_rvh_update_misfit_status __tracepoint_android_rvh_update_misfit_status
__tracepoint_android_rvh_wake_up_new_task __tracepoint_android_rvh_wake_up_new_task
__tracepoint_android_vh_binder_restore_priority __tracepoint_android_vh_binder_restore_priority

View File

@ -36,6 +36,7 @@
nla_append nla_append
skb_append skb_append
sysctl_max_skb_frags sysctl_max_skb_frags
__show_mem
find_vm_area find_vm_area
profile_event_unregister profile_event_unregister
binder_alloc_copy_from_buffer binder_alloc_copy_from_buffer
@ -46,12 +47,20 @@
drop_super drop_super
mm_trace_rss_stat mm_trace_rss_stat
__kfifo_len_r __kfifo_len_r
__tracepoint_android_rvh_cpuinfo_c_show
__traceiter_android_rvh_cpuinfo_c_show
__tracepoint_android_vh_dc_send_copy __tracepoint_android_vh_dc_send_copy
__traceiter_android_vh_dc_send_copy __traceiter_android_vh_dc_send_copy
__tracepoint_android_vh_dc_receive __tracepoint_android_vh_dc_receive
__traceiter_android_vh_dc_receive __traceiter_android_vh_dc_receive
__traceiter_android_vh_modify_scan_control
__traceiter_android_vh_should_continue_reclaim
__traceiter_android_vh_file_is_tiny_bypass
__tracepoint_android_vh_modify_scan_control
__tracepoint_android_vh_should_continue_reclaim
__tracepoint_android_vh_file_is_tiny_bypass
__tracepoint_android_vh_shrink_slab_bypass __tracepoint_android_vh_shrink_slab_bypass
__traceiter_android_vh_shrink_slab_bypass __traceiter_android_vh_shrink_slab_bypass
__tracepoint_android_vh_do_shrink_slab __tracepoint_android_vh_do_shrink_slab
__traceiter_android_vh_do_shrink_slab __traceiter_android_vh_do_shrink_slab
__tracepoint_android_vh_shrink_slab_bypass __tracepoint_android_vh_shrink_slab_bypass
@ -60,6 +69,8 @@
__traceiter_android_vh_slab_alloc_node __traceiter_android_vh_slab_alloc_node
__tracepoint_android_vh_slab_free __tracepoint_android_vh_slab_free
__traceiter_android_vh_slab_free __traceiter_android_vh_slab_free
__traceiter_android_vh_tcp_connect
__tracepoint_android_vh_tcp_connect
__traceiter_android_vh_tcp_write_timeout_estab_retrans __traceiter_android_vh_tcp_write_timeout_estab_retrans
__tracepoint_android_vh_tcp_write_timeout_estab_retrans __tracepoint_android_vh_tcp_write_timeout_estab_retrans
__tracepoint_android_vh_si_mem_available_adjust __tracepoint_android_vh_si_mem_available_adjust
@ -68,3 +79,5 @@
__traceiter_android_vh_si_meminfo_adjust __traceiter_android_vh_si_meminfo_adjust
__traceiter_android_rvh_hw_protection_shutdown __traceiter_android_rvh_hw_protection_shutdown
__tracepoint_android_rvh_hw_protection_shutdown __tracepoint_android_rvh_hw_protection_shutdown
__traceiter_android_rvh_bpf_int_jit_compile_ro
__tracepoint_android_rvh_bpf_int_jit_compile_ro

File diff suppressed because it is too large Load Diff

View File

@ -1,4 +1,5 @@
[abi_symbol_list] [abi_symbol_list]
activate_task
add_cpu add_cpu
add_timer add_timer
add_timer_on add_timer_on
@ -35,6 +36,7 @@
__arch_clear_user __arch_clear_user
__arch_copy_from_user __arch_copy_from_user
__arch_copy_to_user __arch_copy_to_user
arch_freq_scale
arch_timer_read_counter arch_timer_read_counter
argv_free argv_free
argv_split argv_split
@ -46,8 +48,10 @@
atomic_notifier_chain_register atomic_notifier_chain_register
atomic_notifier_chain_unregister atomic_notifier_chain_unregister
autoremove_wake_function autoremove_wake_function
available_idle_cpu
backing_file_real_path backing_file_real_path
backlight_device_set_brightness backlight_device_set_brightness
balance_push_callback
bcmp bcmp
bin2hex bin2hex
__bitmap_andnot __bitmap_andnot
@ -77,6 +81,7 @@
bpf_trace_run1 bpf_trace_run1
bpf_trace_run10 bpf_trace_run10
bpf_trace_run11 bpf_trace_run11
bpf_trace_run12
bpf_trace_run2 bpf_trace_run2
bpf_trace_run3 bpf_trace_run3
bpf_trace_run4 bpf_trace_run4
@ -107,6 +112,7 @@
cdev_device_del cdev_device_del
cdev_init cdev_init
__check_object_size __check_object_size
check_preempt_curr
class_create class_create
class_destroy class_destroy
class_interface_unregister class_interface_unregister
@ -168,23 +174,36 @@
_copy_from_iter _copy_from_iter
__copy_overflow __copy_overflow
_copy_to_iter _copy_to_iter
__cpu_active_mask
cpu_all_bits cpu_all_bits
cpu_bit_bitmap cpu_bit_bitmap
cpu_busy_with_softirqs
cpufreq_add_update_util_hook
cpufreq_cpu_get cpufreq_cpu_get
cpufreq_cpu_get_raw cpufreq_cpu_get_raw
cpufreq_cpu_put cpufreq_cpu_put
cpufreq_disable_fast_switch
cpufreq_driver_fast_switch
cpufreq_driver_resolve_freq
__cpufreq_driver_target
cpufreq_driver_target cpufreq_driver_target
cpufreq_enable_fast_switch
cpufreq_freq_transition_begin cpufreq_freq_transition_begin
cpufreq_freq_transition_end cpufreq_freq_transition_end
cpufreq_frequency_table_verify cpufreq_frequency_table_verify
cpufreq_generic_attr cpufreq_generic_attr
cpufreq_get cpufreq_get
cpufreq_get_policy cpufreq_get_policy
cpufreq_policy_transition_delay_us
cpufreq_quick_get cpufreq_quick_get
cpufreq_quick_get_max cpufreq_quick_get_max
cpufreq_register_driver cpufreq_register_driver
cpufreq_register_governor
cpufreq_register_notifier cpufreq_register_notifier
cpufreq_remove_update_util_hook
cpufreq_table_index_unsorted cpufreq_table_index_unsorted
cpufreq_this_cpu_can_update
cpufreq_update_util_data
cpu_hotplug_disable cpu_hotplug_disable
cpu_hotplug_enable cpu_hotplug_enable
__cpuhp_remove_state __cpuhp_remove_state
@ -204,6 +223,7 @@
cpu_pm_unregister_notifier cpu_pm_unregister_notifier
__cpu_possible_mask __cpu_possible_mask
__cpu_present_mask __cpu_present_mask
cpupri_find_fitness
cpu_scale cpu_scale
cpus_read_lock cpus_read_lock
cpus_read_unlock cpus_read_unlock
@ -238,6 +258,7 @@
csum_tcpudp_nofold csum_tcpudp_nofold
_ctype _ctype
datagram_poll datagram_poll
deactivate_task
debugfs_attr_read debugfs_attr_read
debugfs_attr_write debugfs_attr_write
debugfs_create_atomic_t debugfs_create_atomic_t
@ -330,6 +351,8 @@
devm_clk_get devm_clk_get
devm_clk_get_optional devm_clk_get_optional
devm_clk_put devm_clk_put
devm_devfreq_add_device
devm_devfreq_remove_device
devm_device_add_group devm_device_add_group
devm_device_add_groups devm_device_add_groups
devm_drm_bridge_add devm_drm_bridge_add
@ -424,6 +447,7 @@
dev_pm_opp_of_register_em dev_pm_opp_of_register_em
dev_pm_opp_of_remove_table dev_pm_opp_of_remove_table
dev_pm_opp_put dev_pm_opp_put
dev_pm_opp_remove_all_dynamic
dev_pm_opp_set_config dev_pm_opp_set_config
dev_pm_qos_add_notifier dev_pm_qos_add_notifier
dev_pm_qos_add_request dev_pm_qos_add_request
@ -471,6 +495,8 @@
dma_buf_unmap_attachment_unlocked dma_buf_unmap_attachment_unlocked
dma_buf_vmap dma_buf_vmap
dma_buf_vunmap dma_buf_vunmap
dma_direct_alloc
dma_direct_free
dmaengine_unmap_put dmaengine_unmap_put
dma_fence_add_callback dma_fence_add_callback
dma_fence_array_ops dma_fence_array_ops
@ -790,6 +816,7 @@
_find_next_bit _find_next_bit
_find_next_zero_bit _find_next_zero_bit
find_pid_ns find_pid_ns
find_task_by_vpid
find_vma_intersection find_vma_intersection
finish_wait finish_wait
firmware_request_nowarn firmware_request_nowarn
@ -850,6 +877,7 @@
get_cpu_iowait_time_us get_cpu_iowait_time_us
get_device get_device
__get_free_pages __get_free_pages
get_governor_parent_kobj
get_net_ns_by_fd get_net_ns_by_fd
get_net_ns_by_pid get_net_ns_by_pid
get_pid_task get_pid_task
@ -860,12 +888,17 @@
get_random_u8 get_random_u8
get_sg_io_hdr get_sg_io_hdr
__get_task_comm __get_task_comm
get_task_cred
get_thermal_instance get_thermal_instance
get_unused_fd_flags get_unused_fd_flags
get_user_pages get_user_pages
get_user_pages_fast get_user_pages_fast
get_vaddr_frames get_vaddr_frames
glob_match glob_match
gov_attr_set_get
gov_attr_set_init
gov_attr_set_put
governor_sysfs_ops
gpiochip_generic_config gpiochip_generic_config
gpiochip_generic_free gpiochip_generic_free
gpiochip_generic_request gpiochip_generic_request
@ -902,6 +935,7 @@
handle_simple_irq handle_simple_irq
handle_sysrq handle_sysrq
hashlen_string hashlen_string
have_governor_per_policy
hdmi_avi_infoframe_pack hdmi_avi_infoframe_pack
hex2bin hex2bin
hex_dump_to_buffer hex_dump_to_buffer
@ -954,6 +988,12 @@
ida_alloc_range ida_alloc_range
ida_destroy ida_destroy
ida_free ida_free
idle_inject_get_duration
idle_inject_register
idle_inject_set_duration
idle_inject_set_latency
idle_inject_start
idle_inject_stop
idr_alloc idr_alloc
idr_alloc_cyclic idr_alloc_cyclic
idr_alloc_u32 idr_alloc_u32
@ -1077,6 +1117,8 @@
irq_set_irq_type irq_set_irq_type
irq_set_irq_wake irq_set_irq_wake
irq_to_desc irq_to_desc
irq_work_queue
irq_work_sync
is_vmalloc_addr is_vmalloc_addr
jiffies jiffies
jiffies_to_msecs jiffies_to_msecs
@ -1095,6 +1137,7 @@
kernel_sendmsg kernel_sendmsg
kernfs_find_and_get_ns kernfs_find_and_get_ns
kernfs_notify kernfs_notify
kernfs_path_from_node
kernfs_put kernfs_put
key_put key_put
keyring_alloc keyring_alloc
@ -1133,6 +1176,7 @@
kobj_sysfs_ops kobj_sysfs_ops
krealloc krealloc
ksize ksize
ksoftirqd
kstat kstat
kstrdup kstrdup
kstrndup kstrndup
@ -1148,6 +1192,7 @@
kstrtou8_from_user kstrtou8_from_user
kstrtouint kstrtouint
kstrtouint_from_user kstrtouint_from_user
kstrtoul_from_user
kstrtoull kstrtoull
kstrtoull_from_user kstrtoull_from_user
kthread_bind kthread_bind
@ -1209,6 +1254,7 @@
loops_per_jiffy loops_per_jiffy
mac_pton mac_pton
mas_empty_area_rev mas_empty_area_rev
max_load_balance_interval
mbox_chan_received_data mbox_chan_received_data
mbox_controller_register mbox_controller_register
mbox_controller_unregister mbox_controller_unregister
@ -1324,6 +1370,7 @@
noop_llseek noop_llseek
nr_cpu_ids nr_cpu_ids
nr_irqs nr_irqs
ns_capable
ns_capable_noaudit ns_capable_noaudit
nsec_to_clock_t nsec_to_clock_t
ns_to_timespec64 ns_to_timespec64
@ -1575,11 +1622,13 @@
prepare_to_wait_event prepare_to_wait_event
print_hex_dump print_hex_dump
_printk _printk
_printk_deferred
proc_create proc_create
proc_create_data proc_create_data
proc_create_single_data proc_create_single_data
proc_dointvec proc_dointvec
proc_dostring proc_dostring
proc_douintvec_minmax
proc_mkdir proc_mkdir
proc_mkdir_data proc_mkdir_data
proc_remove proc_remove
@ -1589,6 +1638,7 @@
pskb_expand_head pskb_expand_head
__pskb_pull_tail __pskb_pull_tail
___pskb_trim ___pskb_trim
push_cpu_stop
__put_cred __put_cred
put_device put_device
put_iova_domain put_iova_domain
@ -1621,14 +1671,18 @@
_raw_spin_lock_bh _raw_spin_lock_bh
_raw_spin_lock_irq _raw_spin_lock_irq
_raw_spin_lock_irqsave _raw_spin_lock_irqsave
raw_spin_rq_lock_nested
raw_spin_rq_unlock
_raw_spin_trylock _raw_spin_trylock
_raw_spin_unlock _raw_spin_unlock
_raw_spin_unlock_bh _raw_spin_unlock_bh
_raw_spin_unlock_irq _raw_spin_unlock_irq
_raw_spin_unlock_irqrestore _raw_spin_unlock_irqrestore
_raw_write_lock
_raw_write_lock_bh _raw_write_lock_bh
_raw_write_lock_irq _raw_write_lock_irq
_raw_write_lock_irqsave _raw_write_lock_irqsave
_raw_write_unlock
_raw_write_unlock_bh _raw_write_unlock_bh
_raw_write_unlock_irq _raw_write_unlock_irq
_raw_write_unlock_irqrestore _raw_write_unlock_irqrestore
@ -1726,8 +1780,10 @@
__request_percpu_irq __request_percpu_irq
__request_region __request_region
request_threaded_irq request_threaded_irq
resched_curr
reserve_iova reserve_iova
return_address return_address
reweight_task
rfkill_alloc rfkill_alloc
rfkill_blocked rfkill_blocked
rfkill_destroy rfkill_destroy
@ -1742,6 +1798,7 @@
__rht_bucket_nested __rht_bucket_nested
rht_bucket_nested rht_bucket_nested
rht_bucket_nested_insert rht_bucket_nested_insert
root_task_group
round_jiffies round_jiffies
round_jiffies_relative round_jiffies_relative
round_jiffies_up round_jiffies_up
@ -1761,13 +1818,16 @@
rtnl_lock rtnl_lock
rtnl_trylock rtnl_trylock
rtnl_unlock rtnl_unlock
runqueues
sched_clock sched_clock
sched_feat_keys
sched_setattr_nocheck sched_setattr_nocheck
sched_set_fifo sched_set_fifo
sched_set_normal sched_set_normal
sched_setscheduler sched_setscheduler
sched_setscheduler_nocheck sched_setscheduler_nocheck
sched_show_task sched_show_task
sched_uclamp_used
schedule schedule
schedule_timeout schedule_timeout
schedule_timeout_interruptible schedule_timeout_interruptible
@ -1805,6 +1865,7 @@
set_freezable set_freezable
set_page_dirty set_page_dirty
set_page_dirty_lock set_page_dirty_lock
set_task_cpu
set_user_nice set_user_nice
sg_alloc_table sg_alloc_table
sg_alloc_table_from_pages_segment sg_alloc_table_from_pages_segment
@ -1981,9 +2042,13 @@
srcu_notifier_chain_unregister srcu_notifier_chain_unregister
sscanf sscanf
__stack_chk_fail __stack_chk_fail
static_key_count
static_key_disable
static_key_enable
static_key_slow_dec static_key_slow_dec
static_key_slow_inc static_key_slow_inc
stop_machine stop_machine
stop_one_cpu_nowait
strcasecmp strcasecmp
strchr strchr
strchrnul strchrnul
@ -2022,6 +2087,8 @@
synchronize_net synchronize_net
synchronize_rcu synchronize_rcu
syscon_regmap_lookup_by_phandle syscon_regmap_lookup_by_phandle
sysctl_sched_base_slice
sysctl_sched_features
sysfs_add_file_to_group sysfs_add_file_to_group
sysfs_add_link_to_group sysfs_add_link_to_group
sysfs_create_file_ns sysfs_create_file_ns
@ -2059,6 +2126,7 @@
tasklet_setup tasklet_setup
tasklet_unlock_wait tasklet_unlock_wait
__task_pid_nr_ns __task_pid_nr_ns
task_rq_lock
tcpci_get_tcpm_port tcpci_get_tcpm_port
tcpci_irq tcpci_irq
tcpci_register_port tcpci_register_port
@ -2080,6 +2148,7 @@
thermal_cdev_update thermal_cdev_update
thermal_cooling_device_unregister thermal_cooling_device_unregister
thermal_of_cooling_device_register thermal_of_cooling_device_register
thermal_pressure
thermal_tripless_zone_device_register thermal_tripless_zone_device_register
thermal_zone_device_disable thermal_zone_device_disable
thermal_zone_device_enable thermal_zone_device_enable
@ -2104,16 +2173,61 @@
trace_event_raw_init trace_event_raw_init
trace_event_reg trace_event_reg
trace_handle_return trace_handle_return
__traceiter_android_rvh_attach_entity_load_avg
__traceiter_android_rvh_can_migrate_task
__traceiter_android_rvh_cgroup_force_kthread_migration __traceiter_android_rvh_cgroup_force_kthread_migration
__traceiter_android_rvh_check_preempt_wakeup
__traceiter_android_rvh_cpu_overutilized
__traceiter_android_rvh_dequeue_task
__traceiter_android_rvh_dequeue_task_fair
__traceiter_android_rvh_detach_entity_load_avg
__traceiter_android_rvh_enqueue_task
__traceiter_android_rvh_enqueue_task_fair
__traceiter_android_rvh_find_lowest_rq
__traceiter_android_rvh_iommu_alloc_insert_iova __traceiter_android_rvh_iommu_alloc_insert_iova
__traceiter_android_rvh_iommu_iovad_init_alloc_algo __traceiter_android_rvh_iommu_iovad_init_alloc_algo
__traceiter_android_rvh_iommu_limit_align_shift __traceiter_android_rvh_iommu_limit_align_shift
__traceiter_android_rvh_irqs_disable
__traceiter_android_rvh_irqs_enable
__traceiter_android_rvh_post_init_entity_util_avg
__traceiter_android_rvh_preempt_disable
__traceiter_android_rvh_preempt_enable
__traceiter_android_rvh_remove_entity_load_avg
__traceiter_android_rvh_rtmutex_prepare_setprio
__traceiter_android_rvh_sched_newidle_balance
__traceiter_android_rvh_select_task_rq_fair
__traceiter_android_rvh_select_task_rq_rt
__traceiter_android_rvh_set_cpus_allowed_by_task
__traceiter_android_rvh_set_iowait
__traceiter_android_rvh_setscheduler
__traceiter_android_rvh_set_task_cpu
__traceiter_android_rvh_set_user_nice_locked
__traceiter_android_rvh_tick_entry
__traceiter_android_rvh_uclamp_eff_get
__traceiter_android_rvh_update_blocked_fair
__traceiter_android_rvh_update_load_avg
__traceiter_android_rvh_update_misfit_status
__traceiter_android_rvh_update_rt_rq_load_avg
__traceiter_android_rvh_util_est_update
__traceiter_android_rvh_util_fits_cpu
__traceiter_android_vh_arch_set_freq_scale
__traceiter_android_vh_binder_restore_priority
__traceiter_android_vh_binder_set_priority
__traceiter_android_vh_cpu_idle_enter __traceiter_android_vh_cpu_idle_enter
__traceiter_android_vh_cpu_idle_exit __traceiter_android_vh_cpu_idle_exit
__traceiter_android_vh_dump_throttled_rt_tasks
__traceiter_android_vh_dup_task_struct
__traceiter_android_vh_early_resume_begin
__traceiter_android_vh_enable_thermal_genl_check
__traceiter_android_vh_ipi_stop __traceiter_android_vh_ipi_stop
__traceiter_android_vh_prio_inheritance
__traceiter_android_vh_prio_restore
__traceiter_android_vh_resume_end
__traceiter_android_vh_scheduler_tick __traceiter_android_vh_scheduler_tick
__traceiter_android_vh_setscheduler_uclamp
__traceiter_android_vh_si_meminfo_adjust __traceiter_android_vh_si_meminfo_adjust
__traceiter_android_vh_sysrq_crash __traceiter_android_vh_sysrq_crash
__traceiter_android_vh_uclamp_validate
__traceiter_android_vh_ufs_check_int_errors __traceiter_android_vh_ufs_check_int_errors
__traceiter_android_vh_ufs_compl_command __traceiter_android_vh_ufs_compl_command
__traceiter_android_vh_ufs_fill_prdt __traceiter_android_vh_ufs_fill_prdt
@ -2123,30 +2237,91 @@
__traceiter_android_vh_ufs_send_uic_command __traceiter_android_vh_ufs_send_uic_command
__traceiter_android_vh_ufs_update_sdev __traceiter_android_vh_ufs_update_sdev
__traceiter_android_vh_ufs_update_sysfs __traceiter_android_vh_ufs_update_sysfs
__traceiter_android_vh_use_amu_fie
__traceiter_clock_set_rate __traceiter_clock_set_rate
__traceiter_cpu_idle
__traceiter_device_pm_callback_end __traceiter_device_pm_callback_end
__traceiter_device_pm_callback_start __traceiter_device_pm_callback_start
__traceiter_gpu_mem_total __traceiter_gpu_mem_total
__traceiter_irq_handler_entry
__traceiter_irq_handler_exit
__traceiter_mmap_lock_acquire_returned __traceiter_mmap_lock_acquire_returned
__traceiter_mmap_lock_released __traceiter_mmap_lock_released
__traceiter_mmap_lock_start_locking __traceiter_mmap_lock_start_locking
__traceiter_mm_vmscan_direct_reclaim_begin __traceiter_mm_vmscan_direct_reclaim_begin
__traceiter_mm_vmscan_direct_reclaim_end __traceiter_mm_vmscan_direct_reclaim_end
__traceiter_pelt_cfs_tp
__traceiter_pelt_dl_tp
__traceiter_pelt_irq_tp
__traceiter_pelt_rt_tp
__traceiter_pelt_se_tp
__traceiter_sched_cpu_capacity_tp
__traceiter_sched_overutilized_tp
__traceiter_sched_switch __traceiter_sched_switch
__traceiter_sched_util_est_cfs_tp
__traceiter_sched_util_est_se_tp
__traceiter_sched_wakeup
__traceiter_softirq_entry
__traceiter_softirq_exit
__traceiter_suspend_resume __traceiter_suspend_resume
__traceiter_workqueue_execute_end __traceiter_workqueue_execute_end
__traceiter_workqueue_execute_start __traceiter_workqueue_execute_start
trace_output_call trace_output_call
__tracepoint_android_rvh_attach_entity_load_avg
__tracepoint_android_rvh_can_migrate_task
__tracepoint_android_rvh_cgroup_force_kthread_migration __tracepoint_android_rvh_cgroup_force_kthread_migration
__tracepoint_android_rvh_check_preempt_wakeup
__tracepoint_android_rvh_cpu_overutilized
__tracepoint_android_rvh_dequeue_task
__tracepoint_android_rvh_dequeue_task_fair
__tracepoint_android_rvh_detach_entity_load_avg
__tracepoint_android_rvh_enqueue_task
__tracepoint_android_rvh_enqueue_task_fair
__tracepoint_android_rvh_find_lowest_rq
__tracepoint_android_rvh_iommu_alloc_insert_iova __tracepoint_android_rvh_iommu_alloc_insert_iova
__tracepoint_android_rvh_iommu_iovad_init_alloc_algo __tracepoint_android_rvh_iommu_iovad_init_alloc_algo
__tracepoint_android_rvh_iommu_limit_align_shift __tracepoint_android_rvh_iommu_limit_align_shift
__tracepoint_android_rvh_irqs_disable
__tracepoint_android_rvh_irqs_enable
__tracepoint_android_rvh_post_init_entity_util_avg
__tracepoint_android_rvh_preempt_disable
__tracepoint_android_rvh_preempt_enable
__tracepoint_android_rvh_remove_entity_load_avg
__tracepoint_android_rvh_rtmutex_prepare_setprio
__tracepoint_android_rvh_sched_newidle_balance
__tracepoint_android_rvh_select_task_rq_fair
__tracepoint_android_rvh_select_task_rq_rt
__tracepoint_android_rvh_set_cpus_allowed_by_task
__tracepoint_android_rvh_set_iowait
__tracepoint_android_rvh_setscheduler
__tracepoint_android_rvh_set_task_cpu
__tracepoint_android_rvh_set_user_nice_locked
__tracepoint_android_rvh_tick_entry
__tracepoint_android_rvh_uclamp_eff_get
__tracepoint_android_rvh_update_blocked_fair
__tracepoint_android_rvh_update_load_avg
__tracepoint_android_rvh_update_misfit_status
__tracepoint_android_rvh_update_rt_rq_load_avg
__tracepoint_android_rvh_util_est_update
__tracepoint_android_rvh_util_fits_cpu
__tracepoint_android_vh_arch_set_freq_scale
__tracepoint_android_vh_binder_restore_priority
__tracepoint_android_vh_binder_set_priority
__tracepoint_android_vh_cpu_idle_enter __tracepoint_android_vh_cpu_idle_enter
__tracepoint_android_vh_cpu_idle_exit __tracepoint_android_vh_cpu_idle_exit
__tracepoint_android_vh_dump_throttled_rt_tasks
__tracepoint_android_vh_dup_task_struct
__tracepoint_android_vh_early_resume_begin
__tracepoint_android_vh_enable_thermal_genl_check
__tracepoint_android_vh_ipi_stop __tracepoint_android_vh_ipi_stop
__tracepoint_android_vh_prio_inheritance
__tracepoint_android_vh_prio_restore
__tracepoint_android_vh_resume_end
__tracepoint_android_vh_scheduler_tick __tracepoint_android_vh_scheduler_tick
__tracepoint_android_vh_setscheduler_uclamp
__tracepoint_android_vh_si_meminfo_adjust __tracepoint_android_vh_si_meminfo_adjust
__tracepoint_android_vh_sysrq_crash __tracepoint_android_vh_sysrq_crash
__tracepoint_android_vh_uclamp_validate
__tracepoint_android_vh_ufs_check_int_errors __tracepoint_android_vh_ufs_check_int_errors
__tracepoint_android_vh_ufs_compl_command __tracepoint_android_vh_ufs_compl_command
__tracepoint_android_vh_ufs_fill_prdt __tracepoint_android_vh_ufs_fill_prdt
@ -2156,18 +2331,34 @@
__tracepoint_android_vh_ufs_send_uic_command __tracepoint_android_vh_ufs_send_uic_command
__tracepoint_android_vh_ufs_update_sdev __tracepoint_android_vh_ufs_update_sdev
__tracepoint_android_vh_ufs_update_sysfs __tracepoint_android_vh_ufs_update_sysfs
__tracepoint_android_vh_use_amu_fie
__tracepoint_clock_set_rate __tracepoint_clock_set_rate
__tracepoint_cpu_idle
__tracepoint_device_pm_callback_end __tracepoint_device_pm_callback_end
__tracepoint_device_pm_callback_start __tracepoint_device_pm_callback_start
__tracepoint_gpu_mem_total __tracepoint_gpu_mem_total
__tracepoint_irq_handler_entry
__tracepoint_irq_handler_exit
__tracepoint_mmap_lock_acquire_returned __tracepoint_mmap_lock_acquire_returned
__tracepoint_mmap_lock_released __tracepoint_mmap_lock_released
__tracepoint_mmap_lock_start_locking __tracepoint_mmap_lock_start_locking
__tracepoint_mm_vmscan_direct_reclaim_begin __tracepoint_mm_vmscan_direct_reclaim_begin
__tracepoint_mm_vmscan_direct_reclaim_end __tracepoint_mm_vmscan_direct_reclaim_end
__tracepoint_pelt_cfs_tp
__tracepoint_pelt_dl_tp
__tracepoint_pelt_irq_tp
__tracepoint_pelt_rt_tp
__tracepoint_pelt_se_tp
tracepoint_probe_register tracepoint_probe_register
tracepoint_probe_unregister tracepoint_probe_unregister
__tracepoint_sched_cpu_capacity_tp
__tracepoint_sched_overutilized_tp
__tracepoint_sched_switch __tracepoint_sched_switch
__tracepoint_sched_util_est_cfs_tp
__tracepoint_sched_util_est_se_tp
__tracepoint_sched_wakeup
__tracepoint_softirq_entry
__tracepoint_softirq_exit
__tracepoint_suspend_resume __tracepoint_suspend_resume
__tracepoint_workqueue_execute_end __tracepoint_workqueue_execute_end
__tracepoint_workqueue_execute_start __tracepoint_workqueue_execute_start
@ -2203,6 +2394,7 @@
uart_unregister_driver uart_unregister_driver
uart_update_timeout uart_update_timeout
uart_write_wakeup uart_write_wakeup
uclamp_eff_value
__udelay __udelay
udp4_hwcsum udp4_hwcsum
ufshcd_auto_hibern8_update ufshcd_auto_hibern8_update
@ -2239,6 +2431,9 @@
unregister_virtio_driver unregister_virtio_driver
up up
update_devfreq update_devfreq
___update_load_sum
update_misfit_status
update_rq_clock
up_read up_read
up_write up_write
usb_add_function usb_add_function

View File

@ -336,6 +336,7 @@
devm_clk_get_optional_enabled devm_clk_get_optional_enabled
devm_clk_hw_register devm_clk_hw_register
devm_clk_register devm_clk_register
devm_device_add_group
devm_extcon_dev_allocate devm_extcon_dev_allocate
devm_extcon_dev_register devm_extcon_dev_register
devm_extcon_dev_unregister devm_extcon_dev_unregister
@ -749,6 +750,7 @@
hrtimer_start_range_ns hrtimer_start_range_ns
hrtimer_try_to_cancel hrtimer_try_to_cancel
hvc_alloc hvc_alloc
hvc_instantiate
hvc_kick hvc_kick
hvc_poll hvc_poll
hvc_remove hvc_remove
@ -767,6 +769,8 @@
i2c_get_dma_safe_msg_buf i2c_get_dma_safe_msg_buf
i2c_put_dma_safe_msg_buf i2c_put_dma_safe_msg_buf
i2c_register_driver i2c_register_driver
i2c_smbus_read_byte_data
i2c_smbus_write_byte_data
i2c_transfer i2c_transfer
i3c_device_do_priv_xfers i3c_device_do_priv_xfers
i3c_driver_register_with_owner i3c_driver_register_with_owner
@ -1916,6 +1920,7 @@
trace_get_event_file trace_get_event_file
trace_handle_return trace_handle_return
__traceiter_android_rvh_account_irq __traceiter_android_rvh_account_irq
__traceiter_android_rvh_before_do_sched_yield
__traceiter_android_rvh_build_perf_domains __traceiter_android_rvh_build_perf_domains
__traceiter_android_rvh_can_migrate_task __traceiter_android_rvh_can_migrate_task
__traceiter_android_rvh_check_preempt_wakeup __traceiter_android_rvh_check_preempt_wakeup
@ -2017,6 +2022,7 @@
__traceiter_sk_data_ready __traceiter_sk_data_ready
__traceiter_suspend_resume __traceiter_suspend_resume
__tracepoint_android_rvh_account_irq __tracepoint_android_rvh_account_irq
__tracepoint_android_rvh_before_do_sched_yield
__tracepoint_android_rvh_build_perf_domains __tracepoint_android_rvh_build_perf_domains
__tracepoint_android_rvh_can_migrate_task __tracepoint_android_rvh_can_migrate_task
__tracepoint_android_rvh_check_preempt_wakeup __tracepoint_android_rvh_check_preempt_wakeup

View File

@ -0,0 +1,51 @@
[abi_symbol_list]
arp_create
crc_ccitt_table
crypto_ahash_final
__dev_direct_xmit
dev_mc_add
dev_mc_del
dma_get_any_slave_channel
drm_client_buffer_vmap
drm_client_buffer_vunmap
drm_client_framebuffer_create
drm_client_framebuffer_delete
drm_client_release
drm_gem_dmabuf_export
drm_gem_fb_create_with_funcs
drm_helper_disable_unused_functions
drm_mode_create_tv_properties_legacy
drm_mode_legacy_fb_format
drm_property_create_object
hid_hw_request
iio_map_array_register
iio_map_array_unregister
init_on_alloc
ioport_resource
irq_setup_alt_chip
kstrtos16
ktime_add_safe
lookup_user_key
of_clk_hw_register
of_get_display_timings
of_io_request_and_map
pci_request_regions
phy_loopback
phy_queue_state_machine
phy_speed_to_str
pm_clk_add_clk
__scm_destroy
scm_detach_fds
snd_dmaengine_pcm_close_release_chan
snd_dmaengine_pcm_get_chan
snd_dmaengine_pcm_open
snd_dmaengine_pcm_pointer
snd_dmaengine_pcm_trigger
snd_hwparams_to_dma_slave_config
snd_soc_dai_set_clkdiv
sock_kfree_s
sock_kmalloc
sock_kzfree_s
sock_wake_async
start_tty
stop_tty

View File

@ -0,0 +1,53 @@
__bforget
__bh_read_batch
bit_waitqueue
block_is_partially_uptodate
__breadahead
capable_wrt_inode_uidgid
clear_page_dirty_for_io
d_add
d_add_ci
end_page_writeback
fault_in_safe_writeable
file_check_and_advance_wb_err
filemap_add_folio
filemap_check_errors
filemap_dirty_folio
filemap_fault
filemap_get_folios_tag
__folio_cancel_dirty
folio_zero_new_buffers
generic_error_remove_page
generic_file_direct_write
ilookup5
in_group_p
inode_maybe_inc_iversion
inode_query_iversion
inode_set_flags
__insert_inode_hash
invalidate_inode_pages2_range
io_schedule
iov_iter_alignment
iov_iter_single_seg_count
iunique
make_vfsgid
make_vfsuid
mark_buffer_async_write
mark_buffer_write_io_error
mnt_drop_write_file
mnt_want_write_file
mount_bdev
page_cache_next_miss
page_cache_prev_miss
redirty_page_for_writepage
__remove_inode_hash
sb_min_blocksize
security_inode_init_security
__set_page_dirty_nobuffers
set_page_writeback
__sync_dirty_buffer
tag_pages_for_writeback
timestamp_truncate
try_to_writeback_inodes_sb
wake_bit_function
wrap_directory_iterator

View File

@ -619,6 +619,10 @@
__tracepoint_mm_filemap_delete_from_page_cache __tracepoint_mm_filemap_delete_from_page_cache
__tracepoint_mm_filemap_add_to_page_cache __tracepoint_mm_filemap_add_to_page_cache
# required printk_cpuid.ko
__traceiter_android_vh_printk_ext_header
__tracepoint_android_vh_printk_ext_header
# required by mali_gondul.ko # required by mali_gondul.ko
anon_inode_getfd anon_inode_getfd
__arch_clear_user __arch_clear_user

View File

@ -1,4 +1,6 @@
[abi_symbol_list] [abi_symbol_list]
alloc_workqueue_attrs
apply_workqueue_attrs
blkdev_get_by_dev blkdev_get_by_dev
blkdev_put blkdev_put
__brelse __brelse
@ -9,6 +11,7 @@
end_buffer_read_sync end_buffer_read_sync
end_buffer_write_sync end_buffer_write_sync
filp_close filp_close
free_workqueue_attrs
__getblk_gfp __getblk_gfp
__irq_regs __irq_regs
kernfs_path_from_node kernfs_path_from_node
@ -25,6 +28,8 @@
submit_bh submit_bh
__kmalloc_node __kmalloc_node
__sbitmap_queue_get __sbitmap_queue_get
__set_task_comm
__traceiter_android_rvh_alloc_and_link_pwqs
__traceiter_android_rvh_check_preempt_wakeup __traceiter_android_rvh_check_preempt_wakeup
__traceiter_android_rvh_cpufreq_transition __traceiter_android_rvh_cpufreq_transition
__traceiter_android_rvh_dequeue_task_fair __traceiter_android_rvh_dequeue_task_fair
@ -39,6 +44,7 @@
__traceiter_android_vh_alloc_pages_reset_wmark __traceiter_android_vh_alloc_pages_reset_wmark
__traceiter_android_vh_alter_mutex_list_add __traceiter_android_vh_alter_mutex_list_add
__traceiter_android_vh_alter_rwsem_list_add __traceiter_android_vh_alter_rwsem_list_add
__traceiter_android_vh_bd_link_disk_holder
__traceiter_android_vh_binder_preset __traceiter_android_vh_binder_preset
__traceiter_android_vh_binder_restore_priority __traceiter_android_vh_binder_restore_priority
__traceiter_android_vh_binder_special_task __traceiter_android_vh_binder_special_task
@ -116,6 +122,7 @@
__traceiter_task_rename __traceiter_task_rename
__traceiter_workqueue_execute_end __traceiter_workqueue_execute_end
__traceiter_workqueue_execute_start __traceiter_workqueue_execute_start
__tracepoint_android_rvh_alloc_and_link_pwqs
__tracepoint_android_rvh_check_preempt_wakeup __tracepoint_android_rvh_check_preempt_wakeup
__tracepoint_android_rvh_cpufreq_transition __tracepoint_android_rvh_cpufreq_transition
__tracepoint_android_rvh_dequeue_task_fair __tracepoint_android_rvh_dequeue_task_fair
@ -130,6 +137,7 @@
__tracepoint_android_vh_alloc_pages_reset_wmark __tracepoint_android_vh_alloc_pages_reset_wmark
__tracepoint_android_vh_alter_mutex_list_add __tracepoint_android_vh_alter_mutex_list_add
__tracepoint_android_vh_alter_rwsem_list_add __tracepoint_android_vh_alter_rwsem_list_add
__tracepoint_android_vh_bd_link_disk_holder
__tracepoint_android_vh_binder_preset __tracepoint_android_vh_binder_preset
__tracepoint_android_vh_binder_restore_priority __tracepoint_android_vh_binder_restore_priority
__tracepoint_android_vh_binder_special_task __tracepoint_android_vh_binder_special_task

View File

@ -362,3 +362,10 @@
bio_init bio_init
__bio_add_page __bio_add_page
blkdev_get_by_dev blkdev_get_by_dev
#required by mem_reclaim_ctl.ko
__traceiter_android_vh_page_should_be_protected
__tracepoint_android_vh_page_should_be_protected
__traceiter_android_vh_page_referenced_check_bypass
__tracepoint_android_vh_page_referenced_check_bypass
folio_total_mapcount

View File

@ -1,4 +1,5 @@
arch/arm64/geniezone/gzvm.ko arch/arm64/geniezone/gzvm.ko
drivers/android/rust_binder.ko
drivers/bluetooth/btbcm.ko drivers/bluetooth/btbcm.ko
drivers/bluetooth/btqca.ko drivers/bluetooth/btqca.ko
drivers/bluetooth/btsdio.ko drivers/bluetooth/btsdio.ko

View File

@ -48,6 +48,7 @@ CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y CONFIG_KALLSYMS_ALL=y
# CONFIG_RSEQ is not set # CONFIG_RSEQ is not set
CONFIG_PROFILING=y CONFIG_PROFILING=y
CONFIG_RUST=y
CONFIG_ARCH_SUNXI=y CONFIG_ARCH_SUNXI=y
CONFIG_ARCH_HISI=y CONFIG_ARCH_HISI=y
CONFIG_ARCH_QCOM=y CONFIG_ARCH_QCOM=y
@ -602,6 +603,7 @@ CONFIG_DTPM=y
CONFIG_DTPM_CPU=y CONFIG_DTPM_CPU=y
CONFIG_DTPM_DEVFREQ=y CONFIG_DTPM_DEVFREQ=y
CONFIG_ANDROID_BINDER_IPC=y CONFIG_ANDROID_BINDER_IPC=y
CONFIG_ANDROID_BINDER_IPC_RUST=m
CONFIG_ANDROID_BINDERFS=y CONFIG_ANDROID_BINDERFS=y
CONFIG_ANDROID_DEBUG_SYMBOLS=y CONFIG_ANDROID_DEBUG_SYMBOLS=y
CONFIG_ANDROID_VENDOR_HOOKS=y CONFIG_ANDROID_VENDOR_HOOKS=y

View File

@ -25,6 +25,8 @@
#include <linux/smp.h> #include <linux/smp.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <trace/hooks/cpuinfo.h>
/* /*
* In case the boot CPU is hotpluggable, we record its initial state and * In case the boot CPU is hotpluggable, we record its initial state and
* current state separately. Certain system registers may contain different * current state separately. Certain system registers may contain different
@ -238,6 +240,8 @@ static int c_show(struct seq_file *m, void *v)
seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr)); seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
} }
trace_android_rvh_cpuinfo_c_show(m);
return 0; return 0;
} }

View File

@ -22,6 +22,8 @@
#include <asm/cputype.h> #include <asm/cputype.h>
#include <asm/topology.h> #include <asm/topology.h>
#include <trace/hooks/topology.h>
#ifdef CONFIG_ACPI #ifdef CONFIG_ACPI
static bool __init acpi_cpu_is_threaded(int cpu) static bool __init acpi_cpu_is_threaded(int cpu)
{ {
@ -151,6 +153,11 @@ static void amu_scale_freq_tick(void)
{ {
u64 prev_core_cnt, prev_const_cnt; u64 prev_core_cnt, prev_const_cnt;
u64 core_cnt, const_cnt, scale; u64 core_cnt, const_cnt, scale;
bool use_amu_fie = true;
trace_android_vh_use_amu_fie(&use_amu_fie);
if(!use_amu_fie)
return;
prev_const_cnt = this_cpu_read(arch_const_cycles_prev); prev_const_cnt = this_cpu_read(arch_const_cycles_prev);
prev_core_cnt = this_cpu_read(arch_core_cycles_prev); prev_core_cnt = this_cpu_read(arch_core_cycles_prev);

View File

@ -9,7 +9,7 @@
#include <asm/kvm_host.h> #include <asm/kvm_host.h>
#define FFA_MIN_FUNC_NUM 0x60 #define FFA_MIN_FUNC_NUM 0x60
#define FFA_MAX_FUNC_NUM 0x7F #define FFA_MAX_FUNC_NUM 0xFF
int hyp_ffa_init(void *pages); int hyp_ffa_init(void *pages);
bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id); bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id);

View File

@ -14,9 +14,11 @@ extern struct kvm_pgtable pkvm_pgtable;
extern hyp_spinlock_t pkvm_pgd_lock; extern hyp_spinlock_t pkvm_pgd_lock;
extern const struct pkvm_module_ops module_ops; extern const struct pkvm_module_ops module_ops;
int hyp_create_pcpu_fixmap(void); int hyp_create_fixmap(void);
void *hyp_fixmap_map(phys_addr_t phys); void *hyp_fixmap_map(phys_addr_t phys);
void hyp_fixmap_unmap(void); void hyp_fixmap_unmap(void);
void *hyp_fixblock_map(phys_addr_t phys);
void hyp_fixblock_unmap(void);
void hyp_poison_page(phys_addr_t phys); void hyp_poison_page(phys_addr_t phys);
int hyp_create_idmap(u32 hyp_va_bits); int hyp_create_idmap(u32 hyp_va_bits);

View File

@ -67,6 +67,9 @@ struct kvm_ffa_buffers {
*/ */
static struct kvm_ffa_buffers hyp_buffers; static struct kvm_ffa_buffers hyp_buffers;
static struct kvm_ffa_buffers host_buffers; static struct kvm_ffa_buffers host_buffers;
static u32 hyp_ffa_version;
static bool has_version_negotiated;
static hyp_spinlock_t version_lock;
static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno) static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno)
{ {
@ -431,6 +434,7 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id,
DECLARE_REG(u32, fraglen, ctxt, 2); DECLARE_REG(u32, fraglen, ctxt, 2);
DECLARE_REG(u64, addr_mbz, ctxt, 3); DECLARE_REG(u64, addr_mbz, ctxt, 3);
DECLARE_REG(u32, npages_mbz, ctxt, 4); DECLARE_REG(u32, npages_mbz, ctxt, 4);
struct ffa_mem_region_attributes *ep_mem_access;
struct ffa_composite_mem_region *reg; struct ffa_composite_mem_region *reg;
struct ffa_mem_region *buf; struct ffa_mem_region *buf;
u32 offset, nr_ranges; u32 offset, nr_ranges;
@ -460,7 +464,9 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id,
buf = hyp_buffers.tx; buf = hyp_buffers.tx;
memcpy(buf, host_buffers.tx, fraglen); memcpy(buf, host_buffers.tx, fraglen);
offset = buf->ep_mem_access[0].composite_off; ep_mem_access = (void *)buf +
ffa_mem_desc_offset(buf, 0, hyp_ffa_version);
offset = ep_mem_access->composite_off;
if (!offset || buf->ep_count != 1 || buf->sender_id != HOST_FFA_ID) { if (!offset || buf->ep_count != 1 || buf->sender_id != HOST_FFA_ID) {
ret = FFA_RET_INVALID_PARAMETERS; ret = FFA_RET_INVALID_PARAMETERS;
goto out_unlock; goto out_unlock;
@ -512,6 +518,7 @@ static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
DECLARE_REG(u32, handle_lo, ctxt, 1); DECLARE_REG(u32, handle_lo, ctxt, 1);
DECLARE_REG(u32, handle_hi, ctxt, 2); DECLARE_REG(u32, handle_hi, ctxt, 2);
DECLARE_REG(u32, flags, ctxt, 3); DECLARE_REG(u32, flags, ctxt, 3);
struct ffa_mem_region_attributes *ep_mem_access;
struct ffa_composite_mem_region *reg; struct ffa_composite_mem_region *reg;
u32 offset, len, fraglen, fragoff; u32 offset, len, fraglen, fragoff;
struct ffa_mem_region *buf; struct ffa_mem_region *buf;
@ -536,7 +543,9 @@ static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
len = res->a1; len = res->a1;
fraglen = res->a2; fraglen = res->a2;
offset = buf->ep_mem_access[0].composite_off; ep_mem_access = (void *)buf +
ffa_mem_desc_offset(buf, 0, hyp_ffa_version);
offset = ep_mem_access->composite_off;
/* /*
* We can trust the SPMD to get this right, but let's at least * We can trust the SPMD to get this right, but let's at least
* check that we end up with something that doesn't look _completely_ * check that we end up with something that doesn't look _completely_
@ -606,7 +615,6 @@ static bool ffa_call_supported(u64 func_id)
case FFA_MSG_POLL: case FFA_MSG_POLL:
case FFA_MSG_WAIT: case FFA_MSG_WAIT:
/* 32-bit variants of 64-bit calls */ /* 32-bit variants of 64-bit calls */
case FFA_MSG_SEND_DIRECT_REQ:
case FFA_MSG_SEND_DIRECT_RESP: case FFA_MSG_SEND_DIRECT_RESP:
case FFA_RXTX_MAP: case FFA_RXTX_MAP:
case FFA_MEM_DONATE: case FFA_MEM_DONATE:
@ -646,104 +654,10 @@ out_handled:
return true; return true;
} }
bool kvm_host_ffa_handler(struct kvm_cpu_context *ctxt, u32 func_id) static int hyp_ffa_post_init(void)
{ {
DECLARE_REG(u64, arg1, ctxt, 1);
DECLARE_REG(u64, arg2, ctxt, 2);
DECLARE_REG(u64, arg3, ctxt, 3);
DECLARE_REG(u64, arg4, ctxt, 4);
struct arm_smccc_res res;
bool handled = true;
int err = 0;
/*
* There's no way we can tell what a non-standard SMC call might
* be up to. Ideally, we would terminate these here and return
* an error to the host, but sadly devices make use of custom
* firmware calls for things like power management, debugging,
* RNG access and crash reporting.
*
* Given that the architecture requires us to trust EL3 anyway,
* we forward unrecognised calls on under the assumption that
* the firmware doesn't expose a mechanism to access arbitrary
* non-secure memory. Short of a per-device table of SMCs, this
* is the best we can do.
*/
if (!is_ffa_call(func_id))
return false;
switch (func_id) {
case FFA_FEATURES:
if (!do_ffa_features(&res, ctxt)) {
handled = false;
goto unhandled;
}
break;
/* Memory management */
case FFA_FN64_RXTX_MAP:
do_ffa_rxtx_map(&res, ctxt);
break;
case FFA_RXTX_UNMAP:
do_ffa_rxtx_unmap(&res, ctxt);
break;
case FFA_MEM_SHARE:
case FFA_FN64_MEM_SHARE:
do_ffa_mem_xfer(FFA_FN64_MEM_SHARE, &res, ctxt);
break;
case FFA_MEM_RECLAIM:
do_ffa_mem_reclaim(&res, ctxt);
break;
case FFA_MEM_LEND:
case FFA_FN64_MEM_LEND:
do_ffa_mem_xfer(FFA_FN64_MEM_LEND, &res, ctxt);
break;
case FFA_MEM_FRAG_TX:
do_ffa_mem_frag_tx(&res, ctxt);
break;
default:
if (ffa_call_supported(func_id)) {
handled = false;
goto unhandled;
}
ffa_to_smccc_error(&res, FFA_RET_NOT_SUPPORTED);
}
ffa_set_retval(ctxt, &res);
err = res.a0 == FFA_SUCCESS ? 0 : res.a2;
unhandled:
trace_host_ffa_call(func_id, arg1, arg2, arg3, arg4, handled, err);
return handled;
}
int hyp_ffa_init(void *pages)
{
struct arm_smccc_res res;
size_t min_rxtx_sz; size_t min_rxtx_sz;
void *tx, *rx; struct arm_smccc_res res;
if (kvm_host_psci_config.smccc_version < ARM_SMCCC_VERSION_1_1)
return 0;
arm_smccc_1_1_smc(FFA_VERSION, FFA_VERSION_1_0, 0, 0, 0, 0, 0, 0, &res);
if (res.a0 == FFA_RET_NOT_SUPPORTED)
return 0;
/*
* Firmware returns the maximum supported version of the FF-A
* implementation. Check that the returned version is
* backwards-compatible with the hyp according to the rules in DEN0077A
* v1.1 REL0 13.2.1.
*
* Of course, things are never simple when dealing with firmware. v1.1
* broke ABI with v1.0 on several structures, which is itself
* incompatible with the aforementioned versioning scheme. The
* expectation is that v1.x implementations that do not support the v1.0
* ABI return NOT_SUPPORTED rather than a version number, according to
* DEN0077A v1.1 REL0 18.6.4.
*/
if (FFA_MAJOR_VERSION(res.a0) != 1)
return -EOPNOTSUPP;
arm_smccc_1_1_smc(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0, &res); arm_smccc_1_1_smc(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0, &res);
if (res.a0 != FFA_SUCCESS) if (res.a0 != FFA_SUCCESS)
@ -774,6 +688,212 @@ int hyp_ffa_init(void *pages)
if (min_rxtx_sz > PAGE_SIZE) if (min_rxtx_sz > PAGE_SIZE)
return -EOPNOTSUPP; return -EOPNOTSUPP;
return 0;
}
static void do_ffa_version(struct arm_smccc_res *res,
struct kvm_cpu_context *ctxt)
{
DECLARE_REG(u32, ffa_req_version, ctxt, 1);
if (FFA_MAJOR_VERSION(ffa_req_version) != 1) {
res->a0 = FFA_RET_NOT_SUPPORTED;
return;
}
hyp_spin_lock(&version_lock);
if (has_version_negotiated) {
res->a0 = hyp_ffa_version;
goto unlock;
}
/*
* If the client driver tries to downgrade the version, we need to ask
* first if TEE supports it.
*/
if (FFA_MINOR_VERSION(ffa_req_version) < FFA_MINOR_VERSION(hyp_ffa_version)) {
arm_smccc_1_1_smc(FFA_VERSION, ffa_req_version, 0,
0, 0, 0, 0, 0,
res);
if (res->a0 == FFA_RET_NOT_SUPPORTED)
goto unlock;
hyp_ffa_version = ffa_req_version;
}
if (hyp_ffa_post_init())
res->a0 = FFA_RET_NOT_SUPPORTED;
else {
has_version_negotiated = true;
res->a0 = hyp_ffa_version;
}
unlock:
hyp_spin_unlock(&version_lock);
}
static void do_ffa_part_get(struct arm_smccc_res *res,
struct kvm_cpu_context *ctxt)
{
DECLARE_REG(u32, uuid0, ctxt, 1);
DECLARE_REG(u32, uuid1, ctxt, 2);
DECLARE_REG(u32, uuid2, ctxt, 3);
DECLARE_REG(u32, uuid3, ctxt, 4);
DECLARE_REG(u32, flags, ctxt, 5);
u32 count, partition_sz, copy_sz;
hyp_spin_lock(&host_buffers.lock);
if (!host_buffers.rx) {
ffa_to_smccc_res(res, FFA_RET_BUSY);
goto out_unlock;
}
arm_smccc_1_1_smc(FFA_PARTITION_INFO_GET, uuid0, uuid1,
uuid2, uuid3, flags, 0, 0,
res);
if (res->a0 != FFA_SUCCESS)
goto out_unlock;
count = res->a2;
if (!count)
goto out_unlock;
if (hyp_ffa_version > FFA_VERSION_1_0) {
/* Get the number of partitions deployed in the system */
if (flags & 0x1)
goto out_unlock;
partition_sz = res->a3;
} else {
/* FFA_VERSION_1_0 lacks the size in the response */
partition_sz = FFA_1_0_PARTITON_INFO_SZ;
}
copy_sz = partition_sz * count;
if (copy_sz > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
ffa_to_smccc_res(res, FFA_RET_ABORTED);
goto out_unlock;
}
memcpy(host_buffers.rx, hyp_buffers.rx, copy_sz);
out_unlock:
hyp_spin_unlock(&host_buffers.lock);
}
/*
 * Entry point for SMCs issued by the host that may be FF-A calls.
 *
 * Returns true when the call was consumed here (emulated or rejected),
 * in which case the return values have already been written back into
 * the host context. Returns false to let the caller forward the SMC to
 * EL3 unchanged.
 */
bool kvm_host_ffa_handler(struct kvm_cpu_context *ctxt, u32 func_id)
{
	DECLARE_REG(u64, arg1, ctxt, 1);
	DECLARE_REG(u64, arg2, ctxt, 2);
	DECLARE_REG(u64, arg3, ctxt, 3);
	DECLARE_REG(u64, arg4, ctxt, 4);
	struct arm_smccc_res res;
	bool handled = true;
	int err = 0;

	/*
	 * There's no way we can tell what a non-standard SMC call might
	 * be up to. Ideally, we would terminate these here and return
	 * an error to the host, but sadly devices make use of custom
	 * firmware calls for things like power management, debugging,
	 * RNG access and crash reporting.
	 *
	 * Given that the architecture requires us to trust EL3 anyway,
	 * we forward unrecognised calls on under the assumption that
	 * the firmware doesn't expose a mechanism to access arbitrary
	 * non-secure memory. Short of a per-device table of SMCs, this
	 * is the best we can do.
	 */
	if (!is_ffa_call(func_id))
		return false;

	if (!has_version_negotiated && func_id != FFA_VERSION) {
		/*
		 * Only FFA_VERSION is allowed before a version has been
		 * negotiated. The call is consumed here (handled == true),
		 * so the error must be written back to the host context
		 * before bailing out -- nobody else will do it.
		 */
		ffa_to_smccc_error(&res, FFA_RET_INVALID_PARAMETERS);
		ffa_set_retval(ctxt, &res);
		err = res.a2;
		goto unhandled;
	}

	switch (func_id) {
	case FFA_FEATURES:
		if (!do_ffa_features(&res, ctxt)) {
			handled = false;
			goto unhandled;
		}
		break;
	/* Memory management */
	case FFA_FN64_RXTX_MAP:
		do_ffa_rxtx_map(&res, ctxt);
		break;
	case FFA_RXTX_UNMAP:
		do_ffa_rxtx_unmap(&res, ctxt);
		break;
	case FFA_MEM_SHARE:
	case FFA_FN64_MEM_SHARE:
		do_ffa_mem_xfer(FFA_FN64_MEM_SHARE, &res, ctxt);
		break;
	case FFA_MEM_RECLAIM:
		do_ffa_mem_reclaim(&res, ctxt);
		break;
	case FFA_MEM_LEND:
	case FFA_FN64_MEM_LEND:
		do_ffa_mem_xfer(FFA_FN64_MEM_LEND, &res, ctxt);
		break;
	case FFA_MEM_FRAG_TX:
		do_ffa_mem_frag_tx(&res, ctxt);
		break;
	case FFA_VERSION:
		do_ffa_version(&res, ctxt);
		break;
	case FFA_PARTITION_INFO_GET:
		do_ffa_part_get(&res, ctxt);
		break;
	default:
		/* Unemulated but known-safe FF-A calls are forwarded to EL3. */
		if (ffa_call_supported(func_id)) {
			handled = false;
			goto unhandled;
		}

		ffa_to_smccc_error(&res, FFA_RET_NOT_SUPPORTED);
	}

	ffa_set_retval(ctxt, &res);
	err = res.a0 == FFA_SUCCESS ? 0 : res.a2;
unhandled:
	trace_host_ffa_call(func_id, arg1, arg2, arg3, arg4, handled, err);
	return handled;
}
int hyp_ffa_init(void *pages)
{
struct arm_smccc_res res;
void *tx, *rx;
if (kvm_host_psci_config.smccc_version < ARM_SMCCC_VERSION_1_1)
return 0;
arm_smccc_1_1_smc(FFA_VERSION, FFA_VERSION_1_1, 0, 0, 0, 0, 0, 0, &res);
if (res.a0 == FFA_RET_NOT_SUPPORTED)
return 0;
/*
* Firmware returns the maximum supported version of the FF-A
* implementation. Check that the returned version is
* backwards-compatible with the hyp according to the rules in DEN0077A
* v1.1 REL0 13.2.1.
*
* Of course, things are never simple when dealing with firmware. v1.1
* broke ABI with v1.0 on several structures, which is itself
* incompatible with the aforementioned versioning scheme. The
* expectation is that v1.x implementations that do not support the v1.0
* ABI return NOT_SUPPORTED rather than a version number, according to
* DEN0077A v1.1 REL0 18.6.4.
*/
if (FFA_MAJOR_VERSION(res.a0) != 1)
return -EOPNOTSUPP;
if (FFA_MINOR_VERSION(res.a0) < FFA_MINOR_VERSION(FFA_VERSION_1_1))
hyp_ffa_version = res.a0;
else
hyp_ffa_version = FFA_VERSION_1_1;
tx = pages; tx = pages;
pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE; pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
rx = pages; rx = pages;
@ -795,5 +915,6 @@ int hyp_ffa_init(void *pages)
.lock = __HYP_SPIN_LOCK_UNLOCKED, .lock = __HYP_SPIN_LOCK_UNLOCKED,
}; };
version_lock = __HYP_SPIN_LOCK_UNLOCKED;
return 0; return 0;
} }

View File

@ -273,25 +273,68 @@ static void guest_s2_put_page(void *addr)
hyp_put_page(&current_vm->pool, addr); hyp_put_page(&current_vm->pool, addr);
} }
/*
 * Map a chunk of guest memory at a private hyp fixmap VA so it can be
 * maintained (cache clean / icache invalidate) from EL2.
 *
 * *size must be a power of two. On return it is updated to the granule
 * actually mapped: unchanged (PMD_SIZE) when a block mapping was
 * obtained, downgraded to PAGE_SIZE otherwise. Returns the hyp VA of
 * the mapping, or NULL on a bad size.
 */
static void *__fixmap_guest_page(void *va, size_t *size)
{
	void *addr;

	/* IS_ALIGNED(x, x) is false iff x is not zero or a power of two. */
	if (WARN_ON(!IS_ALIGNED(*size, *size)))
		return NULL;

	if (IS_ALIGNED(*size, PMD_SIZE)) {
		addr = hyp_fixblock_map(__hyp_pa(va));
		if (addr)
			return addr;

		/* No block mapping available; fall back to a single page. */
		*size = PAGE_SIZE;
	}

	if (IS_ALIGNED(*size, PAGE_SIZE))
		return hyp_fixmap_map(__hyp_pa(va));

	WARN_ON(1);

	return NULL;
}
/*
 * Tear down a mapping installed by __fixmap_guest_page().
 *
 * @size must be the granule that was actually mapped (PAGE_SIZE or
 * PMD_SIZE, as reported back through the *size out-parameter); anything
 * else is a caller bug.
 */
static void __fixunmap_guest_page(size_t size)
{
	if (size == PAGE_SIZE)
		hyp_fixmap_unmap();
	else if (size == PMD_SIZE)
		hyp_fixblock_unmap();
	else
		BUG();
}
static void clean_dcache_guest_page(void *va, size_t size) static void clean_dcache_guest_page(void *va, size_t size)
{ {
while (size) { while (size) {
__clean_dcache_guest_page(hyp_fixmap_map(__hyp_pa(va)), size_t __size = size == PMD_SIZE ? size : PAGE_SIZE;
PAGE_SIZE); void *addr = __fixmap_guest_page(va, &__size);
hyp_fixmap_unmap();
va += PAGE_SIZE; __clean_dcache_guest_page(addr, __size);
size -= PAGE_SIZE; __fixunmap_guest_page(__size);
size -= __size;
va += __size;
} }
} }
static void invalidate_icache_guest_page(void *va, size_t size) static void invalidate_icache_guest_page(void *va, size_t size)
{ {
while (size) { while (size) {
__invalidate_icache_guest_page(hyp_fixmap_map(__hyp_pa(va)), size_t __size = size == PMD_SIZE ? size : PAGE_SIZE;
PAGE_SIZE); void *addr = __fixmap_guest_page(va, &__size);
hyp_fixmap_unmap();
va += PAGE_SIZE; __invalidate_icache_guest_page(addr, __size);
size -= PAGE_SIZE; __fixunmap_guest_page(__size);
size -= __size;
va += __size;
} }
} }

View File

@ -309,9 +309,8 @@ int hyp_map_vectors(void)
return 0; return 0;
} }
void *hyp_fixmap_map(phys_addr_t phys) static void *fixmap_map_slot(struct hyp_fixmap_slot *slot, phys_addr_t phys)
{ {
struct hyp_fixmap_slot *slot = this_cpu_ptr(&fixmap_slots);
kvm_pte_t pte, *ptep = slot->ptep; kvm_pte_t pte, *ptep = slot->ptep;
pte = *ptep; pte = *ptep;
@ -323,10 +322,21 @@ void *hyp_fixmap_map(phys_addr_t phys)
return (void *)slot->addr + offset_in_page(phys); return (void *)slot->addr + offset_in_page(phys);
} }
/* Map @phys into this CPU's private page-sized fixmap slot and return its hyp VA. */
void *hyp_fixmap_map(phys_addr_t phys)
{
	return fixmap_map_slot(this_cpu_ptr(&fixmap_slots), phys);
}
static void fixmap_clear_slot(struct hyp_fixmap_slot *slot) static void fixmap_clear_slot(struct hyp_fixmap_slot *slot)
{ {
kvm_pte_t *ptep = slot->ptep; kvm_pte_t *ptep = slot->ptep;
u64 addr = slot->addr; u64 addr = slot->addr;
u32 level;
if (FIELD_GET(KVM_PTE_TYPE, *ptep) == KVM_PTE_TYPE_PAGE)
level = KVM_PGTABLE_MAX_LEVELS - 1;
else
level = KVM_PGTABLE_MAX_LEVELS - 2; /* create_fixblock() guarantees PMD level */
WRITE_ONCE(*ptep, *ptep & ~KVM_PTE_VALID); WRITE_ONCE(*ptep, *ptep & ~KVM_PTE_VALID);
@ -340,7 +350,7 @@ static void fixmap_clear_slot(struct hyp_fixmap_slot *slot)
* https://lore.kernel.org/kvm/20221017115209.2099-1-will@kernel.org/T/#mf10dfbaf1eaef9274c581b81c53758918c1d0f03 * https://lore.kernel.org/kvm/20221017115209.2099-1-will@kernel.org/T/#mf10dfbaf1eaef9274c581b81c53758918c1d0f03
*/ */
dsb(ishst); dsb(ishst);
__tlbi_level(vale2is, __TLBI_VADDR(addr, 0), (KVM_PGTABLE_MAX_LEVELS - 1)); __tlbi_level(vale2is, __TLBI_VADDR(addr, 0), level);
dsb(ish); dsb(ish);
isb(); isb();
} }
@ -353,9 +363,9 @@ void hyp_fixmap_unmap(void)
static int __create_fixmap_slot_cb(const struct kvm_pgtable_visit_ctx *ctx, static int __create_fixmap_slot_cb(const struct kvm_pgtable_visit_ctx *ctx,
enum kvm_pgtable_walk_flags visit) enum kvm_pgtable_walk_flags visit)
{ {
struct hyp_fixmap_slot *slot = per_cpu_ptr(&fixmap_slots, (u64)ctx->arg); struct hyp_fixmap_slot *slot = (struct hyp_fixmap_slot *)ctx->arg;
if (!kvm_pte_valid(ctx->old) || ctx->level != KVM_PGTABLE_MAX_LEVELS - 1) if (!kvm_pte_valid(ctx->old) || (ctx->end - ctx->start) != kvm_granule_size(ctx->level))
return -EINVAL; return -EINVAL;
slot->addr = ctx->addr; slot->addr = ctx->addr;
@ -376,13 +386,75 @@ static int create_fixmap_slot(u64 addr, u64 cpu)
struct kvm_pgtable_walker walker = { struct kvm_pgtable_walker walker = {
.cb = __create_fixmap_slot_cb, .cb = __create_fixmap_slot_cb,
.flags = KVM_PGTABLE_WALK_LEAF, .flags = KVM_PGTABLE_WALK_LEAF,
.arg = (void *)cpu, .arg = (void *)per_cpu_ptr(&fixmap_slots, cpu),
}; };
return kvm_pgtable_walk(&pkvm_pgtable, addr, PAGE_SIZE, &walker); return kvm_pgtable_walk(&pkvm_pgtable, addr, PAGE_SIZE, &walker);
} }
int hyp_create_pcpu_fixmap(void) #ifndef CONFIG_ARM64_64K_PAGES
/* Single system-wide PMD-sized fixmap slot, serialized by hyp_fixblock_lock. */
static struct hyp_fixmap_slot hyp_fixblock_slot;
static DEFINE_HYP_SPINLOCK(hyp_fixblock_lock);

/*
 * Map a PMD-aligned physical range at the shared fixblock slot.
 *
 * NOTE: the lock is taken here and only dropped by hyp_fixblock_unmap(),
 * so the slot is held exclusively for the whole map/use/unmap sequence
 * and the two calls must always be paired.
 */
void *hyp_fixblock_map(phys_addr_t phys)
{
	WARN_ON(!IS_ALIGNED(phys, PMD_SIZE));
	hyp_spin_lock(&hyp_fixblock_lock);
	return fixmap_map_slot(&hyp_fixblock_slot, phys);
}

/* Invalidate the fixblock mapping and release the lock taken at map time. */
void hyp_fixblock_unmap(void)
{
	fixmap_clear_slot(&hyp_fixblock_slot);
	hyp_spin_unlock(&hyp_fixblock_lock);
}
/*
 * Set up the single PMD-sized fixblock slot: find a PMD-aligned chunk of
 * RAM, map it once at a freshly allocated private hyp VA, then walk that
 * mapping so __create_fixmap_slot_cb() can record the block-level PTE
 * backing the slot.
 */
static int create_fixblock(void)
{
	struct kvm_pgtable_walker walker = {
		.cb = __create_fixmap_slot_cb,
		.flags = KVM_PGTABLE_WALK_LEAF,
		.arg = (void *)&hyp_fixblock_slot,
	};
	unsigned long addr;
	phys_addr_t phys;
	int ret, i;

	/* Find a RAM phys address, PMD aligned */
	for (i = 0; i < hyp_memblock_nr; i++) {
		phys = ALIGN(hyp_memory[i].base, PMD_SIZE);
		/*
		 * NOTE(review): '<' rejects a memblock whose aligned PMD fits
		 * exactly at its end; '<=' would accept it. Confirm whether
		 * that exclusion is intentional.
		 */
		if (phys + PMD_SIZE < (hyp_memory[i].base + hyp_memory[i].size))
			break;
	}

	/* Really? Your RAM isn't larger than a couple of times PMD_SIZE? */
	if (i >= hyp_memblock_nr)
		return -EINVAL;

	hyp_spin_lock(&pkvm_pgd_lock);
	addr = ALIGN(__io_map_base, PMD_SIZE);
	ret = __pkvm_alloc_private_va_range(addr, PMD_SIZE);
	if (ret)
		goto unlock;

	ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, PMD_SIZE, phys, PAGE_HYP);
	if (ret)
		goto unlock;

	ret = kvm_pgtable_walk(&pkvm_pgtable, addr, PMD_SIZE, &walker);

unlock:
	hyp_spin_unlock(&pkvm_pgd_lock);

	return ret;
}
#else
/*
 * No block-sized fixmap with 64K pages: mapping always fails (callers
 * fall back to the page-sized fixmap) and reaching unmap is a bug.
 */
void hyp_fixblock_unmap(void) { WARN_ON(1); }
void *hyp_fixblock_map(phys_addr_t phys) { return NULL; }
static int create_fixblock(void) { return 0; }
#endif
int hyp_create_fixmap(void)
{ {
unsigned long addr, i; unsigned long addr, i;
int ret; int ret;
@ -402,7 +474,7 @@ int hyp_create_pcpu_fixmap(void)
return ret; return ret;
} }
return 0; return create_fixblock();
} }
int hyp_create_idmap(u32 hyp_va_bits) int hyp_create_idmap(u32 hyp_va_bits)

View File

@ -362,7 +362,7 @@ void __noreturn __pkvm_init_finalise(void)
if (ret) if (ret)
goto out; goto out;
ret = hyp_create_pcpu_fixmap(); ret = hyp_create_fixmap();
if (ret) if (ret)
goto out; goto out;

View File

@ -23,6 +23,8 @@
#include <asm/set_memory.h> #include <asm/set_memory.h>
#include "bpf_jit.h" #include "bpf_jit.h"
#undef CREATE_TRACE_POINTS
#include <trace/hooks/bpf_jit_comp.h>
#define TMP_REG_1 (MAX_BPF_JIT_REG + 0) #define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1) #define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
@ -1649,6 +1651,8 @@ skip_init_ctx:
goto out_off; goto out_off;
} }
bpf_jit_binary_lock_ro(header); bpf_jit_binary_lock_ro(header);
trace_android_rvh_bpf_int_jit_compile_ro(header,
header->size);
} else { } else {
jit_data->ctx = ctx; jit_data->ctx = ctx;
jit_data->image = image_ptr; jit_data->image = image_ptr;

View File

@ -52,6 +52,7 @@ CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y CONFIG_KALLSYMS_ALL=y
# CONFIG_RSEQ is not set # CONFIG_RSEQ is not set
CONFIG_PROFILING=y CONFIG_PROFILING=y
CONFIG_RUST=y
CONFIG_SMP=y CONFIG_SMP=y
CONFIG_X86_X2APIC=y CONFIG_X86_X2APIC=y
CONFIG_HYPERVISOR_GUEST=y CONFIG_HYPERVISOR_GUEST=y
@ -537,6 +538,7 @@ CONFIG_POWERCAP=y
CONFIG_IDLE_INJECT=y CONFIG_IDLE_INJECT=y
CONFIG_DTPM=y CONFIG_DTPM=y
CONFIG_ANDROID_BINDER_IPC=y CONFIG_ANDROID_BINDER_IPC=y
CONFIG_ANDROID_BINDER_IPC_RUST=m
CONFIG_ANDROID_BINDERFS=y CONFIG_ANDROID_BINDERFS=y
CONFIG_ANDROID_DEBUG_SYMBOLS=y CONFIG_ANDROID_DEBUG_SYMBOLS=y
CONFIG_ANDROID_VENDOR_HOOKS=y CONFIG_ANDROID_VENDOR_HOOKS=y

View File

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only // SPDX-License-Identifier: GPL-2.0-only
#include <linux/blkdev.h> #include <linux/blkdev.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <trace/hooks/blk.h>
struct bd_holder_disk { struct bd_holder_disk {
struct list_head list; struct list_head list;
@ -107,6 +108,7 @@ int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
if (ret) if (ret)
goto out_del_symlink; goto out_del_symlink;
list_add(&holder->list, &disk->slave_bdevs); list_add(&holder->list, &disk->slave_bdevs);
trace_android_vh_bd_link_disk_holder(bdev, disk);
mutex_unlock(&disk->open_mutex); mutex_unlock(&disk->open_mutex);
return 0; return 0;

View File

@ -26,7 +26,7 @@ config ANDROID_BINDER_IPC_C
If unsure, pick the C implementation. If unsure, pick the C implementation.
config ANDROID_BINDER_IPC_RUST config ANDROID_BINDER_IPC_RUST
bool "Android Binder IPC Driver in Rust" tristate "Android Binder IPC Driver in Rust"
depends on ANDROID_BINDER_IPC && RUST depends on ANDROID_BINDER_IPC && RUST
help help
Enable the Rust implementation of the Android Binder IPC Driver. Enable the Rust implementation of the Android Binder IPC Driver.

View File

@ -5,8 +5,12 @@ obj-$(CONFIG_ANDROID_BINDER_IPC) += binder_pick_impl.o
obj-$(CONFIG_ANDROID_BINDERFS_C) += binderfs.o obj-$(CONFIG_ANDROID_BINDERFS_C) += binderfs.o
obj-$(CONFIG_ANDROID_BINDER_IPC_C) += binder.o binder_alloc.o obj-$(CONFIG_ANDROID_BINDER_IPC_C) += binder.o binder_alloc.o
obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
obj-$(CONFIG_ANDROID_BINDERFS_RUST) += binder/rust_binderfs.o
obj-$(CONFIG_ANDROID_BINDER_IPC_RUST) += binder/rust_binder.o
obj-$(CONFIG_ANDROID_DEBUG_SYMBOLS) += android_debug_symbols.o obj-$(CONFIG_ANDROID_DEBUG_SYMBOLS) += android_debug_symbols.o
obj-$(CONFIG_ANDROID_VENDOR_HOOKS) += vendor_hooks.o obj-$(CONFIG_ANDROID_VENDOR_HOOKS) += vendor_hooks.o
obj-$(CONFIG_ANDROID_DEBUG_KINFO) += debug_kinfo.o obj-$(CONFIG_ANDROID_DEBUG_KINFO) += debug_kinfo.o
obj-$(CONFIG_ANDROID_BINDER_IPC_RUST) += rust_binder.o
rust_binder-objs := binder/rust_binder.o
ifeq ($(CONFIG_ANDROID_BINDERFS_RUST),y)
rust_binder-objs += binder/rust_binderfs.o
endif

View File

@ -17,6 +17,7 @@
#include <linux/fs.h> #include <linux/fs.h>
#include <linux/swap.h> #include <linux/swap.h>
#include <linux/compaction.h> #include <linux/compaction.h>
#include <linux/cma.h>
struct ads_entry { struct ads_entry {
char *name; char *name;
@ -47,6 +48,8 @@ static const struct ads_entry ads_entries[ADS_END] = {
ADS_ENTRY(ADS_DROP_SLAB, drop_slab), ADS_ENTRY(ADS_DROP_SLAB, drop_slab),
ADS_ENTRY(ADS_FREE_PAGES, try_to_free_pages), ADS_ENTRY(ADS_FREE_PAGES, try_to_free_pages),
ADS_ENTRY(ADS_COMPACT_PAGES, try_to_compact_pages), ADS_ENTRY(ADS_COMPACT_PAGES, try_to_compact_pages),
ADS_ENTRY(ADS_SHOW_MEM, __show_mem),
ADS_ENTRY(ADS_TOTAL_CMA, &totalcma_pages),
}; };
/* /*

View File

@ -36,6 +36,8 @@
#include "binder_internal.h" #include "binder_internal.h"
MODULE_IMPORT_NS(VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver);
#define FIRST_INODE 1 #define FIRST_INODE 1
#define SECOND_INODE 2 #define SECOND_INODE 2
#define INODE_OFFSET 3 #define INODE_OFFSET 3

View File

@ -22,12 +22,6 @@
#endif #endif
#endif #endif
#ifndef CONFIG_ANDROID_BINDER_IPC_RUST
#ifdef CONFIG_ANDROID_BINDER_IPC_DEFAULT_IS_RUST
#error "The default Binder driver implementation is Rust, but the Rust implementation is disabled"
#endif
#endif
#ifndef CONFIG_ANDROID_BINDER_IPC_C #ifndef CONFIG_ANDROID_BINDER_IPC_C
#ifndef CONFIG_ANDROID_BINDER_IPC_DEFAULT_IS_RUST #ifndef CONFIG_ANDROID_BINDER_IPC_DEFAULT_IS_RUST
#error "The default Binder driver implementation is C, but the C implementation is disabled" #error "The default Binder driver implementation is C, but the C implementation is disabled"
@ -35,7 +29,9 @@
#endif #endif
bool binder_use_rust = IS_ENABLED(CONFIG_ANDROID_BINDER_IPC_DEFAULT_IS_RUST); bool binder_use_rust = IS_ENABLED(CONFIG_ANDROID_BINDER_IPC_DEFAULT_IS_RUST);
EXPORT_SYMBOL_GPL(binder_use_rust);
bool binder_driver_initialized; bool binder_driver_initialized;
EXPORT_SYMBOL_GPL(binder_driver_initialized);
static int binder_param_set(const char *buffer, const struct kernel_param *kp) static int binder_param_set(const char *buffer, const struct kernel_param *kp)
{ {

View File

@ -59,8 +59,10 @@
#include <trace/hooks/topology.h> #include <trace/hooks/topology.h>
#include <trace/hooks/thermal.h> #include <trace/hooks/thermal.h>
#include <trace/hooks/bug.h> #include <trace/hooks/bug.h>
#include <trace/hooks/bpf_jit_comp.h>
#include <trace/hooks/softlockup.h> #include <trace/hooks/softlockup.h>
#include <trace/hooks/power.h> #include <trace/hooks/power.h>
#include <trace/hooks/cpuinfo.h>
#include <trace/hooks/gzvm.h> #include <trace/hooks/gzvm.h>
#include <trace/hooks/signal.h> #include <trace/hooks/signal.h>
#include <trace/hooks/logbuf.h> #include <trace/hooks/logbuf.h>
@ -71,6 +73,8 @@
#include <trace/hooks/user.h> #include <trace/hooks/user.h>
#include <trace/hooks/fuse.h> #include <trace/hooks/fuse.h>
#include <trace/hooks/psi.h> #include <trace/hooks/psi.h>
#include <trace/hooks/blk.h>
#include <trace/hooks/suspend.h>
/* /*
* Export tracepoints that act as a bare tracehook (ie: have no trace event * Export tracepoints that act as a bare tracehook (ie: have no trace event
@ -121,6 +125,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpu_idle_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpu_idle_exit); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpu_idle_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mpam_set); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mpam_set);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_wq_lockup_pool); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_wq_lockup_pool);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_alloc_and_link_pwqs);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ipi_stop); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ipi_stop);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sysrq_crash); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sysrq_crash);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_printk_hotplug); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_printk_hotplug);
@ -135,6 +140,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpufreq_fast_switch);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpufreq_target); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpufreq_target);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpu_cgroup_attach); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpu_cgroup_attach);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpu_cgroup_online); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpu_cgroup_online);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpuinfo_c_show);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_meminfo_cache_adjust); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_meminfo_cache_adjust);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_si_mem_available_adjust); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_si_mem_available_adjust);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_si_meminfo_adjust); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_si_meminfo_adjust);
@ -146,6 +152,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_send_command);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_compl_command); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_compl_command);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cgroup_set_task); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cgroup_set_task);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cgroup_force_kthread_migration); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cgroup_force_kthread_migration);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_bpf_int_jit_compile_ro);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_syscall_prctl_finished); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_syscall_prctl_finished);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_anon_vma_name_recog); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_anon_vma_name_recog);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_restore_mm_flags); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_restore_mm_flags);
@ -317,6 +324,9 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_thread_read);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_free_proc); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_free_proc);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_thread_release); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_thread_release);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_read_done); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_read_done);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_modify_scan_control);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_should_continue_reclaim);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_file_is_tiny_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_exit_signal); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_exit_signal);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_check_folio_look_around_ref); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_check_folio_look_around_ref);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_look_around); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_look_around);
@ -337,6 +347,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_logbuf_pr_cont);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_pages_slowpath); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_pages_slowpath);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mmap_region); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mmap_region);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tcp_write_timeout_estab_retrans); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tcp_write_timeout_estab_retrans);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tcp_connect);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_try_to_unmap_one); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_try_to_unmap_one);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_record_rwsem_reader_owned); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_record_rwsem_reader_owned);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_clear_rwsem_reader_owned); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_clear_rwsem_reader_owned);
@ -409,3 +420,9 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_read_lazy_flag);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_tsk_need_resched_lazy); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_tsk_need_resched_lazy);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_usb_dev_suspend); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_usb_dev_suspend);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_usb_dev_resume); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_usb_dev_resume);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_bd_link_disk_holder);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_use_amu_fie);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_resume_begin);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_resume_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_early_resume_begin);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_enable_thermal_genl_check);

View File

@ -34,6 +34,12 @@ static DEFINE_PER_CPU(u32, freq_factor) = 1;
static bool supports_scale_freq_counters(const struct cpumask *cpus) static bool supports_scale_freq_counters(const struct cpumask *cpus)
{ {
bool use_amu_fie = true;
trace_android_vh_use_amu_fie(&use_amu_fie);
if (!use_amu_fie)
return false;
return cpumask_subset(cpus, &scale_freq_counters_mask); return cpumask_subset(cpus, &scale_freq_counters_mask);
} }

View File

@ -121,13 +121,6 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
((event->data[2] == MODULE_BROUGHT_UP) || ((event->data[2] == MODULE_BROUGHT_UP) ||
(event->data[2] == MODULE_ALREADY_UP)) ? (event->data[2] == MODULE_ALREADY_UP)) ?
"Bring-up succeed" : "Bring-up failed"); "Bring-up succeed" : "Bring-up failed");
if (event->length > 3 && event->data[3])
priv->btmrvl_dev.dev_type = HCI_AMP;
else
priv->btmrvl_dev.dev_type = HCI_PRIMARY;
BT_DBG("dev_type: %d", priv->btmrvl_dev.dev_type);
} else if (priv->btmrvl_dev.sendcmdflag && } else if (priv->btmrvl_dev.sendcmdflag &&
event->data[1] == MODULE_SHUTDOWN_REQ) { event->data[1] == MODULE_SHUTDOWN_REQ) {
BT_DBG("EVENT:%s", (event->data[2]) ? BT_DBG("EVENT:%s", (event->data[2]) ?
@ -686,8 +679,6 @@ int btmrvl_register_hdev(struct btmrvl_private *priv)
hdev->wakeup = btmrvl_wakeup; hdev->wakeup = btmrvl_wakeup;
SET_HCIDEV_DEV(hdev, &card->func->dev); SET_HCIDEV_DEV(hdev, &card->func->dev);
hdev->dev_type = priv->btmrvl_dev.dev_type;
ret = hci_register_dev(hdev); ret = hci_register_dev(hdev);
if (ret < 0) { if (ret < 0) {
BT_ERR("Can not register HCI device"); BT_ERR("Can not register HCI device");

View File

@ -134,7 +134,6 @@ static int rsi_hci_attach(void *priv, struct rsi_proto_ops *ops)
hdev->bus = HCI_USB; hdev->bus = HCI_USB;
hci_set_drvdata(hdev, h_adapter); hci_set_drvdata(hdev, h_adapter);
hdev->dev_type = HCI_PRIMARY;
hdev->open = rsi_hci_open; hdev->open = rsi_hci_open;
hdev->close = rsi_hci_close; hdev->close = rsi_hci_close;
hdev->flush = rsi_hci_flush; hdev->flush = rsi_hci_flush;

View File

@ -32,9 +32,6 @@ static const struct sdio_device_id btsdio_table[] = {
/* Generic Bluetooth Type-B SDIO device */ /* Generic Bluetooth Type-B SDIO device */
{ SDIO_DEVICE_CLASS(SDIO_CLASS_BT_B) }, { SDIO_DEVICE_CLASS(SDIO_CLASS_BT_B) },
/* Generic Bluetooth AMP controller */
{ SDIO_DEVICE_CLASS(SDIO_CLASS_BT_AMP) },
{ } /* Terminating entry */ { } /* Terminating entry */
}; };
@ -319,11 +316,6 @@ static int btsdio_probe(struct sdio_func *func,
hdev->bus = HCI_SDIO; hdev->bus = HCI_SDIO;
hci_set_drvdata(hdev, data); hci_set_drvdata(hdev, data);
if (id->class == SDIO_CLASS_BT_AMP)
hdev->dev_type = HCI_AMP;
else
hdev->dev_type = HCI_PRIMARY;
data->hdev = hdev; data->hdev = hdev;
SET_HCIDEV_DEV(hdev, &func->dev); SET_HCIDEV_DEV(hdev, &func->dev);

View File

@ -4308,11 +4308,6 @@ static int btusb_probe(struct usb_interface *intf,
hdev->bus = HCI_USB; hdev->bus = HCI_USB;
hci_set_drvdata(hdev, data); hci_set_drvdata(hdev, data);
if (id->driver_info & BTUSB_AMP)
hdev->dev_type = HCI_AMP;
else
hdev->dev_type = HCI_PRIMARY;
data->hdev = hdev; data->hdev = hdev;
SET_HCIDEV_DEV(hdev, &intf->dev); SET_HCIDEV_DEV(hdev, &intf->dev);

View File

@ -2361,7 +2361,6 @@ static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id)
bcm4377->hdev = hdev; bcm4377->hdev = hdev;
hdev->bus = HCI_PCI; hdev->bus = HCI_PCI;
hdev->dev_type = HCI_PRIMARY;
hdev->open = bcm4377_hci_open; hdev->open = bcm4377_hci_open;
hdev->close = bcm4377_hci_close; hdev->close = bcm4377_hci_close;
hdev->send = bcm4377_hci_send_frame; hdev->send = bcm4377_hci_send_frame;

View File

@ -667,11 +667,6 @@ static int hci_uart_register_dev(struct hci_uart *hu)
if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags)) if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags))
set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags))
hdev->dev_type = HCI_AMP;
else
hdev->dev_type = HCI_PRIMARY;
/* Only call open() for the protocol after hdev is fully initialized as /* Only call open() for the protocol after hdev is fully initialized as
* open() (or a timer/workqueue it starts) may attempt to reference it. * open() (or a timer/workqueue it starts) may attempt to reference it.
*/ */
@ -722,7 +717,6 @@ static int hci_uart_set_flags(struct hci_uart *hu, unsigned long flags)
{ {
unsigned long valid_flags = BIT(HCI_UART_RAW_DEVICE) | unsigned long valid_flags = BIT(HCI_UART_RAW_DEVICE) |
BIT(HCI_UART_RESET_ON_INIT) | BIT(HCI_UART_RESET_ON_INIT) |
BIT(HCI_UART_CREATE_AMP) |
BIT(HCI_UART_INIT_PENDING) | BIT(HCI_UART_INIT_PENDING) |
BIT(HCI_UART_EXT_CONFIG) | BIT(HCI_UART_EXT_CONFIG) |
BIT(HCI_UART_VND_DETECT); BIT(HCI_UART_VND_DETECT);

View File

@ -366,11 +366,6 @@ int hci_uart_register_device_priv(struct hci_uart *hu,
if (test_bit(HCI_UART_EXT_CONFIG, &hu->hdev_flags)) if (test_bit(HCI_UART_EXT_CONFIG, &hu->hdev_flags))
set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks); set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks);
if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags))
hdev->dev_type = HCI_AMP;
else
hdev->dev_type = HCI_PRIMARY;
if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags)) if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags))
return 0; return 0;

View File

@ -37,7 +37,6 @@
#define HCI_UART_RAW_DEVICE 0 #define HCI_UART_RAW_DEVICE 0
#define HCI_UART_RESET_ON_INIT 1 #define HCI_UART_RESET_ON_INIT 1
#define HCI_UART_CREATE_AMP 2
#define HCI_UART_INIT_PENDING 3 #define HCI_UART_INIT_PENDING 3
#define HCI_UART_EXT_CONFIG 4 #define HCI_UART_EXT_CONFIG 4
#define HCI_UART_VND_DETECT 5 #define HCI_UART_VND_DETECT 5

View File

@ -384,17 +384,10 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
{ {
struct hci_dev *hdev; struct hci_dev *hdev;
struct sk_buff *skb; struct sk_buff *skb;
__u8 dev_type;
if (data->hdev) if (data->hdev)
return -EBADFD; return -EBADFD;
/* bits 0-1 are dev_type (Primary or AMP) */
dev_type = opcode & 0x03;
if (dev_type != HCI_PRIMARY && dev_type != HCI_AMP)
return -EINVAL;
/* bits 2-5 are reserved (must be zero) */ /* bits 2-5 are reserved (must be zero) */
if (opcode & 0x3c) if (opcode & 0x3c)
return -EINVAL; return -EINVAL;
@ -412,7 +405,6 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
data->hdev = hdev; data->hdev = hdev;
hdev->bus = HCI_VIRTUAL; hdev->bus = HCI_VIRTUAL;
hdev->dev_type = dev_type;
hci_set_drvdata(hdev, data); hci_set_drvdata(hdev, data);
hdev->open = vhci_open_dev; hdev->open = vhci_open_dev;
@ -634,7 +626,7 @@ static void vhci_open_timeout(struct work_struct *work)
struct vhci_data *data = container_of(work, struct vhci_data, struct vhci_data *data = container_of(work, struct vhci_data,
open_timeout.work); open_timeout.work);
vhci_create_device(data, amp ? HCI_AMP : HCI_PRIMARY); vhci_create_device(data, 0x00);
} }
static int vhci_open(struct inode *inode, struct file *file) static int vhci_open(struct inode *inode, struct file *file)

View File

@ -274,7 +274,6 @@ static int virtbt_probe(struct virtio_device *vdev)
switch (type) { switch (type) {
case VIRTIO_BT_CONFIG_TYPE_PRIMARY: case VIRTIO_BT_CONFIG_TYPE_PRIMARY:
case VIRTIO_BT_CONFIG_TYPE_AMP:
break; break;
default: default:
return -EINVAL; return -EINVAL;
@ -303,7 +302,6 @@ static int virtbt_probe(struct virtio_device *vdev)
vbt->hdev = hdev; vbt->hdev = hdev;
hdev->bus = HCI_VIRTIO; hdev->bus = HCI_VIRTIO;
hdev->dev_type = type;
hci_set_drvdata(hdev, vbt); hci_set_drvdata(hdev, vbt);
hdev->open = virtbt_open; hdev->open = virtbt_open;

View File

@ -22,16 +22,24 @@
#define DRIVER_NAME "ARM FF-A" #define DRIVER_NAME "ARM FF-A"
#define pr_fmt(fmt) DRIVER_NAME ": " fmt #define pr_fmt(fmt) DRIVER_NAME ": " fmt
#include <linux/acpi.h>
#include <linux/arm_ffa.h> #include <linux/arm_ffa.h>
#include <linux/bitfield.h> #include <linux/bitfield.h>
#include <linux/cpuhotplug.h>
#include <linux/device.h> #include <linux/device.h>
#include <linux/hashtable.h>
#include <linux/interrupt.h>
#include <linux/io.h> #include <linux/io.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/of_irq.h>
#include <linux/scatterlist.h> #include <linux/scatterlist.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/smp.h>
#include <linux/uuid.h> #include <linux/uuid.h>
#include <linux/xarray.h>
#include "common.h" #include "common.h"
@ -51,6 +59,8 @@
*/ */
#define RXTX_BUFFER_SIZE SZ_4K #define RXTX_BUFFER_SIZE SZ_4K
#define FFA_MAX_NOTIFICATIONS 64
static ffa_fn *invoke_ffa_fn; static ffa_fn *invoke_ffa_fn;
static const int ffa_linux_errmap[] = { static const int ffa_linux_errmap[] = {
@ -64,6 +74,7 @@ static const int ffa_linux_errmap[] = {
-EACCES, /* FFA_RET_DENIED */ -EACCES, /* FFA_RET_DENIED */
-EAGAIN, /* FFA_RET_RETRY */ -EAGAIN, /* FFA_RET_RETRY */
-ECANCELED, /* FFA_RET_ABORTED */ -ECANCELED, /* FFA_RET_ABORTED */
-ENODATA, /* FFA_RET_NO_DATA */
}; };
static inline int ffa_to_linux_errno(int errno) static inline int ffa_to_linux_errno(int errno)
@ -75,6 +86,10 @@ static inline int ffa_to_linux_errno(int errno)
return -EINVAL; return -EINVAL;
} }
struct ffa_pcpu_irq {
struct ffa_drv_info *info;
};
struct ffa_drv_info { struct ffa_drv_info {
u32 version; u32 version;
u16 vm_id; u16 vm_id;
@ -83,6 +98,17 @@ struct ffa_drv_info {
void *rx_buffer; void *rx_buffer;
void *tx_buffer; void *tx_buffer;
bool mem_ops_native; bool mem_ops_native;
bool bitmap_created;
unsigned int sched_recv_irq;
unsigned int cpuhp_state;
struct ffa_pcpu_irq __percpu *irq_pcpu;
struct workqueue_struct *notif_pcpu_wq;
struct work_struct notif_pcpu_work;
struct work_struct irq_work;
struct xarray partition_info;
unsigned int partition_count;
DECLARE_HASHTABLE(notifier_hash, ilog2(FFA_MAX_NOTIFICATIONS));
struct mutex notify_lock; /* lock to protect notifier hashtable */
}; };
static struct ffa_drv_info *drv_info; static struct ffa_drv_info *drv_info;
@ -397,7 +423,7 @@ static u32 ffa_get_num_pages_sg(struct scatterlist *sg)
return num_pages; return num_pages;
} }
static u8 ffa_memory_attributes_get(u32 func_id) static u16 ffa_memory_attributes_get(u32 func_id)
{ {
/* /*
* For the memory lend or donate operation, if the receiver is a PE or * For the memory lend or donate operation, if the receiver is a PE or
@ -416,38 +442,47 @@ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
{ {
int rc = 0; int rc = 0;
bool first = true; bool first = true;
u32 composite_offset;
phys_addr_t addr = 0; phys_addr_t addr = 0;
struct ffa_mem_region *mem_region = buffer;
struct ffa_composite_mem_region *composite; struct ffa_composite_mem_region *composite;
struct ffa_mem_region_addr_range *constituents; struct ffa_mem_region_addr_range *constituents;
struct ffa_mem_region_attributes *ep_mem_access; struct ffa_mem_region_attributes *ep_mem_access;
struct ffa_mem_region *mem_region = buffer;
u32 idx, frag_len, length, buf_sz = 0, num_entries = sg_nents(args->sg); u32 idx, frag_len, length, buf_sz = 0, num_entries = sg_nents(args->sg);
mem_region->tag = args->tag; mem_region->tag = args->tag;
mem_region->flags = args->flags; mem_region->flags = args->flags;
mem_region->sender_id = drv_info->vm_id; mem_region->sender_id = drv_info->vm_id;
mem_region->attributes = ffa_memory_attributes_get(func_id); mem_region->attributes = ffa_memory_attributes_get(func_id);
ep_mem_access = &mem_region->ep_mem_access[0]; ep_mem_access = buffer +
ffa_mem_desc_offset(buffer, 0, drv_info->version);
composite_offset = ffa_mem_desc_offset(buffer, args->nattrs,
drv_info->version);
for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) { for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) {
ep_mem_access->receiver = args->attrs[idx].receiver; ep_mem_access->receiver = args->attrs[idx].receiver;
ep_mem_access->attrs = args->attrs[idx].attrs; ep_mem_access->attrs = args->attrs[idx].attrs;
ep_mem_access->composite_off = COMPOSITE_OFFSET(args->nattrs); ep_mem_access->composite_off = composite_offset;
ep_mem_access->flag = 0; ep_mem_access->flag = 0;
ep_mem_access->reserved = 0; ep_mem_access->reserved = 0;
} }
mem_region->handle = 0; mem_region->handle = 0;
mem_region->reserved_0 = 0;
mem_region->reserved_1 = 0;
mem_region->ep_count = args->nattrs; mem_region->ep_count = args->nattrs;
if (drv_info->version <= FFA_VERSION_1_0) {
mem_region->ep_mem_size = 0;
} else {
mem_region->ep_mem_size = sizeof(*ep_mem_access);
mem_region->ep_mem_offset = sizeof(*mem_region);
memset(mem_region->reserved, 0, 12);
}
composite = buffer + COMPOSITE_OFFSET(args->nattrs); composite = buffer + composite_offset;
composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg); composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg);
composite->addr_range_cnt = num_entries; composite->addr_range_cnt = num_entries;
composite->reserved = 0; composite->reserved = 0;
length = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, num_entries); length = composite_offset + CONSTITUENTS_OFFSET(num_entries);
frag_len = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, 0); frag_len = composite_offset + CONSTITUENTS_OFFSET(0);
if (frag_len > max_fragsize) if (frag_len > max_fragsize)
return -ENXIO; return -ENXIO;
@ -554,6 +589,236 @@ static int ffa_features(u32 func_feat_id, u32 input_props,
return 0; return 0;
} }
static int ffa_notification_bitmap_create(void)
{
ffa_value_t ret;
u16 vcpu_count = nr_cpu_ids;
invoke_ffa_fn((ffa_value_t){
.a0 = FFA_NOTIFICATION_BITMAP_CREATE,
.a1 = drv_info->vm_id, .a2 = vcpu_count,
}, &ret);
if (ret.a0 == FFA_ERROR)
return ffa_to_linux_errno((int)ret.a2);
return 0;
}
static int ffa_notification_bitmap_destroy(void)
{
ffa_value_t ret;
invoke_ffa_fn((ffa_value_t){
.a0 = FFA_NOTIFICATION_BITMAP_DESTROY,
.a1 = drv_info->vm_id,
}, &ret);
if (ret.a0 == FFA_ERROR)
return ffa_to_linux_errno((int)ret.a2);
return 0;
}
#define NOTIFICATION_LOW_MASK GENMASK(31, 0)
#define NOTIFICATION_HIGH_MASK GENMASK(63, 32)
#define NOTIFICATION_BITMAP_HIGH(x) \
((u32)(FIELD_GET(NOTIFICATION_HIGH_MASK, (x))))
#define NOTIFICATION_BITMAP_LOW(x) \
((u32)(FIELD_GET(NOTIFICATION_LOW_MASK, (x))))
#define PACK_NOTIFICATION_BITMAP(low, high) \
(FIELD_PREP(NOTIFICATION_LOW_MASK, (low)) | \
FIELD_PREP(NOTIFICATION_HIGH_MASK, (high)))
#define RECEIVER_VCPU_MASK GENMASK(31, 16)
#define PACK_NOTIFICATION_GET_RECEIVER_INFO(vcpu_r, r) \
(FIELD_PREP(RECEIVER_VCPU_MASK, (vcpu_r)) | \
FIELD_PREP(RECEIVER_ID_MASK, (r)))
#define NOTIFICATION_INFO_GET_MORE_PEND_MASK BIT(0)
#define NOTIFICATION_INFO_GET_ID_COUNT GENMASK(11, 7)
#define ID_LIST_MASK_64 GENMASK(51, 12)
#define ID_LIST_MASK_32 GENMASK(31, 12)
#define MAX_IDS_64 20
#define MAX_IDS_32 10
#define PER_VCPU_NOTIFICATION_FLAG BIT(0)
#define SECURE_PARTITION_BITMAP BIT(0)
#define NON_SECURE_VM_BITMAP BIT(1)
#define SPM_FRAMEWORK_BITMAP BIT(2)
#define NS_HYP_FRAMEWORK_BITMAP BIT(3)
static int ffa_notification_bind_common(u16 dst_id, u64 bitmap,
u32 flags, bool is_bind)
{
ffa_value_t ret;
u32 func, src_dst_ids = PACK_TARGET_INFO(dst_id, drv_info->vm_id);
func = is_bind ? FFA_NOTIFICATION_BIND : FFA_NOTIFICATION_UNBIND;
invoke_ffa_fn((ffa_value_t){
.a0 = func, .a1 = src_dst_ids, .a2 = flags,
.a3 = NOTIFICATION_BITMAP_LOW(bitmap),
.a4 = NOTIFICATION_BITMAP_HIGH(bitmap),
}, &ret);
if (ret.a0 == FFA_ERROR)
return ffa_to_linux_errno((int)ret.a2);
else if (ret.a0 != FFA_SUCCESS)
return -EINVAL;
return 0;
}
static
int ffa_notification_set(u16 src_id, u16 dst_id, u32 flags, u64 bitmap)
{
ffa_value_t ret;
u32 src_dst_ids = PACK_TARGET_INFO(dst_id, src_id);
invoke_ffa_fn((ffa_value_t) {
.a0 = FFA_NOTIFICATION_SET, .a1 = src_dst_ids, .a2 = flags,
.a3 = NOTIFICATION_BITMAP_LOW(bitmap),
.a4 = NOTIFICATION_BITMAP_HIGH(bitmap),
}, &ret);
if (ret.a0 == FFA_ERROR)
return ffa_to_linux_errno((int)ret.a2);
else if (ret.a0 != FFA_SUCCESS)
return -EINVAL;
return 0;
}
struct ffa_notify_bitmaps {
u64 sp_map;
u64 vm_map;
u64 arch_map;
};
static int ffa_notification_get(u32 flags, struct ffa_notify_bitmaps *notify)
{
ffa_value_t ret;
u16 src_id = drv_info->vm_id;
u16 cpu_id = smp_processor_id();
u32 rec_vcpu_ids = PACK_NOTIFICATION_GET_RECEIVER_INFO(cpu_id, src_id);
invoke_ffa_fn((ffa_value_t){
.a0 = FFA_NOTIFICATION_GET, .a1 = rec_vcpu_ids, .a2 = flags,
}, &ret);
if (ret.a0 == FFA_ERROR)
return ffa_to_linux_errno((int)ret.a2);
else if (ret.a0 != FFA_SUCCESS)
return -EINVAL; /* Something else went wrong. */
notify->sp_map = PACK_NOTIFICATION_BITMAP(ret.a2, ret.a3);
notify->vm_map = PACK_NOTIFICATION_BITMAP(ret.a4, ret.a5);
notify->arch_map = PACK_NOTIFICATION_BITMAP(ret.a6, ret.a7);
return 0;
}
struct ffa_dev_part_info {
ffa_sched_recv_cb callback;
void *cb_data;
rwlock_t rw_lock;
};
static void __do_sched_recv_cb(u16 part_id, u16 vcpu, bool is_per_vcpu)
{
struct ffa_dev_part_info *partition;
ffa_sched_recv_cb callback;
void *cb_data;
partition = xa_load(&drv_info->partition_info, part_id);
read_lock(&partition->rw_lock);
callback = partition->callback;
cb_data = partition->cb_data;
read_unlock(&partition->rw_lock);
if (callback)
callback(vcpu, is_per_vcpu, cb_data);
}
static void ffa_notification_info_get(void)
{
int idx, list, max_ids, lists_cnt, ids_processed, ids_count[MAX_IDS_64];
bool is_64b_resp;
ffa_value_t ret;
u64 id_list;
do {
invoke_ffa_fn((ffa_value_t){
.a0 = FFA_FN_NATIVE(NOTIFICATION_INFO_GET),
}, &ret);
if (ret.a0 != FFA_FN_NATIVE(SUCCESS) && ret.a0 != FFA_SUCCESS) {
if (ret.a2 != FFA_RET_NO_DATA)
pr_err("Notification Info fetch failed: 0x%lx (0x%lx)",
ret.a0, ret.a2);
return;
}
is_64b_resp = (ret.a0 == FFA_FN64_SUCCESS);
ids_processed = 0;
lists_cnt = FIELD_GET(NOTIFICATION_INFO_GET_ID_COUNT, ret.a2);
if (is_64b_resp) {
max_ids = MAX_IDS_64;
id_list = FIELD_GET(ID_LIST_MASK_64, ret.a2);
} else {
max_ids = MAX_IDS_32;
id_list = FIELD_GET(ID_LIST_MASK_32, ret.a2);
}
for (idx = 0; idx < lists_cnt; idx++, id_list >>= 2)
ids_count[idx] = (id_list & 0x3) + 1;
/* Process IDs */
for (list = 0; list < lists_cnt; list++) {
u16 vcpu_id, part_id, *packed_id_list = (u16 *)&ret.a3;
if (ids_processed >= max_ids - 1)
break;
part_id = packed_id_list[++ids_processed];
if (!ids_count[list]) { /* Global Notification */
__do_sched_recv_cb(part_id, 0, false);
continue;
}
/* Per vCPU Notification */
for (idx = 0; idx < ids_count[list]; idx++) {
if (ids_processed >= max_ids - 1)
break;
vcpu_id = packed_id_list[++ids_processed];
__do_sched_recv_cb(part_id, vcpu_id, true);
}
}
} while (ret.a2 & NOTIFICATION_INFO_GET_MORE_PEND_MASK);
}
static int ffa_run(struct ffa_device *dev, u16 vcpu)
{
ffa_value_t ret;
u32 target = dev->vm_id << 16 | vcpu;
invoke_ffa_fn((ffa_value_t){ .a0 = FFA_RUN, .a1 = target, }, &ret);
while (ret.a0 == FFA_INTERRUPT)
invoke_ffa_fn((ffa_value_t){ .a0 = FFA_RUN, .a1 = ret.a1, },
&ret);
if (ret.a0 == FFA_ERROR)
return ffa_to_linux_errno((int)ret.a2);
return 0;
}
static void ffa_set_up_mem_ops_native_flag(void) static void ffa_set_up_mem_ops_native_flag(void)
{ {
if (!ffa_features(FFA_FN_NATIVE(MEM_LEND), 0, NULL, NULL) || if (!ffa_features(FFA_FN_NATIVE(MEM_LEND), 0, NULL, NULL) ||
@ -622,6 +887,231 @@ static int ffa_memory_lend(struct ffa_mem_ops_args *args)
return ffa_memory_ops(FFA_MEM_LEND, args); return ffa_memory_ops(FFA_MEM_LEND, args);
} }
#define FFA_SECURE_PARTITION_ID_FLAG BIT(15)
enum notify_type {
NON_SECURE_VM,
SECURE_PARTITION,
FRAMEWORK,
};
struct notifier_cb_info {
struct hlist_node hnode;
ffa_notifier_cb cb;
void *cb_data;
enum notify_type type;
};
static int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback,
void *cb_data, bool is_registration)
{
struct ffa_dev_part_info *partition;
bool cb_valid;
partition = xa_load(&drv_info->partition_info, part_id);
write_lock(&partition->rw_lock);
cb_valid = !!partition->callback;
if (!(is_registration ^ cb_valid)) {
write_unlock(&partition->rw_lock);
return -EINVAL;
}
partition->callback = callback;
partition->cb_data = cb_data;
write_unlock(&partition->rw_lock);
return 0;
}
static int ffa_sched_recv_cb_register(struct ffa_device *dev,
ffa_sched_recv_cb cb, void *cb_data)
{
return ffa_sched_recv_cb_update(dev->vm_id, cb, cb_data, true);
}
static int ffa_sched_recv_cb_unregister(struct ffa_device *dev)
{
return ffa_sched_recv_cb_update(dev->vm_id, NULL, NULL, false);
}
static int ffa_notification_bind(u16 dst_id, u64 bitmap, u32 flags)
{
return ffa_notification_bind_common(dst_id, bitmap, flags, true);
}
static int ffa_notification_unbind(u16 dst_id, u64 bitmap)
{
return ffa_notification_bind_common(dst_id, bitmap, 0, false);
}
/* Should be called while the notify_lock is taken */
static struct notifier_cb_info *
notifier_hash_node_get(u16 notify_id, enum notify_type type)
{
struct notifier_cb_info *node;
hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
if (type == node->type)
return node;
return NULL;
}
static int
update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb,
void *cb_data, bool is_registration)
{
struct notifier_cb_info *cb_info = NULL;
bool cb_found;
cb_info = notifier_hash_node_get(notify_id, type);
cb_found = !!cb_info;
if (!(is_registration ^ cb_found))
return -EINVAL;
if (is_registration) {
cb_info = kzalloc(sizeof(*cb_info), GFP_KERNEL);
if (!cb_info)
return -ENOMEM;
cb_info->type = type;
cb_info->cb = cb;
cb_info->cb_data = cb_data;
hash_add(drv_info->notifier_hash, &cb_info->hnode, notify_id);
} else {
hash_del(&cb_info->hnode);
}
return 0;
}
static enum notify_type ffa_notify_type_get(u16 vm_id)
{
if (vm_id & FFA_SECURE_PARTITION_ID_FLAG)
return SECURE_PARTITION;
else
return NON_SECURE_VM;
}
static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
{
int rc;
enum notify_type type = ffa_notify_type_get(dev->vm_id);
if (notify_id >= FFA_MAX_NOTIFICATIONS)
return -EINVAL;
mutex_lock(&drv_info->notify_lock);
rc = update_notifier_cb(notify_id, type, NULL, NULL, false);
if (rc) {
pr_err("Could not unregister notification callback\n");
mutex_unlock(&drv_info->notify_lock);
return rc;
}
rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id));
mutex_unlock(&drv_info->notify_lock);
return rc;
}
static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
ffa_notifier_cb cb, void *cb_data, int notify_id)
{
int rc;
u32 flags = 0;
enum notify_type type = ffa_notify_type_get(dev->vm_id);
if (notify_id >= FFA_MAX_NOTIFICATIONS)
return -EINVAL;
mutex_lock(&drv_info->notify_lock);
if (is_per_vcpu)
flags = PER_VCPU_NOTIFICATION_FLAG;
rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags);
if (rc) {
mutex_unlock(&drv_info->notify_lock);
return rc;
}
rc = update_notifier_cb(notify_id, type, cb, cb_data, true);
if (rc) {
pr_err("Failed to register callback for %d - %d\n",
notify_id, rc);
ffa_notification_unbind(dev->vm_id, BIT(notify_id));
}
mutex_unlock(&drv_info->notify_lock);
return rc;
}
static int ffa_notify_send(struct ffa_device *dev, int notify_id,
bool is_per_vcpu, u16 vcpu)
{
u32 flags = 0;
if (is_per_vcpu)
flags |= (PER_VCPU_NOTIFICATION_FLAG | vcpu << 16);
return ffa_notification_set(dev->vm_id, drv_info->vm_id, flags,
BIT(notify_id));
}
static void handle_notif_callbacks(u64 bitmap, enum notify_type type)
{
int notify_id;
struct notifier_cb_info *cb_info = NULL;
for (notify_id = 0; notify_id <= FFA_MAX_NOTIFICATIONS && bitmap;
notify_id++, bitmap >>= 1) {
if (!(bitmap & 1))
continue;
mutex_lock(&drv_info->notify_lock);
cb_info = notifier_hash_node_get(notify_id, type);
mutex_unlock(&drv_info->notify_lock);
if (cb_info && cb_info->cb)
cb_info->cb(notify_id, cb_info->cb_data);
}
}
static void notif_pcpu_irq_work_fn(struct work_struct *work)
{
int rc;
struct ffa_notify_bitmaps bitmaps;
rc = ffa_notification_get(SECURE_PARTITION_BITMAP |
SPM_FRAMEWORK_BITMAP, &bitmaps);
if (rc) {
pr_err("Failed to retrieve notifications with %d!\n", rc);
return;
}
handle_notif_callbacks(bitmaps.vm_map, NON_SECURE_VM);
handle_notif_callbacks(bitmaps.sp_map, SECURE_PARTITION);
handle_notif_callbacks(bitmaps.arch_map, FRAMEWORK);
}
static void
ffa_self_notif_handle(u16 vcpu, bool is_per_vcpu, void *cb_data)
{
struct ffa_drv_info *info = cb_data;
if (!is_per_vcpu)
notif_pcpu_irq_work_fn(&info->notif_pcpu_work);
else
queue_work_on(vcpu, info->notif_pcpu_wq,
&info->notif_pcpu_work);
}
static const struct ffa_info_ops ffa_drv_info_ops = { static const struct ffa_info_ops ffa_drv_info_ops = {
.api_version_get = ffa_api_version_get, .api_version_get = ffa_api_version_get,
.partition_info_get = ffa_partition_info_get, .partition_info_get = ffa_partition_info_get,
@ -638,10 +1128,24 @@ static const struct ffa_mem_ops ffa_drv_mem_ops = {
.memory_lend = ffa_memory_lend, .memory_lend = ffa_memory_lend,
}; };
static const struct ffa_cpu_ops ffa_drv_cpu_ops = {
.run = ffa_run,
};
static const struct ffa_notifier_ops ffa_drv_notifier_ops = {
.sched_recv_cb_register = ffa_sched_recv_cb_register,
.sched_recv_cb_unregister = ffa_sched_recv_cb_unregister,
.notify_request = ffa_notify_request,
.notify_relinquish = ffa_notify_relinquish,
.notify_send = ffa_notify_send,
};
static const struct ffa_ops ffa_drv_ops = { static const struct ffa_ops ffa_drv_ops = {
.info_ops = &ffa_drv_info_ops, .info_ops = &ffa_drv_info_ops,
.msg_ops = &ffa_drv_msg_ops, .msg_ops = &ffa_drv_msg_ops,
.mem_ops = &ffa_drv_mem_ops, .mem_ops = &ffa_drv_mem_ops,
.cpu_ops = &ffa_drv_cpu_ops,
.notifier_ops = &ffa_drv_notifier_ops,
}; };
void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid) void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid)
@ -672,6 +1176,7 @@ static void ffa_setup_partitions(void)
int count, idx; int count, idx;
uuid_t uuid; uuid_t uuid;
struct ffa_device *ffa_dev; struct ffa_device *ffa_dev;
struct ffa_dev_part_info *info;
struct ffa_partition_info *pbuf, *tpbuf; struct ffa_partition_info *pbuf, *tpbuf;
count = ffa_partition_probe(&uuid_null, &pbuf); count = ffa_partition_probe(&uuid_null, &pbuf);
@ -680,6 +1185,7 @@ static void ffa_setup_partitions(void)
return; return;
} }
xa_init(&drv_info->partition_info);
for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++) { for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++) {
import_uuid(&uuid, (u8 *)tpbuf->uuid); import_uuid(&uuid, (u8 *)tpbuf->uuid);
@ -699,8 +1205,231 @@ static void ffa_setup_partitions(void)
if (drv_info->version > FFA_VERSION_1_0 && if (drv_info->version > FFA_VERSION_1_0 &&
!(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC)) !(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
ffa_mode_32bit_set(ffa_dev); ffa_mode_32bit_set(ffa_dev);
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
ffa_device_unregister(ffa_dev);
continue;
}
xa_store(&drv_info->partition_info, tpbuf->id, info, GFP_KERNEL);
} }
drv_info->partition_count = count;
kfree(pbuf); kfree(pbuf);
/* Allocate for the host */
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return;
xa_store(&drv_info->partition_info, drv_info->vm_id, info, GFP_KERNEL);
drv_info->partition_count++;
}
static void ffa_partitions_cleanup(void)
{
struct ffa_dev_part_info **info;
int idx, count = drv_info->partition_count;
if (!count)
return;
info = kcalloc(count, sizeof(**info), GFP_KERNEL);
if (!info)
return;
xa_extract(&drv_info->partition_info, (void **)info, 0, VM_ID_MASK,
count, XA_PRESENT);
for (idx = 0; idx < count; idx++)
kfree(info[idx]);
kfree(info);
drv_info->partition_count = 0;
xa_destroy(&drv_info->partition_info);
}
/* FFA FEATURE IDs */
#define FFA_FEAT_NOTIFICATION_PENDING_INT (1)
#define FFA_FEAT_SCHEDULE_RECEIVER_INT (2)
#define FFA_FEAT_MANAGED_EXIT_INT (3)
static irqreturn_t irq_handler(int irq, void *irq_data)
{
struct ffa_pcpu_irq *pcpu = irq_data;
struct ffa_drv_info *info = pcpu->info;
queue_work(info->notif_pcpu_wq, &info->irq_work);
return IRQ_HANDLED;
}
static void ffa_sched_recv_irq_work_fn(struct work_struct *work)
{
ffa_notification_info_get();
}
static int ffa_sched_recv_irq_map(void)
{
int ret, irq, sr_intid;
/* The returned sr_intid is assumed to be SGI donated to NS world */
ret = ffa_features(FFA_FEAT_SCHEDULE_RECEIVER_INT, 0, &sr_intid, NULL);
if (ret < 0) {
if (ret != -EOPNOTSUPP)
pr_err("Failed to retrieve scheduler Rx interrupt\n");
return ret;
}
if (acpi_disabled) {
struct of_phandle_args oirq = {};
struct device_node *gic;
/* Only GICv3 supported currently with the device tree */
gic = of_find_compatible_node(NULL, NULL, "arm,gic-v3");
if (!gic)
return -ENXIO;
oirq.np = gic;
oirq.args_count = 1;
oirq.args[0] = sr_intid;
irq = irq_create_of_mapping(&oirq);
of_node_put(gic);
#ifdef CONFIG_ACPI
} else {
irq = acpi_register_gsi(NULL, sr_intid, ACPI_EDGE_SENSITIVE,
ACPI_ACTIVE_HIGH);
#endif
}
if (irq <= 0) {
pr_err("Failed to create IRQ mapping!\n");
return -ENODATA;
}
return irq;
}
static void ffa_sched_recv_irq_unmap(void)
{
if (drv_info->sched_recv_irq)
irq_dispose_mapping(drv_info->sched_recv_irq);
}
static int ffa_cpuhp_pcpu_irq_enable(unsigned int cpu)
{
enable_percpu_irq(drv_info->sched_recv_irq, IRQ_TYPE_NONE);
return 0;
}
static int ffa_cpuhp_pcpu_irq_disable(unsigned int cpu)
{
disable_percpu_irq(drv_info->sched_recv_irq);
return 0;
}
static void ffa_uninit_pcpu_irq(void)
{
if (drv_info->cpuhp_state)
cpuhp_remove_state(drv_info->cpuhp_state);
if (drv_info->notif_pcpu_wq)
destroy_workqueue(drv_info->notif_pcpu_wq);
if (drv_info->sched_recv_irq)
free_percpu_irq(drv_info->sched_recv_irq, drv_info->irq_pcpu);
if (drv_info->irq_pcpu)
free_percpu(drv_info->irq_pcpu);
}
static int ffa_init_pcpu_irq(unsigned int irq)
{
struct ffa_pcpu_irq __percpu *irq_pcpu;
int ret, cpu;
irq_pcpu = alloc_percpu(struct ffa_pcpu_irq);
if (!irq_pcpu)
return -ENOMEM;
for_each_present_cpu(cpu)
per_cpu_ptr(irq_pcpu, cpu)->info = drv_info;
drv_info->irq_pcpu = irq_pcpu;
ret = request_percpu_irq(irq, irq_handler, "ARM-FFA", irq_pcpu);
if (ret) {
pr_err("Error registering notification IRQ %d: %d\n", irq, ret);
return ret;
}
INIT_WORK(&drv_info->irq_work, ffa_sched_recv_irq_work_fn);
INIT_WORK(&drv_info->notif_pcpu_work, notif_pcpu_irq_work_fn);
drv_info->notif_pcpu_wq = create_workqueue("ffa_pcpu_irq_notification");
if (!drv_info->notif_pcpu_wq)
return -EINVAL;
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ffa/pcpu-irq:starting",
ffa_cpuhp_pcpu_irq_enable,
ffa_cpuhp_pcpu_irq_disable);
if (ret < 0)
return ret;
drv_info->cpuhp_state = ret;
return 0;
}
static void ffa_notifications_cleanup(void)
{
ffa_uninit_pcpu_irq();
ffa_sched_recv_irq_unmap();
if (drv_info->bitmap_created) {
ffa_notification_bitmap_destroy();
drv_info->bitmap_created = false;
}
}
static int ffa_notifications_setup(void)
{
int ret, irq;
ret = ffa_features(FFA_NOTIFICATION_BITMAP_CREATE, 0, NULL, NULL);
if (ret) {
pr_err("Notifications not supported, continuing with it ..\n");
return 0;
}
ret = ffa_notification_bitmap_create();
if (ret) {
pr_err("notification_bitmap_create error %d\n", ret);
return ret;
}
drv_info->bitmap_created = true;
irq = ffa_sched_recv_irq_map();
if (irq <= 0) {
ret = irq;
goto cleanup;
}
drv_info->sched_recv_irq = irq;
ret = ffa_init_pcpu_irq(irq);
if (ret)
goto cleanup;
hash_init(drv_info->notifier_hash);
mutex_init(&drv_info->notify_lock);
/* Register internal scheduling callback */
ret = ffa_sched_recv_cb_update(drv_info->vm_id, ffa_self_notif_handle,
drv_info, true);
if (!ret)
return ret;
cleanup:
ffa_notifications_cleanup();
return ret;
} }
static int __init ffa_init(void) static int __init ffa_init(void)
@ -758,7 +1487,13 @@ static int __init ffa_init(void)
ffa_set_up_mem_ops_native_flag(); ffa_set_up_mem_ops_native_flag();
ret = ffa_notifications_setup();
if (ret)
goto partitions_cleanup;
return 0; return 0;
partitions_cleanup:
ffa_partitions_cleanup();
free_pages: free_pages:
if (drv_info->tx_buffer) if (drv_info->tx_buffer)
free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE); free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
@ -773,9 +1508,12 @@ subsys_initcall(ffa_init);
static void __exit ffa_exit(void) static void __exit ffa_exit(void)
{ {
ffa_notifications_cleanup();
ffa_partitions_cleanup();
ffa_rxtx_unmap(drv_info->vm_id); ffa_rxtx_unmap(drv_info->vm_id);
free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE); free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE); free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
xa_destroy(&drv_info->partition_info);
kfree(drv_info); kfree(drv_info);
arm_ffa_bus_exit(); arm_ffa_bus_exit();
} }

View File

@ -9,6 +9,7 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <net/genetlink.h> #include <net/genetlink.h>
#include <trace/hooks/thermal.h>
#include <uapi/linux/thermal.h> #include <uapi/linux/thermal.h>
#include "thermal_core.h" #include "thermal_core.h"
@ -274,6 +275,11 @@ static int thermal_genl_send_event(enum thermal_genl_event event,
struct sk_buff *msg; struct sk_buff *msg;
int ret = -EMSGSIZE; int ret = -EMSGSIZE;
void *hdr; void *hdr;
int enable_thermal_genl = 1;
trace_android_vh_enable_thermal_genl_check(event, p->tz_id, &enable_thermal_genl);
if (!enable_thermal_genl)
return 0;
msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (!msg) if (!msg)

View File

@ -82,7 +82,7 @@ out:
int dwc3_host_init(struct dwc3 *dwc) int dwc3_host_init(struct dwc3 *dwc)
{ {
struct property_entry props[5]; struct property_entry props[6];
struct platform_device *xhci; struct platform_device *xhci;
int ret, irq; int ret, irq;
int prop_idx = 0; int prop_idx = 0;
@ -112,6 +112,8 @@ int dwc3_host_init(struct dwc3 *dwc)
props[prop_idx++] = PROPERTY_ENTRY_BOOL("xhci-sg-trb-cache-size-quirk"); props[prop_idx++] = PROPERTY_ENTRY_BOOL("xhci-sg-trb-cache-size-quirk");
props[prop_idx++] = PROPERTY_ENTRY_BOOL("write-64-hi-lo-quirk");
if (dwc->usb3_lpm_capable) if (dwc->usb3_lpm_capable)
props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb3-lpm-capable"); props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb3-lpm-capable");

View File

@ -2351,7 +2351,10 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base); erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
erst_base &= ERST_BASE_RSVDP; erst_base &= ERST_BASE_RSVDP;
erst_base |= ir->erst.erst_dma_addr & ~ERST_BASE_RSVDP; erst_base |= ir->erst.erst_dma_addr & ~ERST_BASE_RSVDP;
xhci_write_64(xhci, erst_base, &ir->ir_set->erst_base); if (xhci->quirks & XHCI_WRITE_64_HI_LO)
hi_lo_writeq(erst_base, &ir->ir_set->erst_base);
else
xhci_write_64(xhci, erst_base, &ir->ir_set->erst_base);
/* Set the event ring dequeue address of this interrupter */ /* Set the event ring dequeue address of this interrupter */
xhci_set_hc_event_deq(xhci, ir); xhci_set_hc_event_deq(xhci, ir);

View File

@ -253,6 +253,9 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
if (device_property_read_bool(tmpdev, "xhci-sg-trb-cache-size-quirk")) if (device_property_read_bool(tmpdev, "xhci-sg-trb-cache-size-quirk"))
xhci->quirks |= XHCI_SG_TRB_CACHE_SIZE_QUIRK; xhci->quirks |= XHCI_SG_TRB_CACHE_SIZE_QUIRK;
if (device_property_read_bool(tmpdev, "write-64-hi-lo-quirk"))
xhci->quirks |= XHCI_WRITE_64_HI_LO;
device_property_read_u32(tmpdev, "imod-interval-ns", device_property_read_u32(tmpdev, "imod-interval-ns",
&xhci->imod_interval); &xhci->imod_interval);
device_property_read_u16(tmpdev, "num-hc-interrupters", device_property_read_u16(tmpdev, "num-hc-interrupters",

View File

@ -17,6 +17,7 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/usb/hcd.h> #include <linux/usb/hcd.h>
#include <linux/io-64-nonatomic-lo-hi.h> #include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/android_kabi.h> #include <linux/android_kabi.h>
/* Code sharing between pci-quirks and xhci hcd */ /* Code sharing between pci-quirks and xhci hcd */
@ -1932,6 +1933,7 @@ struct xhci_hcd {
#define XHCI_RESET_TO_DEFAULT BIT_ULL(44) #define XHCI_RESET_TO_DEFAULT BIT_ULL(44)
#define XHCI_ZHAOXIN_TRB_FETCH BIT_ULL(45) #define XHCI_ZHAOXIN_TRB_FETCH BIT_ULL(45)
#define XHCI_ZHAOXIN_HOST BIT_ULL(46) #define XHCI_ZHAOXIN_HOST BIT_ULL(46)
#define XHCI_WRITE_64_HI_LO BIT_ULL(47)
unsigned int num_active_eps; unsigned int num_active_eps;
unsigned int limit_active_eps; unsigned int limit_active_eps;

View File

@ -1240,6 +1240,7 @@ void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
perf_event_comm(tsk, exec); perf_event_comm(tsk, exec);
trace_android_rvh_set_task_comm(tsk, exec); trace_android_rvh_set_task_comm(tsk, exec);
} }
EXPORT_SYMBOL_GPL(__set_task_comm);
/* /*
* Calling this is the point of no return. None of the failures will be * Calling this is the point of no return. None of the failures will be

View File

@ -99,6 +99,7 @@
#include <linux/ksm.h> #include <linux/ksm.h>
#include <linux/cpufreq_times.h> #include <linux/cpufreq_times.h>
#include <trace/events/oom.h> #include <trace/events/oom.h>
#include <trace/hooks/sched.h>
#include "internal.h" #include "internal.h"
#include "fd.h" #include "fd.h"
@ -345,13 +346,24 @@ static ssize_t get_task_cmdline(struct task_struct *tsk, char __user *buf,
size_t count, loff_t *pos) size_t count, loff_t *pos)
{ {
struct mm_struct *mm; struct mm_struct *mm;
bool prio_inherited = false;
int saved_prio;
ssize_t ret; ssize_t ret;
mm = get_task_mm(tsk); mm = get_task_mm(tsk);
if (!mm) if (!mm)
return 0; return 0;
/*
* access_remote_vm() holds the hot mmap_sem lock which can cause the
* task for which we read cmdline etc for by some debug deamon to slow
* down and suffer a performance hit. Especially if the reader task has
* a low nice value.
*/
trace_android_vh_prio_inheritance(tsk, &saved_prio, &prio_inherited);
ret = get_mm_cmdline(mm, buf, count, pos); ret = get_mm_cmdline(mm, buf, count, pos);
if (prio_inherited)
trace_android_vh_prio_restore(saved_prio);
mmput(mm); mmput(mm);
return ret; return ret;
} }

View File

@ -18,6 +18,8 @@ enum android_debug_symbol {
ADS_DROP_SLAB, ADS_DROP_SLAB,
ADS_FREE_PAGES, ADS_FREE_PAGES,
ADS_COMPACT_PAGES, ADS_COMPACT_PAGES,
ADS_SHOW_MEM, /* for debugging memory usage */
ADS_TOTAL_CMA, /* for debugging total cma pages */
ADS_END ADS_END
}; };

View File

@ -6,6 +6,7 @@
#ifndef _LINUX_ARM_FFA_H #ifndef _LINUX_ARM_FFA_H
#define _LINUX_ARM_FFA_H #define _LINUX_ARM_FFA_H
#include <linux/bitfield.h>
#include <linux/device.h> #include <linux/device.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/types.h> #include <linux/types.h>
@ -20,6 +21,7 @@
#define FFA_ERROR FFA_SMC_32(0x60) #define FFA_ERROR FFA_SMC_32(0x60)
#define FFA_SUCCESS FFA_SMC_32(0x61) #define FFA_SUCCESS FFA_SMC_32(0x61)
#define FFA_FN64_SUCCESS FFA_SMC_64(0x61)
#define FFA_INTERRUPT FFA_SMC_32(0x62) #define FFA_INTERRUPT FFA_SMC_32(0x62)
#define FFA_VERSION FFA_SMC_32(0x63) #define FFA_VERSION FFA_SMC_32(0x63)
#define FFA_FEATURES FFA_SMC_32(0x64) #define FFA_FEATURES FFA_SMC_32(0x64)
@ -54,6 +56,23 @@
#define FFA_MEM_FRAG_RX FFA_SMC_32(0x7A) #define FFA_MEM_FRAG_RX FFA_SMC_32(0x7A)
#define FFA_MEM_FRAG_TX FFA_SMC_32(0x7B) #define FFA_MEM_FRAG_TX FFA_SMC_32(0x7B)
#define FFA_NORMAL_WORLD_RESUME FFA_SMC_32(0x7C) #define FFA_NORMAL_WORLD_RESUME FFA_SMC_32(0x7C)
#define FFA_NOTIFICATION_BITMAP_CREATE FFA_SMC_32(0x7D)
#define FFA_NOTIFICATION_BITMAP_DESTROY FFA_SMC_32(0x7E)
#define FFA_NOTIFICATION_BIND FFA_SMC_32(0x7F)
#define FFA_NOTIFICATION_UNBIND FFA_SMC_32(0x80)
#define FFA_NOTIFICATION_SET FFA_SMC_32(0x81)
#define FFA_NOTIFICATION_GET FFA_SMC_32(0x82)
#define FFA_NOTIFICATION_INFO_GET FFA_SMC_32(0x83)
#define FFA_FN64_NOTIFICATION_INFO_GET FFA_SMC_64(0x83)
#define FFA_RX_ACQUIRE FFA_SMC_32(0x84)
#define FFA_SPM_ID_GET FFA_SMC_32(0x85)
#define FFA_MSG_SEND2 FFA_SMC_32(0x86)
#define FFA_SECONDARY_EP_REGISTER FFA_SMC_32(0x87)
#define FFA_FN64_SECONDARY_EP_REGISTER FFA_SMC_64(0x87)
#define FFA_MEM_PERM_GET FFA_SMC_32(0x88)
#define FFA_FN64_MEM_PERM_GET FFA_SMC_64(0x88)
#define FFA_MEM_PERM_SET FFA_SMC_32(0x89)
#define FFA_FN64_MEM_PERM_SET FFA_SMC_64(0x89)
/* /*
* For some calls it is necessary to use SMC64 to pass or return 64-bit values. * For some calls it is necessary to use SMC64 to pass or return 64-bit values.
@ -76,6 +95,7 @@
#define FFA_RET_DENIED (-6) #define FFA_RET_DENIED (-6)
#define FFA_RET_RETRY (-7) #define FFA_RET_RETRY (-7)
#define FFA_RET_ABORTED (-8) #define FFA_RET_ABORTED (-8)
#define FFA_RET_NO_DATA (-9)
/* FFA version encoding */ /* FFA version encoding */
#define FFA_MAJOR_VERSION_MASK GENMASK(30, 16) #define FFA_MAJOR_VERSION_MASK GENMASK(30, 16)
@ -86,6 +106,7 @@
(FIELD_PREP(FFA_MAJOR_VERSION_MASK, (major)) | \ (FIELD_PREP(FFA_MAJOR_VERSION_MASK, (major)) | \
FIELD_PREP(FFA_MINOR_VERSION_MASK, (minor))) FIELD_PREP(FFA_MINOR_VERSION_MASK, (minor)))
#define FFA_VERSION_1_0 FFA_PACK_VERSION_INFO(1, 0) #define FFA_VERSION_1_0 FFA_PACK_VERSION_INFO(1, 0)
#define FFA_VERSION_1_1 FFA_PACK_VERSION_INFO(1, 1)
/** /**
* FF-A specification mentions explicitly about '4K pages'. This should * FF-A specification mentions explicitly about '4K pages'. This should
@ -188,6 +209,9 @@ bool ffa_device_is_valid(struct ffa_device *ffa_dev) { return false; }
#define module_ffa_driver(__ffa_driver) \ #define module_ffa_driver(__ffa_driver) \
module_driver(__ffa_driver, ffa_register, ffa_unregister) module_driver(__ffa_driver, ffa_register, ffa_unregister)
/* The FF-A 1.0 partition structure lacks the uuid[4] */
#define FFA_1_0_PARTITON_INFO_SZ (8)
/* FFA transport related */ /* FFA transport related */
struct ffa_partition_info { struct ffa_partition_info {
u16 id; u16 id;
@ -278,8 +302,8 @@ struct ffa_mem_region {
#define FFA_MEM_NON_SHAREABLE (0) #define FFA_MEM_NON_SHAREABLE (0)
#define FFA_MEM_OUTER_SHAREABLE (2) #define FFA_MEM_OUTER_SHAREABLE (2)
#define FFA_MEM_INNER_SHAREABLE (3) #define FFA_MEM_INNER_SHAREABLE (3)
u8 attributes; /* Memory region attributes, upper byte MBZ pre v1.1 */
u8 reserved_0; u16 attributes;
/* /*
* Clear memory region contents after unmapping it from the sender and * Clear memory region contents after unmapping it from the sender and
* before mapping it for any receiver. * before mapping it for any receiver.
@ -317,27 +341,41 @@ struct ffa_mem_region {
* memory region. * memory region.
*/ */
u64 tag; u64 tag;
u32 reserved_1; /* Size of each endpoint memory access descriptor, MBZ pre v1.1 */
u32 ep_mem_size;
/* /*
* The number of `ffa_mem_region_attributes` entries included in this * The number of `ffa_mem_region_attributes` entries included in this
* transaction. * transaction.
*/ */
u32 ep_count; u32 ep_count;
/* /*
* An array of endpoint memory access descriptors. * 16-byte aligned offset from the base address of this descriptor
* Each one specifies a memory region offset, an endpoint and the * to the first element of the endpoint memory access descriptor array
* attributes with which this memory region should be mapped in that * Valid only from v1.1
* endpoint's page table.
*/ */
struct ffa_mem_region_attributes ep_mem_access[]; u32 ep_mem_offset;
/* MBZ, valid only from v1.1 */
u32 reserved[3];
}; };
#define COMPOSITE_OFFSET(x) \
(offsetof(struct ffa_mem_region, ep_mem_access[x]))
#define CONSTITUENTS_OFFSET(x) \ #define CONSTITUENTS_OFFSET(x) \
(offsetof(struct ffa_composite_mem_region, constituents[x])) (offsetof(struct ffa_composite_mem_region, constituents[x]))
#define COMPOSITE_CONSTITUENTS_OFFSET(x, y) \
(COMPOSITE_OFFSET(x) + CONSTITUENTS_OFFSET(y)) static inline u32
ffa_mem_desc_offset(struct ffa_mem_region *buf, int count, u32 ffa_version)
{
u32 offset = count * sizeof(struct ffa_mem_region_attributes);
/*
* Earlier to v1.1, the endpoint memory descriptor array started at
* offset 32(i.e. offset of ep_mem_offset in the current structure)
*/
if (ffa_version <= FFA_VERSION_1_0)
offset += offsetof(struct ffa_mem_region, ep_mem_offset);
else
offset += sizeof(struct ffa_mem_region);
return offset;
}
struct ffa_mem_ops_args { struct ffa_mem_ops_args {
bool use_txbuf; bool use_txbuf;
@ -367,10 +405,30 @@ struct ffa_mem_ops {
int (*memory_lend)(struct ffa_mem_ops_args *args); int (*memory_lend)(struct ffa_mem_ops_args *args);
}; };
struct ffa_cpu_ops {
int (*run)(struct ffa_device *dev, u16 vcpu);
};
typedef void (*ffa_sched_recv_cb)(u16 vcpu, bool is_per_vcpu, void *cb_data);
typedef void (*ffa_notifier_cb)(int notify_id, void *cb_data);
struct ffa_notifier_ops {
int (*sched_recv_cb_register)(struct ffa_device *dev,
ffa_sched_recv_cb cb, void *cb_data);
int (*sched_recv_cb_unregister)(struct ffa_device *dev);
int (*notify_request)(struct ffa_device *dev, bool per_vcpu,
ffa_notifier_cb cb, void *cb_data, int notify_id);
int (*notify_relinquish)(struct ffa_device *dev, int notify_id);
int (*notify_send)(struct ffa_device *dev, int notify_id, bool per_vcpu,
u16 vcpu);
};
struct ffa_ops { struct ffa_ops {
const struct ffa_info_ops *info_ops; const struct ffa_info_ops *info_ops;
const struct ffa_msg_ops *msg_ops; const struct ffa_msg_ops *msg_ops;
const struct ffa_mem_ops *mem_ops; const struct ffa_mem_ops *mem_ops;
const struct ffa_cpu_ops *cpu_ops;
const struct ffa_notifier_ops *notifier_ops;
}; };
#endif /* _LINUX_ARM_FFA_H */ #endif /* _LINUX_ARM_FFA_H */

View File

@ -33,7 +33,6 @@
#define HCI_MAX_FRAME_SIZE (HCI_MAX_ACL_SIZE + 4) #define HCI_MAX_FRAME_SIZE (HCI_MAX_ACL_SIZE + 4)
#define HCI_LINK_KEY_SIZE 16 #define HCI_LINK_KEY_SIZE 16
#define HCI_AMP_LINK_KEY_SIZE (2 * HCI_LINK_KEY_SIZE)
#define HCI_MAX_AMP_ASSOC_SIZE 672 #define HCI_MAX_AMP_ASSOC_SIZE 672
@ -71,26 +70,6 @@
#define HCI_SMD 9 #define HCI_SMD 9
#define HCI_VIRTIO 10 #define HCI_VIRTIO 10
/* HCI controller types */
#define HCI_PRIMARY 0x00
#define HCI_AMP 0x01
/* First BR/EDR Controller shall have ID = 0 */
#define AMP_ID_BREDR 0x00
/* AMP controller types */
#define AMP_TYPE_BREDR 0x00
#define AMP_TYPE_80211 0x01
/* AMP controller status */
#define AMP_STATUS_POWERED_DOWN 0x00
#define AMP_STATUS_BLUETOOTH_ONLY 0x01
#define AMP_STATUS_NO_CAPACITY 0x02
#define AMP_STATUS_LOW_CAPACITY 0x03
#define AMP_STATUS_MEDIUM_CAPACITY 0x04
#define AMP_STATUS_HIGH_CAPACITY 0x05
#define AMP_STATUS_FULL_CAPACITY 0x06
/* HCI device quirks */ /* HCI device quirks */
enum { enum {
/* When this quirk is set, the HCI Reset command is send when /* When this quirk is set, the HCI Reset command is send when
@ -526,7 +505,6 @@ enum {
#define ESCO_LINK 0x02 #define ESCO_LINK 0x02
/* Low Energy links do not have defined link type. Use invented one */ /* Low Energy links do not have defined link type. Use invented one */
#define LE_LINK 0x80 #define LE_LINK 0x80
#define AMP_LINK 0x81
#define ISO_LINK 0x82 #define ISO_LINK 0x82
#define INVALID_LINK 0xff #define INVALID_LINK 0xff
@ -940,56 +918,6 @@ struct hci_cp_io_capability_neg_reply {
__u8 reason; __u8 reason;
} __packed; } __packed;
#define HCI_OP_CREATE_PHY_LINK 0x0435
struct hci_cp_create_phy_link {
__u8 phy_handle;
__u8 key_len;
__u8 key_type;
__u8 key[HCI_AMP_LINK_KEY_SIZE];
} __packed;
#define HCI_OP_ACCEPT_PHY_LINK 0x0436
struct hci_cp_accept_phy_link {
__u8 phy_handle;
__u8 key_len;
__u8 key_type;
__u8 key[HCI_AMP_LINK_KEY_SIZE];
} __packed;
#define HCI_OP_DISCONN_PHY_LINK 0x0437
struct hci_cp_disconn_phy_link {
__u8 phy_handle;
__u8 reason;
} __packed;
struct ext_flow_spec {
__u8 id;
__u8 stype;
__le16 msdu;
__le32 sdu_itime;
__le32 acc_lat;
__le32 flush_to;
} __packed;
#define HCI_OP_CREATE_LOGICAL_LINK 0x0438
#define HCI_OP_ACCEPT_LOGICAL_LINK 0x0439
struct hci_cp_create_accept_logical_link {
__u8 phy_handle;
struct ext_flow_spec tx_flow_spec;
struct ext_flow_spec rx_flow_spec;
} __packed;
#define HCI_OP_DISCONN_LOGICAL_LINK 0x043a
struct hci_cp_disconn_logical_link {
__le16 log_handle;
} __packed;
#define HCI_OP_LOGICAL_LINK_CANCEL 0x043b
struct hci_cp_logical_link_cancel {
__u8 phy_handle;
__u8 flow_spec_id;
} __packed;
#define HCI_OP_ENHANCED_SETUP_SYNC_CONN 0x043d #define HCI_OP_ENHANCED_SETUP_SYNC_CONN 0x043d
struct hci_coding_format { struct hci_coding_format {
__u8 id; __u8 id;
@ -1611,46 +1539,6 @@ struct hci_rp_read_enc_key_size {
__u8 key_size; __u8 key_size;
} __packed; } __packed;
#define HCI_OP_READ_LOCAL_AMP_INFO 0x1409
struct hci_rp_read_local_amp_info {
__u8 status;
__u8 amp_status;
__le32 total_bw;
__le32 max_bw;
__le32 min_latency;
__le32 max_pdu;
__u8 amp_type;
__le16 pal_cap;
__le16 max_assoc_size;
__le32 max_flush_to;
__le32 be_flush_to;
} __packed;
#define HCI_OP_READ_LOCAL_AMP_ASSOC 0x140a
struct hci_cp_read_local_amp_assoc {
__u8 phy_handle;
__le16 len_so_far;
__le16 max_len;
} __packed;
struct hci_rp_read_local_amp_assoc {
__u8 status;
__u8 phy_handle;
__le16 rem_len;
__u8 frag[];
} __packed;
#define HCI_OP_WRITE_REMOTE_AMP_ASSOC 0x140b
struct hci_cp_write_remote_amp_assoc {
__u8 phy_handle;
__le16 len_so_far;
__le16 rem_len;
__u8 frag[];
} __packed;
struct hci_rp_write_remote_amp_assoc {
__u8 status;
__u8 phy_handle;
} __packed;
#define HCI_OP_GET_MWS_TRANSPORT_CONFIG 0x140c #define HCI_OP_GET_MWS_TRANSPORT_CONFIG 0x140c
#define HCI_OP_ENABLE_DUT_MODE 0x1803 #define HCI_OP_ENABLE_DUT_MODE 0x1803

View File

@ -782,7 +782,6 @@ struct hci_conn {
void *l2cap_data; void *l2cap_data;
void *sco_data; void *sco_data;
void *iso_data; void *iso_data;
struct amp_mgr *amp_mgr;
struct list_head link_list; struct list_head link_list;
struct hci_conn *parent; struct hci_conn *parent;
@ -1026,9 +1025,6 @@ static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
case ACL_LINK: case ACL_LINK:
h->acl_num++; h->acl_num++;
break; break;
case AMP_LINK:
h->amp_num++;
break;
case LE_LINK: case LE_LINK:
h->le_num++; h->le_num++;
if (c->role == HCI_ROLE_SLAVE) if (c->role == HCI_ROLE_SLAVE)
@ -1055,9 +1051,6 @@ static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
case ACL_LINK: case ACL_LINK:
h->acl_num--; h->acl_num--;
break; break;
case AMP_LINK:
h->amp_num--;
break;
case LE_LINK: case LE_LINK:
h->le_num--; h->le_num--;
if (c->role == HCI_ROLE_SLAVE) if (c->role == HCI_ROLE_SLAVE)
@ -1079,8 +1072,6 @@ static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type)
switch (type) { switch (type) {
case ACL_LINK: case ACL_LINK:
return h->acl_num; return h->acl_num;
case AMP_LINK:
return h->amp_num;
case LE_LINK: case LE_LINK:
return h->le_num; return h->le_num;
case SCO_LINK: case SCO_LINK:
@ -1097,7 +1088,7 @@ static inline unsigned int hci_conn_count(struct hci_dev *hdev)
{ {
struct hci_conn_hash *c = &hdev->conn_hash; struct hci_conn_hash *c = &hdev->conn_hash;
return c->acl_num + c->amp_num + c->sco_num + c->le_num + c->iso_num; return c->acl_num + c->sco_num + c->le_num + c->iso_num;
} }
static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle) static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle)
@ -1583,10 +1574,6 @@ static inline void hci_conn_drop(struct hci_conn *conn)
} }
break; break;
case AMP_LINK:
timeo = conn->disc_timeout;
break;
default: default:
timeo = 0; timeo = 0;
break; break;

22
include/trace/hooks/blk.h Normal file
View File

@ -0,0 +1,22 @@
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM blk
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH trace/hooks
#if !defined(_TRACE_HOOK_BLK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_HOOK_BLK_H
#include <trace/hooks/vendor_hooks.h>
struct block_device;
struct gendisk;
DECLARE_HOOK(android_vh_bd_link_disk_holder,
TP_PROTO(struct block_device *bdev, struct gendisk *disk),
TP_ARGS(bdev, disk));
#endif /* _TRACE_HOOK_BLK_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View File

@ -0,0 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bpf_jit_comp
#define TRACE_INCLUDE_PATH trace/hooks
#if !defined(_TRACE_HOOK_BPF_JIT_COMP_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_HOOK_BPF_JIT_COMP_H
#include <trace/hooks/vendor_hooks.h>
/*
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
struct bpf_binary_header;
DECLARE_RESTRICTED_HOOK(android_rvh_bpf_int_jit_compile_ro,
TP_PROTO(const struct bpf_binary_header *header, u32 size),
TP_ARGS(header, size), 1);
#endif /* _TRACE_HOOK_BPF_JIT_COMP_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View File

@ -0,0 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM cpuinfo
#define TRACE_INCLUDE_PATH trace/hooks
#if !defined(_TRACE_HOOK_CPUINFO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_HOOK_CPUINFO_H
#include <trace/hooks/vendor_hooks.h>
DECLARE_RESTRICTED_HOOK(android_rvh_cpuinfo_c_show,
TP_PROTO(struct seq_file *m),
TP_ARGS(m), 1);
#endif /* _TRACE_HOOK_CPUINFO_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View File

@ -62,6 +62,8 @@ DECLARE_HOOK(android_vh_build_skb_around,
TP_PROTO(struct sk_buff *skb), TP_ARGS(skb)); TP_PROTO(struct sk_buff *skb), TP_ARGS(skb));
DECLARE_HOOK(android_vh_tcp_write_timeout_estab_retrans, DECLARE_HOOK(android_vh_tcp_write_timeout_estab_retrans,
TP_PROTO(struct sock *sk), TP_ARGS(sk)); TP_PROTO(struct sock *sk), TP_ARGS(sk));
DECLARE_HOOK(android_vh_tcp_connect,
TP_PROTO(struct sk_buff *skb), TP_ARGS(skb));
/* macro versions of hooks are no longer required */ /* macro versions of hooks are no longer required */
#endif /* _TRACE_HOOK_NET_VH_H */ #endif /* _TRACE_HOOK_NET_VH_H */

View File

@ -76,6 +76,10 @@ DECLARE_RESTRICTED_HOOK(android_rvh_set_user_nice,
TP_PROTO(struct task_struct *p, long *nice, bool *allowed), TP_PROTO(struct task_struct *p, long *nice, bool *allowed),
TP_ARGS(p, nice, allowed), 1); TP_ARGS(p, nice, allowed), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_set_user_nice_locked,
TP_PROTO(struct task_struct *p, long *nice),
TP_ARGS(p, nice), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_setscheduler, DECLARE_RESTRICTED_HOOK(android_rvh_setscheduler,
TP_PROTO(struct task_struct *p), TP_PROTO(struct task_struct *p),
TP_ARGS(p), 1); TP_ARGS(p), 1);
@ -193,6 +197,11 @@ DECLARE_RESTRICTED_HOOK(android_rvh_update_misfit_status,
TP_PROTO(struct task_struct *p, struct rq *rq, bool *need_update), TP_PROTO(struct task_struct *p, struct rq *rq, bool *need_update),
TP_ARGS(p, rq, need_update), 1); TP_ARGS(p, rq, need_update), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_util_fits_cpu,
TP_PROTO(unsigned long util, unsigned long uclamp_min, unsigned long uclamp_max,
int cpu, bool *fits, bool *done),
TP_ARGS(util, uclamp_min, uclamp_max, cpu, fits, done), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_sched_fork_init, DECLARE_RESTRICTED_HOOK(android_rvh_sched_fork_init,
TP_PROTO(struct task_struct *p), TP_PROTO(struct task_struct *p),
TP_ARGS(p), 1); TP_ARGS(p), 1);
@ -317,6 +326,11 @@ DECLARE_HOOK(android_vh_setscheduler_uclamp,
TP_PROTO(struct task_struct *tsk, int clamp_id, unsigned int value), TP_PROTO(struct task_struct *tsk, int clamp_id, unsigned int value),
TP_ARGS(tsk, clamp_id, value)); TP_ARGS(tsk, clamp_id, value));
DECLARE_HOOK(android_vh_uclamp_validate,
TP_PROTO(struct task_struct *p, const struct sched_attr *attr,
int *ret, bool *done),
TP_ARGS(p, attr, ret, done));
DECLARE_HOOK(android_vh_update_topology_flags_workfn, DECLARE_HOOK(android_vh_update_topology_flags_workfn,
TP_PROTO(void *unused), TP_PROTO(void *unused),
TP_ARGS(unused)); TP_ARGS(unused));
@ -420,6 +434,52 @@ DECLARE_HOOK(android_vh_mmput,
TP_PROTO(void *unused), TP_PROTO(void *unused),
TP_ARGS(unused)); TP_ARGS(unused));
DECLARE_RESTRICTED_HOOK(android_rvh_attach_entity_load_avg,
TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *se),
TP_ARGS(cfs_rq, se), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_detach_entity_load_avg,
TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *se),
TP_ARGS(cfs_rq, se), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_update_load_avg,
TP_PROTO(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se),
TP_ARGS(now, cfs_rq, se), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_remove_entity_load_avg,
TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *se),
TP_ARGS(cfs_rq, se), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_update_blocked_fair,
TP_PROTO(struct rq *rq),
TP_ARGS(rq), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_update_load_sum,
TP_PROTO(struct sched_avg *sa, u64 *delta, unsigned int *sched_pelt_lshift),
TP_ARGS(sa, delta, sched_pelt_lshift), 1);
struct sched_attr;
DECLARE_HOOK(android_vh_set_sugov_sched_attr,
TP_PROTO(struct sched_attr *attr),
TP_ARGS(attr));
DECLARE_RESTRICTED_HOOK(android_rvh_set_iowait,
TP_PROTO(struct task_struct *p, struct rq *rq, int *should_iowait_boost),
TP_ARGS(p, rq, should_iowait_boost), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_update_rt_rq_load_avg,
TP_PROTO(u64 now, struct rq *rq, struct task_struct *tsk, int running),
TP_ARGS(now, rq, tsk, running), 1);
DECLARE_HOOK(android_vh_prio_inheritance,
TP_PROTO(struct task_struct *p, int *saved_prio, bool *prio_inherited),
TP_ARGS(p, saved_prio, prio_inherited));
DECLARE_HOOK(android_vh_prio_restore,
TP_PROTO(int saved_prio),
TP_ARGS(saved_prio));
/* macro versions of hooks are no longer required */ /* macro versions of hooks are no longer required */
#endif /* _TRACE_HOOK_SCHED_H */ #endif /* _TRACE_HOOK_SCHED_H */

View File

@ -0,0 +1,26 @@
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM suspend
#define TRACE_INCLUDE_PATH trace/hooks
#if !defined(_TRACE_HOOK_SUSPEND_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_HOOK_SUSPEND_H
#include <trace/hooks/vendor_hooks.h>
DECLARE_HOOK(android_vh_resume_begin,
TP_PROTO(void *unused),
TP_ARGS(unused))
DECLARE_HOOK(android_vh_resume_end,
TP_PROTO(void *unused),
TP_ARGS(unused))
DECLARE_HOOK(android_vh_early_resume_begin,
TP_PROTO(void *unused),
TP_ARGS(unused))
/* macro versions of hooks are no longer required */
#endif /* _TRACE_HOOK_SUSPEND_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View File

@ -9,6 +9,10 @@
#include <trace/hooks/vendor_hooks.h> #include <trace/hooks/vendor_hooks.h>
DECLARE_HOOK(android_vh_enable_thermal_genl_check,
TP_PROTO(int event, int tz_id, int *enable_thermal_genl),
TP_ARGS(event, tz_id, enable_thermal_genl));
struct thermal_cooling_device; struct thermal_cooling_device;
DECLARE_HOOK(android_vh_disable_thermal_cooling_stats, DECLARE_HOOK(android_vh_disable_thermal_cooling_stats,
TP_PROTO(struct thermal_cooling_device *cdev, bool *disable_stats), TP_PROTO(struct thermal_cooling_device *cdev, bool *disable_stats),

View File

@ -28,6 +28,10 @@ DECLARE_RESTRICTED_HOOK(android_rvh_cpu_capacity_show,
#endif #endif
DECLARE_HOOK(android_vh_use_amu_fie,
TP_PROTO(bool *use_amu_fie),
TP_ARGS(use_amu_fie));
#endif /* _TRACE_HOOK_TOPOLOGY_H */ #endif /* _TRACE_HOOK_TOPOLOGY_H */
/* This part must be outside protection */ /* This part must be outside protection */
#include <trace/define_trace.h> #include <trace/define_trace.h>

View File

@ -36,7 +36,18 @@ DECLARE_HOOK(android_vh_tune_scan_type,
DECLARE_HOOK(android_vh_page_referenced_check_bypass, DECLARE_HOOK(android_vh_page_referenced_check_bypass,
TP_PROTO(struct folio *folio, unsigned long nr_to_scan, int lru, bool *bypass), TP_PROTO(struct folio *folio, unsigned long nr_to_scan, int lru, bool *bypass),
TP_ARGS(folio, nr_to_scan, lru, bypass)); TP_ARGS(folio, nr_to_scan, lru, bypass));
DECLARE_HOOK(android_vh_modify_scan_control,
TP_PROTO(u64 *ext, unsigned long *nr_to_reclaim,
struct mem_cgroup *target_mem_cgroup,
bool *file_is_tiny, bool *may_writepage),
TP_ARGS(ext, nr_to_reclaim, target_mem_cgroup, file_is_tiny, may_writepage));
DECLARE_HOOK(android_vh_should_continue_reclaim,
TP_PROTO(u64 *ext, unsigned long *nr_to_reclaim,
unsigned long *nr_reclaimed, bool *continue_reclaim),
TP_ARGS(ext, nr_to_reclaim, nr_reclaimed, continue_reclaim));
DECLARE_HOOK(android_vh_file_is_tiny_bypass,
TP_PROTO(bool file_is_tiny, bool *bypass),
TP_ARGS(file_is_tiny, bypass));
#endif /* _TRACE_HOOK_VMSCAN_H */ #endif /* _TRACE_HOOK_VMSCAN_H */
/* This part must be outside protection */ /* This part must be outside protection */
#include <trace/define_trace.h> #include <trace/define_trace.h>

View File

@ -14,6 +14,10 @@ DECLARE_HOOK(android_vh_wq_lockup_pool,
TP_PROTO(int cpu, unsigned long pool_ts), TP_PROTO(int cpu, unsigned long pool_ts),
TP_ARGS(cpu, pool_ts)); TP_ARGS(cpu, pool_ts));
DECLARE_HOOK(android_rvh_alloc_and_link_pwqs,
TP_PROTO(struct workqueue_struct *wq, int *ret, bool *skip),
TP_ARGS(wq, ret, skip));
#endif /* _TRACE_HOOK_WQLOCKUP_H */ #endif /* _TRACE_HOOK_WQLOCKUP_H */
/* This part must be outside protection */ /* This part must be outside protection */
#include <trace/define_trace.h> #include <trace/define_trace.h>

View File

@ -13,7 +13,6 @@
enum virtio_bt_config_type { enum virtio_bt_config_type {
VIRTIO_BT_CONFIG_TYPE_PRIMARY = 0, VIRTIO_BT_CONFIG_TYPE_PRIMARY = 0,
VIRTIO_BT_CONFIG_TYPE_AMP = 1,
}; };
enum virtio_bt_config_vendor { enum virtio_bt_config_vendor {

View File

@ -321,6 +321,7 @@ out_free_pages:
out_leak_pages: out_leak_pages:
return NULL; return NULL;
} }
EXPORT_SYMBOL_GPL(dma_direct_alloc);
void dma_direct_free(struct device *dev, size_t size, void dma_direct_free(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs) void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
@ -366,6 +367,7 @@ void dma_direct_free(struct device *dev, size_t size,
__dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size); __dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
} }
EXPORT_SYMBOL_GPL(dma_direct_free);
struct page *dma_direct_alloc_pages(struct device *dev, size_t size, struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp) dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)

View File

@ -31,6 +31,7 @@
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/moduleparam.h> #include <linux/moduleparam.h>
#include <linux/wakeup_reason.h> #include <linux/wakeup_reason.h>
#include <trace/hooks/suspend.h>
#include "power.h" #include "power.h"
@ -467,6 +468,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
error = suspend_ops->enter(state); error = suspend_ops->enter(state);
trace_suspend_resume(TPS("machine_suspend"), trace_suspend_resume(TPS("machine_suspend"),
state, false); state, false);
trace_android_vh_early_resume_begin(NULL);
} else if (*wakeup) { } else if (*wakeup) {
error = -EBUSY; error = -EBUSY;
} }
@ -535,6 +537,7 @@ int suspend_devices_and_enter(suspend_state_t state)
} while (!error && !wakeup && platform_suspend_again(state)); } while (!error && !wakeup && platform_suspend_again(state));
Resume_devices: Resume_devices:
trace_android_vh_resume_begin(NULL);
suspend_test_start(); suspend_test_start();
dpm_resume_end(PMSG_RESUME); dpm_resume_end(PMSG_RESUME);
suspend_test_finish("resume devices"); suspend_test_finish("resume devices");
@ -545,6 +548,7 @@ int suspend_devices_and_enter(suspend_state_t state)
Close: Close:
platform_resume_end(state); platform_resume_end(state);
pm_suspend_target_state = PM_SUSPEND_ON; pm_suspend_target_state = PM_SUSPEND_ON;
trace_android_vh_resume_end(NULL);
return error; return error;
Recover_platform: Recover_platform:

View File

@ -1931,6 +1931,12 @@ static int uclamp_validate(struct task_struct *p,
{ {
int util_min = p->uclamp_req[UCLAMP_MIN].value; int util_min = p->uclamp_req[UCLAMP_MIN].value;
int util_max = p->uclamp_req[UCLAMP_MAX].value; int util_max = p->uclamp_req[UCLAMP_MAX].value;
bool done = false;
int ret = 0;
trace_android_vh_uclamp_validate(p, attr, &ret, &done);
if (done)
return ret;
if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) { if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
util_min = attr->sched_util_min; util_min = attr->sched_util_min;
@ -2792,6 +2798,7 @@ out_unlock:
put_task_struct(p); put_task_struct(p);
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(push_cpu_stop);
/* /*
* sched_class::set_cpus_allowed must do the below, but is not required to * sched_class::set_cpus_allowed must do the below, but is not required to
@ -7345,6 +7352,10 @@ void set_user_nice(struct task_struct *p, long nice)
rq = task_rq_lock(p, &rf); rq = task_rq_lock(p, &rf);
update_rq_clock(rq); update_rq_clock(rq);
trace_android_rvh_set_user_nice_locked(p, &nice);
if (task_nice(p) == nice)
goto out_unlock;
/* /*
* The RT priorities are set via sched_setscheduler(), but we still * The RT priorities are set via sched_setscheduler(), but we still
* allow the 'normal' nice value to be set - but as expected * allow the 'normal' nice value to be set - but as expected

View File

@ -608,6 +608,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
if (policy->fast_switch_enabled) if (policy->fast_switch_enabled)
return 0; return 0;
trace_android_vh_set_sugov_sched_attr(&attr);
kthread_init_work(&sg_policy->work, sugov_work); kthread_init_work(&sg_policy->work, sugov_work);
kthread_init_worker(&sg_policy->worker); kthread_init_worker(&sg_policy->worker);
thread = kthread_create(kthread_worker_fn, &sg_policy->worker, thread = kthread_create(kthread_worker_fn, &sg_policy->worker,

View File

@ -96,6 +96,7 @@ unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
* (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds) * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
*/ */
unsigned int sysctl_sched_base_slice = 750000ULL; unsigned int sysctl_sched_base_slice = 750000ULL;
EXPORT_SYMBOL_GPL(sysctl_sched_base_slice);
static unsigned int normalized_sysctl_sched_base_slice = 750000ULL; static unsigned int normalized_sysctl_sched_base_slice = 750000ULL;
/* /*
@ -3807,6 +3808,7 @@ void reweight_task(struct task_struct *p, int prio)
reweight_entity(cfs_rq, se, weight); reweight_entity(cfs_rq, se, weight);
load->inv_weight = sched_prio_to_wmult[prio]; load->inv_weight = sched_prio_to_wmult[prio];
} }
EXPORT_SYMBOL_GPL(reweight_task);
static inline int throttled_hierarchy(struct cfs_rq *cfs_rq); static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
@ -4582,6 +4584,8 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
else else
se->avg.load_sum = 1; se->avg.load_sum = 1;
trace_android_rvh_attach_entity_load_avg(cfs_rq, se);
enqueue_load_avg(cfs_rq, se); enqueue_load_avg(cfs_rq, se);
cfs_rq->avg.util_avg += se->avg.util_avg; cfs_rq->avg.util_avg += se->avg.util_avg;
cfs_rq->avg.util_sum += se->avg.util_sum; cfs_rq->avg.util_sum += se->avg.util_sum;
@ -4605,6 +4609,8 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
*/ */
static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{ {
trace_android_rvh_detach_entity_load_avg(cfs_rq, se);
dequeue_load_avg(cfs_rq, se); dequeue_load_avg(cfs_rq, se);
sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum); sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
@ -4649,6 +4655,8 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
decayed = update_cfs_rq_load_avg(now, cfs_rq); decayed = update_cfs_rq_load_avg(now, cfs_rq);
decayed |= propagate_entity_load_avg(se); decayed |= propagate_entity_load_avg(se);
trace_android_rvh_update_load_avg(now, cfs_rq, se);
if (!se->avg.last_update_time && (flags & DO_ATTACH)) { if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
/* /*
@ -4706,6 +4714,8 @@ static void remove_entity_load_avg(struct sched_entity *se)
sync_entity_load_avg(se); sync_entity_load_avg(se);
trace_android_rvh_remove_entity_load_avg(cfs_rq, se);
raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags); raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags);
++cfs_rq->removed.nr; ++cfs_rq->removed.nr;
cfs_rq->removed.util_avg += se->avg.util_avg; cfs_rq->removed.util_avg += se->avg.util_avg;
@ -4879,7 +4889,12 @@ static inline int util_fits_cpu(unsigned long util,
{ {
unsigned long capacity_orig, capacity_orig_thermal; unsigned long capacity_orig, capacity_orig_thermal;
unsigned long capacity = capacity_of(cpu); unsigned long capacity = capacity_of(cpu);
bool fits, uclamp_max_fits; bool fits, uclamp_max_fits, done = false;
trace_android_rvh_util_fits_cpu(util, uclamp_min, uclamp_max, cpu, &fits, &done);
if (done)
return fits;
/* /*
* Check if the real util fits without any uclamp boost/cap applied. * Check if the real util fits without any uclamp boost/cap applied.
@ -5018,7 +5033,7 @@ static inline int is_misfit_task(struct task_struct *p, struct rq *rq,
return 1; return 1;
} }
static inline void update_misfit_status(struct task_struct *p, struct rq *rq) inline void update_misfit_status(struct task_struct *p, struct rq *rq)
{ {
bool need_update = true; bool need_update = true;
misfit_reason_t reason; misfit_reason_t reason;
@ -5040,6 +5055,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1); rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1);
rq->misfit_reason = reason; rq->misfit_reason = reason;
} }
EXPORT_SYMBOL_GPL(update_misfit_status);
#else /* CONFIG_SMP */ #else /* CONFIG_SMP */
@ -6653,6 +6669,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
struct sched_entity *se = &p->se; struct sched_entity *se = &p->se;
int idle_h_nr_running = task_has_idle_policy(p); int idle_h_nr_running = task_has_idle_policy(p);
int task_new = !(flags & ENQUEUE_WAKEUP); int task_new = !(flags & ENQUEUE_WAKEUP);
int should_iowait_boost;
/* /*
* The code below (indirectly) updates schedutil which looks at * The code below (indirectly) updates schedutil which looks at
@ -6667,7 +6684,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
* utilization updates, so do it here explicitly with the IOWAIT flag * utilization updates, so do it here explicitly with the IOWAIT flag
* passed. * passed.
*/ */
if (p->in_iowait) should_iowait_boost = p->in_iowait;
trace_android_rvh_set_iowait(p, rq, &should_iowait_boost);
if (should_iowait_boost)
cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT); cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
for_each_sched_entity(se) { for_each_sched_entity(se) {
@ -9306,6 +9325,8 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
bool decayed = false; bool decayed = false;
int cpu = cpu_of(rq); int cpu = cpu_of(rq);
trace_android_rvh_update_blocked_fair(rq);
/* /*
* Iterates the task_group tree in a bottom up fashion, see * Iterates the task_group tree in a bottom up fashion, see
* list_add_leaf_cfs_rq() for details. * list_add_leaf_cfs_rq() for details.

View File

@ -24,6 +24,8 @@
* Author: Vincent Guittot <vincent.guittot@linaro.org> * Author: Vincent Guittot <vincent.guittot@linaro.org>
*/ */
#include <trace/hooks/sched.h>
/* /*
* Approximate: * Approximate:
* val * y^n, where y^32 ~= 0.5 (~1 scheduling period) * val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
@ -176,7 +178,7 @@ accumulate_sum(u64 delta, struct sched_avg *sa,
* load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... ) * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
* = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}] * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
*/ */
static __always_inline int int
___update_load_sum(u64 now, struct sched_avg *sa, ___update_load_sum(u64 now, struct sched_avg *sa,
unsigned long load, unsigned long runnable, int running) unsigned long load, unsigned long runnable, int running)
{ {
@ -202,6 +204,8 @@ ___update_load_sum(u64 now, struct sched_avg *sa,
sa->last_update_time += delta << 10; sa->last_update_time += delta << 10;
trace_android_rvh_update_load_sum(sa, &delta, &sched_pelt_lshift);
/* /*
* running is a subset of runnable (weight) so running can't be set if * running is a subset of runnable (weight) so running can't be set if
* runnable is clear. But there are some corner cases where the current * runnable is clear. But there are some corner cases where the current
@ -228,6 +232,7 @@ ___update_load_sum(u64 now, struct sched_avg *sa,
return 1; return 1;
} }
EXPORT_SYMBOL_GPL(___update_load_sum);
/* /*
* When syncing *_avg with *_sum, we must take into account the current * When syncing *_avg with *_sum, we must take into account the current
@ -253,7 +258,7 @@ ___update_load_sum(u64 now, struct sched_avg *sa,
* the period_contrib of cfs_rq when updating the sched_avg of a sched_entity * the period_contrib of cfs_rq when updating the sched_avg of a sched_entity
* if it's more convenient. * if it's more convenient.
*/ */
static __always_inline void void
___update_load_avg(struct sched_avg *sa, unsigned long load) ___update_load_avg(struct sched_avg *sa, unsigned long load)
{ {
u32 divider = get_pelt_divider(sa); u32 divider = get_pelt_divider(sa);
@ -265,6 +270,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load)
sa->runnable_avg = div_u64(sa->runnable_sum, divider); sa->runnable_avg = div_u64(sa->runnable_sum, divider);
WRITE_ONCE(sa->util_avg, sa->util_sum / divider); WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
} }
EXPORT_SYMBOL_GPL(___update_load_avg);
/* /*
* sched_entity: * sched_entity:

View File

@ -1849,6 +1849,9 @@ static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool f
if (rq->curr->sched_class != &rt_sched_class) if (rq->curr->sched_class != &rt_sched_class)
update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0); update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
/* Should always be called unlike update_rt_rq_load_avg() */
trace_android_rvh_update_rt_rq_load_avg(rq_clock_pelt(rq), rq, p, 0);
rt_queue_push_tasks(rq); rt_queue_push_tasks(rq);
} }
@ -1918,6 +1921,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
update_curr_rt(rq); update_curr_rt(rq);
update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1); update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
trace_android_rvh_update_rt_rq_load_avg(rq_clock_pelt(rq), rq, p, 1);
/* /*
* The previous task needs to be made eligible for pushing * The previous task needs to be made eligible for pushing
@ -2745,6 +2749,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
update_curr_rt(rq); update_curr_rt(rq);
update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1); update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
trace_android_rvh_update_rt_rq_load_avg(rq_clock_pelt(rq), rq, p, 1);
watchdog(rq, p); watchdog(rq, p);

View File

@ -25,6 +25,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_finish_prio_fork);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_rtmutex_force_update); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_rtmutex_force_update);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_rtmutex_prepare_setprio); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_rtmutex_prepare_setprio);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_user_nice); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_user_nice);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_user_nice_locked);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_setscheduler); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_setscheduler);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_find_busiest_group); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_find_busiest_group);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_dump_throttled_rt_tasks); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_dump_throttled_rt_tasks);
@ -70,6 +71,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_cpu_dying);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_account_irq); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_account_irq);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_place_entity); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_place_entity);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_build_perf_domains); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_build_perf_domains);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_util_fits_cpu);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_cpu_capacity); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_cpu_capacity);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_misfit_status); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_misfit_status);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_rto_next_cpu); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_rto_next_cpu);
@ -108,3 +110,15 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpu_cgroup_css_free);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_reweight_entity); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_reweight_entity);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_context_switch); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_context_switch);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mmput); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mmput);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_attach_entity_load_avg);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_detach_entity_load_avg);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_load_sum);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_load_avg);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_remove_entity_load_avg);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_blocked_fair);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_rt_rq_load_avg);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_uclamp_validate);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_sugov_sched_attr);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_iowait);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_prio_inheritance);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_prio_restore);

View File

@ -35,6 +35,10 @@
EXPORT_TRACEPOINT_SYMBOL_GPL(irq_handler_entry); EXPORT_TRACEPOINT_SYMBOL_GPL(irq_handler_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(irq_handler_exit); EXPORT_TRACEPOINT_SYMBOL_GPL(irq_handler_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(softirq_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(softirq_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(tasklet_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(tasklet_exit);
/* /*
- No shared variables, all the data are CPU local. - No shared variables, all the data are CPU local.

View File

@ -3730,6 +3730,7 @@ void free_workqueue_attrs(struct workqueue_attrs *attrs)
kfree(attrs); kfree(attrs);
} }
} }
EXPORT_SYMBOL_GPL(free_workqueue_attrs);
/** /**
* alloc_workqueue_attrs - allocate a workqueue_attrs * alloc_workqueue_attrs - allocate a workqueue_attrs
@ -3758,6 +3759,7 @@ fail:
free_workqueue_attrs(attrs); free_workqueue_attrs(attrs);
return NULL; return NULL;
} }
EXPORT_SYMBOL_GPL(alloc_workqueue_attrs);
static void copy_workqueue_attrs(struct workqueue_attrs *to, static void copy_workqueue_attrs(struct workqueue_attrs *to,
const struct workqueue_attrs *from) const struct workqueue_attrs *from)
@ -4499,6 +4501,7 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(apply_workqueue_attrs);
/** /**
* wq_update_pod - update pod affinity of a wq for CPU hot[un]plug * wq_update_pod - update pod affinity of a wq for CPU hot[un]plug
@ -4579,6 +4582,7 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
{ {
bool highpri = wq->flags & WQ_HIGHPRI; bool highpri = wq->flags & WQ_HIGHPRI;
int cpu, ret; int cpu, ret;
bool skip = false;
wq->cpu_pwq = alloc_percpu(struct pool_workqueue *); wq->cpu_pwq = alloc_percpu(struct pool_workqueue *);
if (!wq->cpu_pwq) if (!wq->cpu_pwq)
@ -4605,6 +4609,10 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
return 0; return 0;
} }
trace_android_rvh_alloc_and_link_pwqs(wq, &ret, &skip);
if (skip)
goto oem_skip;
cpus_read_lock(); cpus_read_lock();
if (wq->flags & __WQ_ORDERED) { if (wq->flags & __WQ_ORDERED) {
ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]); ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
@ -4617,6 +4625,7 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
} }
cpus_read_unlock(); cpus_read_unlock();
oem_skip:
/* for unbound pwq, flush the pwq_release_worker ensures that the /* for unbound pwq, flush the pwq_release_worker ensures that the
* pwq_release_workfn() completes before calling kfree(wq). * pwq_release_workfn() completes before calling kfree(wq).
*/ */

View File

@ -294,11 +294,14 @@ struct vm_area_struct *get_pad_vma(struct vm_area_struct *vma)
/* Adjust the start to begin at the start of the padding section */ /* Adjust the start to begin at the start of the padding section */
pad->vm_start = VMA_PAD_START(pad); pad->vm_start = VMA_PAD_START(pad);
/*
* The below modifications to vm_flags don't need mmap write lock,
* since, pad does not belong to the VMA tree.
*/
/* Make the pad vma PROT_NONE */ /* Make the pad vma PROT_NONE */
vm_flags_clear(pad, VM_READ|VM_WRITE|VM_EXEC); __vm_flags_mod(pad, 0, VM_READ|VM_WRITE|VM_EXEC);
/* Remove padding bits */ /* Remove padding bits */
vm_flags_clear(pad, VM_PAD_MASK); __vm_flags_mod(pad, 0, VM_PAD_MASK);
return pad; return pad;
} }

View File

@ -434,3 +434,4 @@ void __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
#endif #endif
trace_android_vh_show_mem(filter, nodemask); trace_android_vh_show_mem(filter, nodemask);
} }
EXPORT_SYMBOL_GPL(__show_mem);

View File

@ -2976,6 +2976,7 @@ static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc)
{ {
unsigned long file; unsigned long file;
struct lruvec *target_lruvec; struct lruvec *target_lruvec;
bool bypass = false;
if (lru_gen_enabled()) if (lru_gen_enabled())
return; return;
@ -3037,6 +3038,11 @@ static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc)
else else
sc->cache_trim_mode = 0; sc->cache_trim_mode = 0;
trace_android_vh_file_is_tiny_bypass(sc->file_is_tiny, &bypass);
if (bypass)
return;
/* /*
* Prevent the reclaimer from falling into the cache trap: as * Prevent the reclaimer from falling into the cache trap: as
* cache pages start out inactive, every cache fault will tip * cache pages start out inactive, every cache fault will tip
@ -6523,6 +6529,7 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
unsigned long pages_for_compaction; unsigned long pages_for_compaction;
unsigned long inactive_lru_pages; unsigned long inactive_lru_pages;
int z; int z;
bool continue_reclaim = true;
/* If not in reclaim/compaction mode, stop */ /* If not in reclaim/compaction mode, stop */
if (!in_reclaim_compaction(sc)) if (!in_reclaim_compaction(sc))
@ -6565,6 +6572,13 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc)) if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc))
inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON); inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
#ifdef CONFIG_ANDROID_VENDOR_OEM_DATA
trace_android_vh_should_continue_reclaim(&sc->android_vendor_data1,
&sc->nr_to_reclaim, &sc->nr_reclaimed, &continue_reclaim);
#endif
if (!continue_reclaim)
return false;
return inactive_lru_pages > pages_for_compaction; return inactive_lru_pages > pages_for_compaction;
} }
@ -6917,6 +6931,22 @@ static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat)
target_lruvec->refaults[WORKINGSET_FILE] = refaults; target_lruvec->refaults[WORKINGSET_FILE] = refaults;
} }
static void modify_scan_control(struct scan_control *sc)
{
bool file_is_tiny = false, may_writepage = true;
#ifdef CONFIG_ANDROID_VENDOR_OEM_DATA
trace_android_vh_modify_scan_control(&sc->android_vendor_data1,
&sc->nr_to_reclaim, sc->target_mem_cgroup, &file_is_tiny,
&may_writepage);
#endif
if (file_is_tiny)
sc->file_is_tiny = true;
if (!may_writepage)
sc->may_writepage = false;
}
/* /*
* This is the main entry point to direct page reclaim. * This is the main entry point to direct page reclaim.
* *
@ -6940,6 +6970,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
pg_data_t *last_pgdat; pg_data_t *last_pgdat;
struct zoneref *z; struct zoneref *z;
struct zone *zone; struct zone *zone;
modify_scan_control(sc);
retry: retry:
delayacct_freepages_start(); delayacct_freepages_start();

View File

@ -93,6 +93,7 @@ _ARM_GKI_MODULES_LIST = [
_ARM64_GKI_MODULES_LIST = [ _ARM64_GKI_MODULES_LIST = [
# keep sorted # keep sorted
"arch/arm64/geniezone/gzvm.ko", "arch/arm64/geniezone/gzvm.ko",
"drivers/android/rust_binder.ko",
"drivers/char/hw_random/cctrng.ko", "drivers/char/hw_random/cctrng.ko",
"drivers/misc/open-dice.ko", "drivers/misc/open-dice.ko",
"drivers/ptp/ptp_kvm.ko", "drivers/ptp/ptp_kvm.ko",
@ -105,6 +106,7 @@ _X86_GKI_MODULES_LIST = [
_X86_64_GKI_MODULES_LIST = [ _X86_64_GKI_MODULES_LIST = [
# keep sorted # keep sorted
"drivers/android/rust_binder.ko",
"drivers/ptp/ptp_kvm.ko", "drivers/ptp/ptp_kvm.ko",
] ]

View File

@ -1200,8 +1200,7 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
list_for_each_entry(d, &hci_dev_list, list) { list_for_each_entry(d, &hci_dev_list, list) {
if (!test_bit(HCI_UP, &d->flags) || if (!test_bit(HCI_UP, &d->flags) ||
hci_dev_test_flag(d, HCI_USER_CHANNEL) || hci_dev_test_flag(d, HCI_USER_CHANNEL))
d->dev_type != HCI_PRIMARY)
continue; continue;
/* Simple routing: /* Simple routing:

View File

@ -395,11 +395,6 @@ int hci_inquiry(void __user *arg)
goto done; goto done;
} }
if (hdev->dev_type != HCI_PRIMARY) {
err = -EOPNOTSUPP;
goto done;
}
if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
err = -EOPNOTSUPP; err = -EOPNOTSUPP;
goto done; goto done;
@ -752,11 +747,6 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
goto done; goto done;
} }
if (hdev->dev_type != HCI_PRIMARY) {
err = -EOPNOTSUPP;
goto done;
}
if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
err = -EOPNOTSUPP; err = -EOPNOTSUPP;
goto done; goto done;
@ -910,7 +900,7 @@ int hci_get_dev_info(void __user *arg)
strscpy(di.name, hdev->name, sizeof(di.name)); strscpy(di.name, hdev->name, sizeof(di.name));
di.bdaddr = hdev->bdaddr; di.bdaddr = hdev->bdaddr;
di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4); di.type = (hdev->bus & 0x0f);
di.flags = flags; di.flags = flags;
di.pkt_type = hdev->pkt_type; di.pkt_type = hdev->pkt_type;
if (lmp_bredr_capable(hdev)) { if (lmp_bredr_capable(hdev)) {
@ -995,8 +985,7 @@ static void hci_power_on(struct work_struct *work)
*/ */
if (hci_dev_test_flag(hdev, HCI_RFKILLED) || if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
hci_dev_test_flag(hdev, HCI_UNCONFIGURED) || hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
(hdev->dev_type == HCI_PRIMARY && (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
!bacmp(&hdev->static_addr, BDADDR_ANY))) { !bacmp(&hdev->static_addr, BDADDR_ANY))) {
hci_dev_clear_flag(hdev, HCI_AUTO_OFF); hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
hci_dev_do_close(hdev); hci_dev_do_close(hdev);
@ -2604,21 +2593,7 @@ int hci_register_dev(struct hci_dev *hdev)
if (!hdev->open || !hdev->close || !hdev->send) if (!hdev->open || !hdev->close || !hdev->send)
return -EINVAL; return -EINVAL;
/* Do not allow HCI_AMP devices to register at index 0, id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
* so the index can be used as the AMP controller ID.
*/
switch (hdev->dev_type) {
case HCI_PRIMARY:
id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
break;
case HCI_AMP:
id = ida_alloc_range(&hci_index_ida, 1, HCI_MAX_ID - 1,
GFP_KERNEL);
break;
default:
return -EINVAL;
}
if (id < 0) if (id < 0)
return id; return id;
@ -2670,12 +2645,10 @@ int hci_register_dev(struct hci_dev *hdev)
hci_dev_set_flag(hdev, HCI_SETUP); hci_dev_set_flag(hdev, HCI_SETUP);
hci_dev_set_flag(hdev, HCI_AUTO_OFF); hci_dev_set_flag(hdev, HCI_AUTO_OFF);
if (hdev->dev_type == HCI_PRIMARY) { /* Assume BR/EDR support until proven otherwise (such as
/* Assume BR/EDR support until proven otherwise (such as * through reading supported features during init.
* through reading supported features during init. */
*/ hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
}
write_lock(&hci_dev_list_lock); write_lock(&hci_dev_list_lock);
list_add(&hdev->list, &hci_dev_list); list_add(&hdev->list, &hci_dev_list);
@ -3211,17 +3184,7 @@ static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT; hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
switch (hdev->dev_type) { hci_add_acl_hdr(skb, conn->handle, flags);
case HCI_PRIMARY:
hci_add_acl_hdr(skb, conn->handle, flags);
break;
case HCI_AMP:
hci_add_acl_hdr(skb, chan->handle, flags);
break;
default:
bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
return;
}
list = skb_shinfo(skb)->frag_list; list = skb_shinfo(skb)->frag_list;
if (!list) { if (!list) {
@ -3381,9 +3344,6 @@ static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
case ACL_LINK: case ACL_LINK:
cnt = hdev->acl_cnt; cnt = hdev->acl_cnt;
break; break;
case AMP_LINK:
cnt = hdev->block_cnt;
break;
case SCO_LINK: case SCO_LINK:
case ESCO_LINK: case ESCO_LINK:
cnt = hdev->sco_cnt; cnt = hdev->sco_cnt;
@ -3581,12 +3541,6 @@ static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
} }
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
/* Calculate count of blocks used by this packet */
return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type) static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
{ {
unsigned long last_tx; unsigned long last_tx;
@ -3700,81 +3654,15 @@ static void hci_sched_acl_pkt(struct hci_dev *hdev)
hci_prio_recalculate(hdev, ACL_LINK); hci_prio_recalculate(hdev, ACL_LINK);
} }
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
unsigned int cnt = hdev->block_cnt;
struct hci_chan *chan;
struct sk_buff *skb;
int quote;
u8 type;
BT_DBG("%s", hdev->name);
if (hdev->dev_type == HCI_AMP)
type = AMP_LINK;
else
type = ACL_LINK;
__check_timeout(hdev, cnt, type);
while (hdev->block_cnt > 0 &&
(chan = hci_chan_sent(hdev, type, &quote))) {
u32 priority = (skb_peek(&chan->data_q))->priority;
while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
int blocks;
BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
skb->len, skb->priority);
/* Stop if priority has changed */
if (skb->priority < priority)
break;
skb = skb_dequeue(&chan->data_q);
blocks = __get_blocks(hdev, skb);
if (blocks > hdev->block_cnt)
return;
hci_conn_enter_active_mode(chan->conn,
bt_cb(skb)->force_active);
hci_send_frame(hdev, skb);
hdev->acl_last_tx = jiffies;
hdev->block_cnt -= blocks;
quote -= blocks;
chan->sent += blocks;
chan->conn->sent += blocks;
}
}
if (cnt != hdev->block_cnt)
hci_prio_recalculate(hdev, type);
}
static void hci_sched_acl(struct hci_dev *hdev) static void hci_sched_acl(struct hci_dev *hdev)
{ {
BT_DBG("%s", hdev->name); BT_DBG("%s", hdev->name);
/* No ACL link over BR/EDR controller */ /* No ACL link over BR/EDR controller */
if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY) if (!hci_conn_num(hdev, ACL_LINK))
return; return;
/* No AMP link over AMP controller */ hci_sched_acl_pkt(hdev);
if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
return;
switch (hdev->flow_ctl_mode) {
case HCI_FLOW_CTL_MODE_PACKET_BASED:
hci_sched_acl_pkt(hdev);
break;
case HCI_FLOW_CTL_MODE_BLOCK_BASED:
hci_sched_acl_blk(hdev);
break;
}
} }
static void hci_sched_le(struct hci_dev *hdev) static void hci_sched_le(struct hci_dev *hdev)

View File

@ -917,21 +917,6 @@ static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
return rp->status; return rp->status;
} }
static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
struct hci_rp_read_flow_control_mode *rp = data;
bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
return rp->status;
hdev->flow_ctl_mode = rp->mode;
return rp->status;
}
static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data, static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
struct sk_buff *skb) struct sk_buff *skb)
{ {
@ -1075,28 +1060,6 @@ static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
return rp->status; return rp->status;
} }
static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
struct hci_rp_read_data_block_size *rp = data;
bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
return rp->status;
hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
hdev->block_len = __le16_to_cpu(rp->block_len);
hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
hdev->block_cnt = hdev->num_blocks;
BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
hdev->block_cnt, hdev->block_len);
return rp->status;
}
static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data, static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
struct sk_buff *skb) struct sk_buff *skb)
{ {
@ -1131,30 +1094,6 @@ unlock:
return rp->status; return rp->status;
} }
static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
struct hci_rp_read_local_amp_info *rp = data;
bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
return rp->status;
hdev->amp_status = rp->amp_status;
hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
hdev->amp_type = rp->amp_type;
hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
return rp->status;
}
static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data, static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
struct sk_buff *skb) struct sk_buff *skb)
{ {
@ -4134,12 +4073,6 @@ static const struct hci_cc {
HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type, HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
sizeof(struct hci_rp_read_page_scan_type)), sizeof(struct hci_rp_read_page_scan_type)),
HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type), HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
sizeof(struct hci_rp_read_data_block_size)),
HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
sizeof(struct hci_rp_read_flow_control_mode)),
HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
sizeof(struct hci_rp_read_local_amp_info)),
HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock, HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
sizeof(struct hci_rp_read_clock)), sizeof(struct hci_rp_read_clock)),
HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size, HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
@ -4474,11 +4407,6 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
flex_array_size(ev, handles, ev->num))) flex_array_size(ev, handles, ev->num)))
return; return;
if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
return;
}
bt_dev_dbg(hdev, "num %d", ev->num); bt_dev_dbg(hdev, "num %d", ev->num);
for (i = 0; i < ev->num; i++) { for (i = 0; i < ev->num; i++) {
@ -4546,78 +4474,6 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
queue_work(hdev->workqueue, &hdev->tx_work); queue_work(hdev->workqueue, &hdev->tx_work);
} }
static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
__u16 handle)
{
struct hci_chan *chan;
switch (hdev->dev_type) {
case HCI_PRIMARY:
return hci_conn_hash_lookup_handle(hdev, handle);
case HCI_AMP:
chan = hci_chan_lookup_handle(hdev, handle);
if (chan)
return chan->conn;
break;
default:
bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
break;
}
return NULL;
}
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
struct hci_ev_num_comp_blocks *ev = data;
int i;
if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS,
flex_array_size(ev, handles, ev->num_hndl)))
return;
if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
bt_dev_err(hdev, "wrong event for mode %d",
hdev->flow_ctl_mode);
return;
}
bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks,
ev->num_hndl);
for (i = 0; i < ev->num_hndl; i++) {
struct hci_comp_blocks_info *info = &ev->handles[i];
struct hci_conn *conn = NULL;
__u16 handle, block_count;
handle = __le16_to_cpu(info->handle);
block_count = __le16_to_cpu(info->blocks);
conn = __hci_conn_lookup_handle(hdev, handle);
if (!conn)
continue;
conn->sent -= block_count;
switch (conn->type) {
case ACL_LINK:
case AMP_LINK:
hdev->block_cnt += block_count;
if (hdev->block_cnt > hdev->num_blocks)
hdev->block_cnt = hdev->num_blocks;
break;
default:
bt_dev_err(hdev, "unknown type %d conn %p",
conn->type, conn);
break;
}
}
queue_work(hdev->workqueue, &hdev->tx_work);
}
static void hci_mode_change_evt(struct hci_dev *hdev, void *data, static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb) struct sk_buff *skb)
{ {
@ -7531,9 +7387,6 @@ static const struct hci_ev {
/* [0x3e = HCI_EV_LE_META] */ /* [0x3e = HCI_EV_LE_META] */
HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt, HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE), sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
/* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
sizeof(struct hci_ev_num_comp_blocks)),
/* [0xff = HCI_EV_VENDOR] */ /* [0xff = HCI_EV_VENDOR] */
HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE), HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
}; };

View File

@ -485,7 +485,7 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
return NULL; return NULL;
ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE); ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
ni->type = hdev->dev_type; ni->type = 0x00; /* Old hdev->dev_type */
ni->bus = hdev->bus; ni->bus = hdev->bus;
bacpy(&ni->bdaddr, &hdev->bdaddr); bacpy(&ni->bdaddr, &hdev->bdaddr);
memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name, memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name,
@ -1007,9 +1007,6 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (hdev->dev_type != HCI_PRIMARY)
return -EOPNOTSUPP;
switch (cmd) { switch (cmd) {
case HCISETRAW: case HCISETRAW:
if (!capable(CAP_NET_ADMIN)) if (!capable(CAP_NET_ADMIN))

View File

@ -3440,10 +3440,6 @@ static int hci_unconf_init_sync(struct hci_dev *hdev)
/* Read Local Supported Features. */ /* Read Local Supported Features. */
static int hci_read_local_features_sync(struct hci_dev *hdev) static int hci_read_local_features_sync(struct hci_dev *hdev)
{ {
/* Not all AMP controllers support this command */
if (hdev->dev_type == HCI_AMP && !(hdev->commands[14] & 0x20))
return 0;
return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES, return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES,
0, NULL, HCI_CMD_TIMEOUT); 0, NULL, HCI_CMD_TIMEOUT);
} }
@ -3478,51 +3474,6 @@ static int hci_read_local_cmds_sync(struct hci_dev *hdev)
return 0; return 0;
} }
/* Read Local AMP Info */
static int hci_read_local_amp_info_sync(struct hci_dev *hdev)
{
return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_AMP_INFO,
0, NULL, HCI_CMD_TIMEOUT);
}
/* Read Data Blk size */
static int hci_read_data_block_size_sync(struct hci_dev *hdev)
{
return __hci_cmd_sync_status(hdev, HCI_OP_READ_DATA_BLOCK_SIZE,
0, NULL, HCI_CMD_TIMEOUT);
}
/* Read Flow Control Mode */
static int hci_read_flow_control_mode_sync(struct hci_dev *hdev)
{
return __hci_cmd_sync_status(hdev, HCI_OP_READ_FLOW_CONTROL_MODE,
0, NULL, HCI_CMD_TIMEOUT);
}
/* Read Location Data */
static int hci_read_location_data_sync(struct hci_dev *hdev)
{
return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCATION_DATA,
0, NULL, HCI_CMD_TIMEOUT);
}
/* AMP Controller init stage 1 command sequence */
static const struct hci_init_stage amp_init1[] = {
/* HCI_OP_READ_LOCAL_VERSION */
HCI_INIT(hci_read_local_version_sync),
/* HCI_OP_READ_LOCAL_COMMANDS */
HCI_INIT(hci_read_local_cmds_sync),
/* HCI_OP_READ_LOCAL_AMP_INFO */
HCI_INIT(hci_read_local_amp_info_sync),
/* HCI_OP_READ_DATA_BLOCK_SIZE */
HCI_INIT(hci_read_data_block_size_sync),
/* HCI_OP_READ_FLOW_CONTROL_MODE */
HCI_INIT(hci_read_flow_control_mode_sync),
/* HCI_OP_READ_LOCATION_DATA */
HCI_INIT(hci_read_location_data_sync),
{}
};
static int hci_init1_sync(struct hci_dev *hdev) static int hci_init1_sync(struct hci_dev *hdev)
{ {
int err; int err;
@ -3536,28 +3487,9 @@ static int hci_init1_sync(struct hci_dev *hdev)
return err; return err;
} }
switch (hdev->dev_type) { return hci_init_stage_sync(hdev, br_init1);
case HCI_PRIMARY:
hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
return hci_init_stage_sync(hdev, br_init1);
case HCI_AMP:
hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
return hci_init_stage_sync(hdev, amp_init1);
default:
bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
break;
}
return 0;
} }
/* AMP Controller init stage 2 command sequence */
static const struct hci_init_stage amp_init2[] = {
/* HCI_OP_READ_LOCAL_FEATURES */
HCI_INIT(hci_read_local_features_sync),
{}
};
/* Read Buffer Size (ACL mtu, max pkt, etc.) */ /* Read Buffer Size (ACL mtu, max pkt, etc.) */
static int hci_read_buffer_size_sync(struct hci_dev *hdev) static int hci_read_buffer_size_sync(struct hci_dev *hdev)
{ {
@ -3815,9 +3747,6 @@ static int hci_init2_sync(struct hci_dev *hdev)
bt_dev_dbg(hdev, ""); bt_dev_dbg(hdev, "");
if (hdev->dev_type == HCI_AMP)
return hci_init_stage_sync(hdev, amp_init2);
err = hci_init_stage_sync(hdev, hci_init2); err = hci_init_stage_sync(hdev, hci_init2);
if (err) if (err)
return err; return err;
@ -4655,13 +4584,6 @@ static int hci_init_sync(struct hci_dev *hdev)
if (err < 0) if (err < 0)
return err; return err;
/* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
* BR/EDR/LE type controllers. AMP controllers only need the
* first two stages of init.
*/
if (hdev->dev_type != HCI_PRIMARY)
return 0;
err = hci_init3_sync(hdev); err = hci_init3_sync(hdev);
if (err < 0) if (err < 0)
return err; return err;
@ -4890,12 +4812,8 @@ int hci_dev_open_sync(struct hci_dev *hdev)
* In case of user channel usage, it is not important * In case of user channel usage, it is not important
* if a public address or static random address is * if a public address or static random address is
* available. * available.
*
* This check is only valid for BR/EDR controllers
* since AMP controllers do not have an address.
*/ */
if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
hdev->dev_type == HCI_PRIMARY &&
!bacmp(&hdev->bdaddr, BDADDR_ANY) && !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
!bacmp(&hdev->static_addr, BDADDR_ANY)) { !bacmp(&hdev->static_addr, BDADDR_ANY)) {
ret = -EADDRNOTAVAIL; ret = -EADDRNOTAVAIL;
@ -4930,8 +4848,7 @@ int hci_dev_open_sync(struct hci_dev *hdev)
!hci_dev_test_flag(hdev, HCI_CONFIG) && !hci_dev_test_flag(hdev, HCI_CONFIG) &&
!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
hci_dev_test_flag(hdev, HCI_MGMT) && hci_dev_test_flag(hdev, HCI_MGMT)) {
hdev->dev_type == HCI_PRIMARY) {
ret = hci_powered_update_sync(hdev); ret = hci_powered_update_sync(hdev);
mgmt_power_on(hdev, ret); mgmt_power_on(hdev, ret);
} }
@ -5077,8 +4994,7 @@ int hci_dev_close_sync(struct hci_dev *hdev)
auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF); auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
if (!auto_off && hdev->dev_type == HCI_PRIMARY && if (!auto_off && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
hci_dev_test_flag(hdev, HCI_MGMT)) hci_dev_test_flag(hdev, HCI_MGMT))
__mgmt_power_off(hdev); __mgmt_power_off(hdev);
@ -5140,9 +5056,6 @@ int hci_dev_close_sync(struct hci_dev *hdev)
hdev->flags &= BIT(HCI_RAW); hdev->flags &= BIT(HCI_RAW);
hci_dev_clear_volatile_flags(hdev); hci_dev_clear_volatile_flags(hdev);
/* Controller radio is available but is currently powered down */
hdev->amp_status = AMP_STATUS_POWERED_DOWN;
memset(hdev->eir, 0, sizeof(hdev->eir)); memset(hdev->eir, 0, sizeof(hdev->eir));
memset(hdev->dev_class, 0, sizeof(hdev->dev_class)); memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
bacpy(&hdev->random_addr, BDADDR_ANY); bacpy(&hdev->random_addr, BDADDR_ANY);
@ -5179,8 +5092,7 @@ static int hci_power_on_sync(struct hci_dev *hdev)
*/ */
if (hci_dev_test_flag(hdev, HCI_RFKILLED) || if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
hci_dev_test_flag(hdev, HCI_UNCONFIGURED) || hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
(hdev->dev_type == HCI_PRIMARY && (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
!bacmp(&hdev->static_addr, BDADDR_ANY))) { !bacmp(&hdev->static_addr, BDADDR_ANY))) {
hci_dev_clear_flag(hdev, HCI_AUTO_OFF); hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
hci_dev_close_sync(hdev); hci_dev_close_sync(hdev);
@ -5283,27 +5195,11 @@ int hci_stop_discovery_sync(struct hci_dev *hdev)
return 0; return 0;
} }
static int hci_disconnect_phy_link_sync(struct hci_dev *hdev, u16 handle,
u8 reason)
{
struct hci_cp_disconn_phy_link cp;
memset(&cp, 0, sizeof(cp));
cp.phy_handle = HCI_PHY_HANDLE(handle);
cp.reason = reason;
return __hci_cmd_sync_status(hdev, HCI_OP_DISCONN_PHY_LINK,
sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn, static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
u8 reason) u8 reason)
{ {
struct hci_cp_disconnect cp; struct hci_cp_disconnect cp;
if (conn->type == AMP_LINK)
return hci_disconnect_phy_link_sync(hdev, conn->handle, reason);
if (test_bit(HCI_CONN_BIG_CREATED, &conn->flags)) { if (test_bit(HCI_CONN_BIG_CREATED, &conn->flags)) {
/* This is a BIS connection, hci_conn_del will /* This is a BIS connection, hci_conn_del will
* do the necessary cleanup. * do the necessary cleanup.

View File

@ -3930,7 +3930,7 @@ static inline int l2cap_command_rej(struct l2cap_conn *conn,
} }
static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
u8 *data, u8 rsp_code, u8 amp_id) u8 *data, u8 rsp_code)
{ {
struct l2cap_conn_req *req = (struct l2cap_conn_req *) data; struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
struct l2cap_conn_rsp rsp; struct l2cap_conn_rsp rsp;
@ -4009,17 +4009,8 @@ static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
status = L2CAP_CS_AUTHOR_PEND; status = L2CAP_CS_AUTHOR_PEND;
chan->ops->defer(chan); chan->ops->defer(chan);
} else { } else {
/* Force pending result for AMP controllers. l2cap_state_change(chan, BT_CONNECT2);
* The connection will succeed after the result = L2CAP_CR_PEND;
* physical link is up.
*/
if (amp_id == AMP_ID_BREDR) {
l2cap_state_change(chan, BT_CONFIG);
result = L2CAP_CR_SUCCESS;
} else {
l2cap_state_change(chan, BT_CONNECT2);
result = L2CAP_CR_PEND;
}
status = L2CAP_CS_NO_INFO; status = L2CAP_CS_NO_INFO;
} }
} else { } else {
@ -4084,7 +4075,7 @@ static int l2cap_connect_req(struct l2cap_conn *conn,
mgmt_device_connected(hdev, hcon, NULL, 0); mgmt_device_connected(hdev, hcon, NULL, 0);
hci_dev_unlock(hdev); hci_dev_unlock(hdev);
l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0); l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP);
return 0; return 0;
} }
@ -7497,10 +7488,6 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
struct l2cap_conn *conn = hcon->l2cap_data; struct l2cap_conn *conn = hcon->l2cap_data;
int len; int len;
/* For AMP controller do not create l2cap conn */
if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
goto drop;
if (!conn) if (!conn)
conn = l2cap_conn_add(hcon); conn = l2cap_conn_add(hcon);

View File

@ -443,8 +443,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
count = 0; count = 0;
list_for_each_entry(d, &hci_dev_list, list) { list_for_each_entry(d, &hci_dev_list, list) {
if (d->dev_type == HCI_PRIMARY && if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
!hci_dev_test_flag(d, HCI_UNCONFIGURED))
count++; count++;
} }
@ -468,8 +467,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks)) if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
continue; continue;
if (d->dev_type == HCI_PRIMARY && if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
rp->index[count++] = cpu_to_le16(d->id); rp->index[count++] = cpu_to_le16(d->id);
bt_dev_dbg(hdev, "Added hci%u", d->id); bt_dev_dbg(hdev, "Added hci%u", d->id);
} }
@ -503,8 +501,7 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
count = 0; count = 0;
list_for_each_entry(d, &hci_dev_list, list) { list_for_each_entry(d, &hci_dev_list, list) {
if (d->dev_type == HCI_PRIMARY && if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
hci_dev_test_flag(d, HCI_UNCONFIGURED))
count++; count++;
} }
@ -528,8 +525,7 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks)) if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
continue; continue;
if (d->dev_type == HCI_PRIMARY && if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
rp->index[count++] = cpu_to_le16(d->id); rp->index[count++] = cpu_to_le16(d->id);
bt_dev_dbg(hdev, "Added hci%u", d->id); bt_dev_dbg(hdev, "Added hci%u", d->id);
} }
@ -561,10 +557,8 @@ static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
read_lock(&hci_dev_list_lock); read_lock(&hci_dev_list_lock);
count = 0; count = 0;
list_for_each_entry(d, &hci_dev_list, list) { list_for_each_entry(d, &hci_dev_list, list)
if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP) count++;
count++;
}
rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC); rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
if (!rp) { if (!rp) {
@ -585,16 +579,10 @@ static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks)) if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
continue; continue;
if (d->dev_type == HCI_PRIMARY) { if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) rp->entry[count].type = 0x01;
rp->entry[count].type = 0x01; else
else rp->entry[count].type = 0x00;
rp->entry[count].type = 0x00;
} else if (d->dev_type == HCI_AMP) {
rp->entry[count].type = 0x02;
} else {
continue;
}
rp->entry[count].bus = d->bus; rp->entry[count].bus = d->bus;
rp->entry[count++].index = cpu_to_le16(d->id); rp->entry[count++].index = cpu_to_le16(d->id);
@ -9324,23 +9312,14 @@ void mgmt_index_added(struct hci_dev *hdev)
if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
return; return;
switch (hdev->dev_type) { if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
case HCI_PRIMARY: mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { HCI_MGMT_UNCONF_INDEX_EVENTS);
mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, ev.type = 0x01;
NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS); } else {
ev.type = 0x01; mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
} else { HCI_MGMT_INDEX_EVENTS);
mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, ev.type = 0x00;
HCI_MGMT_INDEX_EVENTS);
ev.type = 0x00;
}
break;
case HCI_AMP:
ev.type = 0x02;
break;
default:
return;
} }
ev.bus = hdev->bus; ev.bus = hdev->bus;
@ -9357,25 +9336,16 @@ void mgmt_index_removed(struct hci_dev *hdev)
if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
return; return;
switch (hdev->dev_type) { mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
case HCI_PRIMARY:
mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS); HCI_MGMT_UNCONF_INDEX_EVENTS);
ev.type = 0x01; ev.type = 0x01;
} else { } else {
mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
HCI_MGMT_INDEX_EVENTS); HCI_MGMT_INDEX_EVENTS);
ev.type = 0x00; ev.type = 0x00;
}
break;
case HCI_AMP:
ev.type = 0x02;
break;
default:
return;
} }
ev.bus = hdev->bus; ev.bus = hdev->bus;

View File

@ -3995,6 +3995,9 @@ int tcp_connect(struct sock *sk)
*/ */
WRITE_ONCE(tp->snd_nxt, tp->write_seq); WRITE_ONCE(tp->snd_nxt, tp->write_seq);
tp->pushed_seq = tp->write_seq; tp->pushed_seq = tp->write_seq;
trace_android_vh_tcp_connect(buff);
buff = tcp_send_head(sk); buff = tcp_send_head(sk);
if (unlikely(buff)) { if (unlikely(buff)) {
WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq); WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq);

View File

@ -369,7 +369,7 @@ $(obj)/bindings/bindings_helpers_generated.rs: $(src)/helpers.c FORCE
quiet_cmd_exports = EXPORTS $@ quiet_cmd_exports = EXPORTS $@
cmd_exports = \ cmd_exports = \
$(NM) -p --defined-only $< \ $(NM) -p --defined-only $< \
| awk '$$2~/(T|R|D)/ && $$3!~/__cfi/ {printf "EXPORT_SYMBOL_RUST_GPL(%s);\n",$$3}' > $@ | awk '$$2~/(T|R|D|B)/ && $$3!~/__cfi/ {printf "EXPORT_SYMBOL_RUST_GPL(%s);\n",$$3}' > $@
$(obj)/exports_core_generated.h: $(obj)/core.o FORCE $(obj)/exports_core_generated.h: $(obj)/core.o FORCE
$(call if_changed,exports) $(call if_changed,exports)

View File

@ -13,7 +13,7 @@
#include <linux/export.h> #include <linux/export.h>
#define EXPORT_SYMBOL_RUST_GPL(sym) #define EXPORT_SYMBOL_RUST_GPL(sym) extern int sym; EXPORT_SYMBOL_GPL(sym)
#include "exports_core_generated.h" #include "exports_core_generated.h"
#include "exports_alloc_generated.h" #include "exports_alloc_generated.h"

View File

@ -1960,9 +1960,9 @@ static void add_versions(struct buffer *b, struct module *mod)
continue; continue;
} }
if (strlen(s->name) >= MODULE_NAME_LEN) { if (strlen(s->name) >= MODULE_NAME_LEN) {
error("too long symbol \"%s\" [%s.ko]\n", warn("too long symbol \"%s\" [%s.ko]\n",
s->name, mod->name); s->name, mod->name);
break; continue;
} }
buf_printf(b, "\t{ %#8x, \"%s\" },\n", buf_printf(b, "\t{ %#8x, \"%s\" },\n",
s->crc, s->name); s->crc, s->name);