Mirror of https://github.com/nxp-imx/linux-imx.git (synced 2025-07-10 11:25:18 +02:00)
Merge branch 'android15-6.6' into android15-6.6-lts
Catch up with the big backmerge in order to sync up with everything.

Changes included in here:

375ddbe924 ANDROID: KVM: arm64: Fix hyp module base address in pkvm_el2_mod_va()
54fe7d362f Merge tag 'android15-6.6.46_r00' into android15-6.6
d9de370ec0 UPSTREAM: mm: zswap: fix zswap_never_enabled() for CONFIG_ZSWAP==N
e603a53edb BACKPORT: mm: zswap: add zswap_never_enabled()
6bdb6c7cb1 BACKPORT: mm/vmscan: drop checking if _deferred_list is empty before using TTU_SYNC
66f7137c92 BACKPORT: mm: remove folio_test_anon(folio)==false path in __folio_add_anon_rmap()
7882adc2b6 UPSTREAM: mm: use folio_add_new_anon_rmap() if folio_test_anon(folio)==false
870700e450 BACKPORT: mm: extend rmap flags arguments for folio_add_new_anon_rmap
4c68f621cd BACKPORT: mm: remove page_add_new_anon_rmap and lru_cache_add_inactive_or_unevictable
4988260399 BACKPORT: wifi: mac80211: Use flexible array in struct ieee80211_tim_ie
cfdfb5e043 UPSTREAM: mm: convert collapse_huge_page() to use a folio
297debad09 UPSTREAM: mm: convert migrate_vma_insert_page() to use a folio
b2debfc14b BACKPORT: mm: remove references to page_add_new_anon_rmap in comments
453679c573 UPSTREAM: mm: remove stale example from comment
10338a2e16 BACKPORT: mm: convert unuse_pte() to use a folio throughout
38de31bd03 BACKPORT: mm: remove PageAnonExclusive assertions in unuse_pte()
fe52579d8c UPSTREAM: mm: convert ksm_might_need_to_copy() to work on folios
7f4f768e38 ANDROID: GKI: update symbol list for honor
b4a27402dc ANDROID: GKI: modify vendor hook name for Honor ogki
561b551d17 ANDROID: KVM: arm64: Remove unnecessary stage2_unmap_clear_pte from stage2_make_pte
459097f391 ANDROID: KVM: arm64: Disallow kvm_pgtable_stage2_unmap on host S2
34fc61f8ef FROMGIT: BACKPORT: KVM: arm64: Don't pass a TLBI level hint when zapping table entries
517b52f760 FROMGIT: BACKPORT: KVM: arm64: Don't defer TLB invalidation when zapping table entries
dbaa197dec FROMGIT: KVM: arm64: Ensure TLBI uses correct VMID after changing context
c7e69c2cf0 FROMGIT: KVM: arm64: Invalidate EL1&0 TLB entries for all VMIDs in nvhe hyp init
3f4c0346d3 ANDROID: drivers/arm-smmu-v3-kvm: Add SMMUs to domain for live devices
341b6a0a7d ANDROID: drivers/arm-smmu-v3-kvm: Invalidate ASID/VMID on detach
396901e4b7 ANDROID: drivers/arm-smmu-v3-kvm: Ensure domain_id is in range
b7d3ae5e08 ANDROID: drivers/arm-smmu-v3-kvm: Fix wrong type cast in capable
98fd837574 ANDROID: GKI: Add symbol to symbol list for imx
08b53b0b1a ANDROID: Add CtsCameraTestCases to the kernel-presubmit group
b42ed94769 FROMLIST: binder: fix UAF caused by offsets overwrite
cfd01b503f BACKPORT: binder: remove redundant variable page_addr
8da6283ef3 ANDROID: GKI: Update symbols to symbol list for honor
d21d656201 ANDROID: signal: Add vendor hook for memory reap
a94ba5ab28 BACKPORT: timekeeping: Use READ/WRITE_ONCE() for tick_do_timer_cpu
4b38cee3d2 ANDROID: GKI: Update symbols to symbol list
65ebb00fe7 ANDROID: vendor_hooks: add hook to perform targeted memory management
800cd29117 ANDROID: ABI: update symbol list for honor
0517467c26 ANDROID: vendor_hooks: add hook to optimize the madvise processing flow
d974a8b61d ANDROID: GKI: Update symbol list for mtk
e7c05152aa BACKPORT: thermal: gov_power_allocator: Avoid overwriting PID coefficients from setup time
bea8a4babb UPSTREAM: scsi: ufs: Uninitialized variable in ufshcd_devfreq_target()
1896bfd941 UPSTREAM: pmdomain: arm: Fix NULL dereference on scmi_perf_domain removal
9b2996c782 UPSTREAM: mm/userfaultfd: UFFDIO_MOVE implementation should use ptep_get()
82094b2e32 UPSTREAM: ceph: always set initial i_blkbits to CEPH_FSCRYPT_BLOCK_SHIFT
837cb4d209 UPSTREAM: usb: gadget: uvc: Remove nested locking
d897e12e91 UPSTREAM: pmdomain: arm: Avoid polling for scmi_perf_domain
6dcad5bb22 FROMLIST: scsi: ufs: core: Fix the code for entering hibernation
4c0eff913d FROMLIST: scsi: ufs: core: Make ufshcd_uic_cmd_compl() easier to read
b7459d5905 FROMGIT: rcu/nocb: Fix RT throttling hrtimer armed from offline CPU
3921b71388 ANDROID: GKI: update symbol list for honor
eba98a6e2a ANDROID: GKI: Add vendor hook define for Honor ogki
d9ce6b0b1b ANDROID: Add sound/usb/card.h to aarch64 allowlist
bdd9aa1db4 ANDROID: Add aoc-usb headers to aarch64 allowlist
6c3bccd3d8 ANDROID: GKI: Update symbol list for vivo
3cfe1de3c6 FROMGIT: tcp: Update window clamping condition
c8968f29f4 ANDROID: vendor_hooks: Fix incorrect declaration of restricted hook
1933a7b8ab ANDROID: GKI: Update symbols to symbol list
0d9cb95184 ANDROID: GKI: Add hooks for socket management.
dbfe4cdfa6 FROMGIT: workqueue: add cmdline parameter workqueue.panic_on_stall
24fc5d66a1 ANDROID: abi_gki_aarch64_vivo: Update symbol list
2041959e88 ANDROID: vendor_hooks: add hook in create_worker()
aeaa3ba902 ANDROID: KVM: Fix fast-forward size in pkvm_mem_abort_range()

Change-Id: I5b892e833ebe82502dcf6b731d2207383566cb63
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in: commit 1bd56c7d11

BUILD.bazel | 22
@@ -876,6 +876,24 @@ ddk_headers(
# Implementation details for DDK headers. The targets below cannot be directly
# depended on by DDK modules.

# Headers needed to include drivers/usb/host/xhci.h.
ddk_headers(
    name = "xhci_headers",
    hdrs = [
        "drivers/usb/core/hub.h",
        "drivers/usb/core/usb.h",
        "drivers/usb/host/pci-quirks.h",
        "drivers/usb/host/xhci.h",
        "drivers/usb/host/xhci-ext-caps.h",
        "drivers/usb/host/xhci-plat.h",
    ],
    linux_includes = [
        "drivers/usb",
        "drivers/usb/host",
    ],
    visibility = ["//visibility:private"],
)

# DDK headers allowlist. This is the list of all headers and include
# directories that are safe to use in DDK modules.
ddk_headers(
@@ -883,8 +901,11 @@ ddk_headers(
    hdrs = [
        "drivers/thermal/thermal_core.h",
        "drivers/thermal/thermal_netlink.h",
        "sound/usb/card.h",
        "sound/usb/usbaudio.h",
        ":all_headers_allowlist_aarch64_globs",
        ":all_headers_allowlist_common_globs",
        ":xhci_headers",
    ],
    # The list of include directories where source files can #include headers
    # from. In other words, these are the `-I` option to the C compiler.
@@ -893,6 +914,7 @@ ddk_headers(
        "arch/arm64/include",
        "arch/arm64/include/uapi",
        "drivers/thermal",
        "sound/usb",
        "include",
        "include/uapi",
    ],
File diff suppressed because it is too large
@@ -56,6 +56,8 @@
tty_set_ldisc
tty_kclose
__kfifo_len_r
__traceiter_android_vh_killed_process
__tracepoint_android_vh_killed_process
__traceiter_android_vh_rwsem_write_wait_finish
__tracepoint_android_vh_rwsem_write_wait_finish
__tracepoint_android_rvh_cpuinfo_c_show
@@ -64,8 +66,18 @@
__traceiter_android_vh_dc_send_copy
__tracepoint_android_vh_dc_receive
__traceiter_android_vh_dc_receive
__traceiter_android_vh_inet_create
__tracepoint_android_vh_inet_create
__traceiter_android_vh_uplink_send_msg
__tracepoint_android_vh_uplink_send_msg
__traceiter_android_vh_sock_create
__tracepoint_android_vh_sock_create
__traceiter_android_vh_modify_scan_control
__traceiter_android_vh_should_continue_reclaim
__tracepoint_android_vh_process_madvise_begin
__traceiter_android_vh_process_madvise_begin
__tracepoint_android_vh_process_madvise_iter
__traceiter_android_vh_process_madvise_iter
__traceiter_android_vh_file_is_tiny_bypass
__tracepoint_android_vh_modify_scan_control
__tracepoint_android_vh_should_continue_reclaim
@@ -80,6 +92,8 @@
__traceiter_android_vh_slab_alloc_node
__tracepoint_android_vh_slab_free
__traceiter_android_vh_slab_free
__traceiter_android_vh_should_fault_around
__tracepoint_android_vh_should_fault_around
__traceiter_android_vh_tcp_connect
__tracepoint_android_vh_tcp_connect
__traceiter_android_vh_tcp_write_timeout_estab_retrans
@@ -124,3 +138,71 @@
__tracepoint_android_vh_sk_free
__traceiter_android_vh_sk_clone_lock
__tracepoint_android_vh_sk_clone_lock
__traceiter_android_rvh_ogki_vfree_bypass
__traceiter_android_rvh_ogki_vmalloc_node_bypass
__traceiter_android_vh_ogki_async_psi_bypass
__traceiter_android_vh_ogki_f2fs_dsm
__traceiter_android_vh_ogki_f2fs_dsm_get
__traceiter_android_vh_ogki_check_vip_status
__traceiter_android_vh_ogki_cma_alloc_retry
__traceiter_android_vh_ogki_ufs_dsm
__tracepoint_android_rvh_ogki_vfree_bypass
__tracepoint_android_rvh_ogki_vmalloc_node_bypass
__tracepoint_android_vh_ogki_async_psi_bypass
__tracepoint_android_vh_ogki_f2fs_dsm
__tracepoint_android_vh_ogki_f2fs_dsm_get
__tracepoint_android_vh_ogki_check_vip_status
__tracepoint_android_vh_ogki_cma_alloc_retry
__tracepoint_android_vh_ogki_ufs_dsm
__traceiter_android_vh_ogki_tcp_srtt_estimator
__tracepoint_android_vh_ogki_tcp_srtt_estimator
__traceiter_android_vh_ogki_tcp_rcv_estab_fastpath
__tracepoint_android_vh_ogki_tcp_rcv_estab_fastpath
__traceiter_android_vh_ogki_tcp_rcv_estab_slowpath
__tracepoint_android_vh_ogki_tcp_rcv_estab_slowpath
__traceiter_android_vh_ogki_tcp_rcv_rtt_update
__tracepoint_android_vh_ogki_tcp_rcv_rtt_update
__traceiter_android_vh_ogki_tcp_retransmit_timer
__tracepoint_android_vh_ogki_tcp_retransmit_timer
__traceiter_android_vh_ogki_udp_unicast_rcv_skb
__tracepoint_android_vh_ogki_udp_unicast_rcv_skb
__traceiter_android_vh_ogki_udp6_unicast_rcv_skb
__tracepoint_android_vh_ogki_udp6_unicast_rcv_skb
__tracepoint_android_rvh_ogki_task_util
__traceiter_android_rvh_ogki_task_util
__tracepoint_android_rvh_ogki_uclamp_task_util
__traceiter_android_rvh_ogki_uclamp_task_util
__tracepoint_android_rvh_ogki_get_task_tags
__traceiter_android_rvh_ogki_get_task_tags
__tracepoint_android_rvh_ogki_get_task_rsum
__traceiter_android_rvh_ogki_get_task_rsum
__tracepoint_android_rvh_ogki_check_task_tags
__traceiter_android_rvh_ogki_check_task_tags
__tracepoint_android_vh_ogki_audit_log_cfi
__tracepoint_android_rvh_ogki_audit_log_usercopy
__tracepoint_android_rvh_ogki_audit_log_module_sign
__tracepoint_android_vh_ogki_audit_log_setid
__traceiter_android_vh_ogki_audit_log_cfi
__traceiter_android_rvh_ogki_audit_log_usercopy
__traceiter_android_rvh_ogki_audit_log_module_sign
__traceiter_android_vh_ogki_audit_log_setid
__traceiter_android_vh_ogki_get_log_usertype
__tracepoint_android_vh_ogki_get_log_usertype
__traceiter_android_rvh_ogki_hievent_create
__tracepoint_android_rvh_ogki_hievent_create
__traceiter_android_rvh_ogki_hievent_put_string
__tracepoint_android_rvh_ogki_hievent_put_string
__traceiter_android_rvh_ogki_hievent_put_integral
__tracepoint_android_rvh_ogki_hievent_put_integral
__traceiter_android_rvh_ogki_hievent_report
__tracepoint_android_rvh_ogki_hievent_report
__traceiter_android_rvh_ogki_hievent_destroy
__tracepoint_android_rvh_ogki_hievent_destroy
__traceiter_android_vh_ogki_hievent_to_jank
__tracepoint_android_vh_ogki_hievent_to_jank
__tracepoint_android_vh_ogki_set_wifi_state_connect
__traceiter_android_vh_ogki_set_wifi_state_connect
__tracepoint_android_vh_ogki_set_wifi_state_disconnect
__traceiter_android_vh_ogki_set_wifi_state_disconnect
__traceiter_android_vh_ogki_kmem_cache_create_usercopy
__tracepoint_android_vh_ogki_kmem_cache_create_usercopy
@@ -267,14 +267,17 @@
debugfs_remove
debugfs_rename
default_llseek
default_qdisc_ops
default_wake_function
delayed_work_timer_fn
destroy_workqueue
dev_activate
dev_addr_mod
dev_alloc_name
__dev_change_net_namespace
dev_close
_dev_crit
dev_deactivate
dev_driver_string
_dev_err
dev_err_probe
@@ -291,7 +294,9 @@
__dev_fwnode_const
__dev_get_by_index
dev_get_by_index
__dev_get_by_name
dev_get_regmap
dev_graft_qdisc
device_add
device_create
device_create_file
@@ -875,6 +880,8 @@
ethtool_aggregate_rmon_stats
ethtool_convert_legacy_u32_to_link_mode
ethtool_convert_link_mode_to_legacy_u32
ethtool_dev_mm_supported
__ethtool_get_link_ksettings
ethtool_op_get_link
ethtool_op_get_ts_info
eth_type_trans
@@ -895,6 +902,7 @@
firmware_kobj
fixed_size_llseek
flow_block_cb_setup_simple
flow_rule_alloc
flow_rule_match_basic
flow_rule_match_eth_addrs
flow_rule_match_ipv4_addrs
@@ -982,6 +990,11 @@
get_random_u8
get_unused_fd_flags
get_user_pages
gnet_stats_add_basic
gnet_stats_add_queue
gnet_stats_basic_sync_init
gnet_stats_copy_basic
gnet_stats_copy_queue
gpiochip_add_data_with_key
gpiochip_disable_irq
gpiochip_enable_irq
@@ -1082,12 +1095,15 @@
ida_destroy
ida_free
idr_alloc
idr_alloc_u32
idr_destroy
idr_find
idr_for_each
idr_get_next
idr_get_next_ul
idr_preload
idr_remove
idr_replace
ignore_console_lock_warning
iio_alloc_pollfunc
iio_buffer_enabled
@@ -1206,6 +1222,7 @@
iw_handler_set_spy
iw_handler_set_thrspy
jiffies
jiffies_to_clock_t
jiffies_to_msecs
jiffies_to_usecs
kasan_flag_enabled
@@ -1273,6 +1290,7 @@
ktime_get_seconds
ktime_get_ts64
ktime_get_with_offset
ktime_mono_to_any
kvfree
kvfree_call_rcu
kvmalloc_node
@@ -1400,6 +1418,7 @@
__module_get
module_layout
module_put
mq_change_real_num_tx
__msecs_to_jiffies
msi_domain_get_virq
msleep
@@ -1443,10 +1462,12 @@
netif_receive_skb
netif_receive_skb_list
netif_rx
__netif_schedule
netif_schedule_queue
netif_set_real_num_rx_queues
netif_set_real_num_tx_queues
netif_set_tso_max_segs
netif_skb_features
netif_tx_lock
netif_tx_stop_all_queues
netif_tx_unlock
@@ -1468,9 +1489,11 @@
nla_put
nla_put_64bit
nla_reserve
nla_strscpy
__nla_validate
nonseekable_open
noop_llseek
noop_qdisc
nr_cpu_ids
nsecs_to_jiffies
ns_to_timespec64
@@ -1674,6 +1697,8 @@
perf_pmu_unregister
perf_trace_buf_alloc
perf_trace_run_bpf_submit
pfifo_fast_ops
pfifo_qdisc_ops
pfn_is_map_memory
phy_attached_info
phy_basic_t1_features
@@ -1882,9 +1907,21 @@
pwm_apply_state
pwmchip_add
pwmchip_remove
__qdisc_calculate_pkt_len
qdisc_create_dflt
qdisc_hash_add
qdisc_offload_query_caps
qdisc_put
qdisc_reset
qdisc_tree_reduce_backlog
qdisc_watchdog_cancel
qdisc_watchdog_init
qdisc_watchdog_init_clockid
qdisc_watchdog_schedule_range_ns
queue_delayed_work_on
queue_work_on
radix_tree_insert
radix_tree_tagged
___ratelimit
rational_best_approximation
raw_notifier_call_chain
@@ -1933,11 +1970,13 @@
register_oom_notifier
register_pernet_device
register_pm_notifier
register_qdisc
register_reboot_notifier
register_restart_handler
__register_rpmsg_driver
register_shrinker
register_syscore_ops
register_tcf_proto_ops
register_virtio_device
register_virtio_driver
regmap_add_irq_chip
@@ -2016,7 +2055,9 @@
rfkill_resume_polling
rfkill_set_hw_state_reason
rfkill_unregister
rhashtable_destroy
rhashtable_free_and_destroy
rhashtable_init
rhashtable_insert_slow
rhltable_init
__rht_bucket_nested
@@ -2063,6 +2104,7 @@
rt_mutex_trylock
rt_mutex_unlock
rtnl_is_locked
rtnl_kfree_skbs
rtnl_lock
rtnl_unlock
sched_clock
@@ -2155,6 +2197,12 @@
skb_csum_hwoffload_help
skb_dequeue
skb_ensure_writable
__skb_flow_dissect
skb_flow_dissect_ct
skb_flow_dissect_hash
skb_flow_dissect_meta
skb_flow_dissector_init
skb_flow_dissect_tunnel_info
__skb_get_hash
__skb_gso_segment
skb_pull
@@ -2299,6 +2347,7 @@
soc_device_match
soc_device_register
__sock_create
sock_queue_err_skb
sock_release
sort
__spi_alloc_controller
@@ -2387,6 +2436,35 @@
__tasklet_schedule
tasklet_setup
tasklet_unlock_wait
tc_cleanup_offload_action
tcf_action_check_ctrlact
tcf_action_exec
tcf_action_set_ctrlact
tcf_action_update_hw_stats
tcf_action_update_stats
tcf_chain_put_by_act
tcf_exts_destroy
tcf_exts_dump
tcf_exts_dump_stats
tcf_exts_init_ex
tcf_exts_num_actions
tcf_exts_terse_dump
tcf_exts_validate_ex
tcf_idr_check_alloc
tcf_idr_cleanup
tcf_idr_create_from_flags
tcf_idrinfo_destroy
tcf_idr_release
tcf_queue_work
tcf_register_action
tcf_unregister_action
tc_setup_cb_add
tc_setup_cb_call
tc_setup_cb_destroy
tc_setup_cb_reoffload
tc_setup_offload_action
tc_skb_ext_tc_disable
tc_skb_ext_tc_enable
tegra_mc_probe_device
tegra_sku_info
thermal_cooling_device_unregister
@@ -2471,9 +2549,11 @@
unregister_netdevice_queue
unregister_oom_notifier
unregister_pernet_device
unregister_qdisc
unregister_reboot_notifier
unregister_rpmsg_driver
unregister_shrinker
unregister_tcf_proto_ops
unregister_virtio_device
unregister_virtio_driver
up
@@ -2029,6 +2029,7 @@
phy_basic_t1_features
phy_connect
phy_connect_direct
phy_create_lookup
phy_disconnect
phy_do_ioctl_running
phy_drivers_register
@@ -33,6 +33,8 @@
__kmalloc_node
__lock_buffer
mipi_dsi_dcs_write
__neigh_create
netdev_is_rx_handler_busy
noop_qdisc
of_css
pfifo_qdisc_ops
@@ -81,6 +83,7 @@
__traceiter_android_rvh_bpf_skb_load_bytes
__traceiter_android_rvh_check_preempt_wakeup
__traceiter_android_rvh_cpufreq_transition
__traceiter_android_rvh_create_worker
__traceiter_android_rvh_dequeue_task_fair
__traceiter_android_rvh_enqueue_task_fair
__traceiter_android_rvh_inet_sock_create
@@ -198,6 +201,7 @@
__tracepoint_android_rvh_bpf_skb_load_bytes
__tracepoint_android_rvh_check_preempt_wakeup
__tracepoint_android_rvh_cpufreq_transition
__tracepoint_android_rvh_create_worker
__tracepoint_android_rvh_dequeue_task_fair
__tracepoint_android_rvh_enqueue_task_fair
__tracepoint_android_rvh_inet_sock_create
@@ -279,11 +279,11 @@ int pkvm_load_early_modules(void);
 */
#define pkvm_el2_mod_va(kern_va, token) \
({ \
    unsigned long hyp_text_kern_va = \
        (unsigned long)THIS_MODULE->arch.hyp.text.start; \
    unsigned long hyp_mod_kern_va = \
        (unsigned long)THIS_MODULE->arch.hyp.sections.start; \
    unsigned long offset; \
 \
    offset = (unsigned long)kern_va - hyp_text_kern_va; \
    offset = (unsigned long)kern_va - hyp_mod_kern_va; \
    token + offset; \
})
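The fix swaps the base used for the offset: the EL2 token maps the start of the module's whole hyp section range (arch.hyp.sections.start), not just its text section, so subtracting the text base mis-translates any symbol that does not share .text's offset. A minimal userspace sketch of the same base-plus-offset arithmetic, with hypothetical addresses (not kernel code):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical layout of a module's hyp sections (kernel VAs). */
#define SECTIONS_START 0x40001000UL  /* arch.hyp.sections.start */
#define TEXT_START     0x40002000UL  /* arch.hyp.text.start, 0x1000 further in */
#define EL2_TOKEN      0x90000000UL  /* EL2 VA where sections.start is mapped */

static uintptr_t el2_va(uintptr_t kern_va, uintptr_t base)
{
    /* token + (kern_va - base): only correct if base is what token maps */
    return EL2_TOKEN + (kern_va - base);
}

int main(void)
{
    uintptr_t sym = 0x40003000UL; /* some hyp symbol's kernel VA */

    /* Old (buggy): offset taken from .text start, off by the gap between
     * sections.start and text.start. */
    printf("buggy: %#lx\n", (unsigned long)el2_va(sym, TEXT_START));

    /* Fixed: offset taken from sections.start, matching the token. */
    printf("fixed: %#lx\n", (unsigned long)el2_va(sym, SECTIONS_START));
    return 0;
}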
@@ -141,7 +141,7 @@ alternative_else_nop_endif

    /* Invalidate the stale TLBs from Bootloader */
    tlbi    alle2
    tlbi    vmalls12e1
    tlbi    alle1
    dsb     sy

    mov_q   x0, INIT_SCTLR_EL2_MMU_ON
@@ -129,10 +129,10 @@ static void exit_vmid_context(struct tlb_inv_context *cxt)
    else
        __load_host_stage2();

    if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
    /* Ensure write of the old VMID */
    isb();

    if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
        if (!(cxt->sctlr & SCTLR_ELx_M)) {
            write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
            isb();
@@ -27,9 +27,6 @@ struct kvm_pgtable_walk_data {
    const u64 end;
};

static void stage2_unmap_clear_pte(const struct kvm_pgtable_visit_ctx *ctx,
                                   struct kvm_s2_mmu *mmu);

static bool kvm_pgtable_walk_skip_bbm_tlbi(const struct kvm_pgtable_visit_ctx *ctx)
{
    return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_BBM_TLBI);
@@ -483,7 +480,7 @@ static int hyp_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,

    kvm_clear_pte(ctx->ptep);
    dsb(ishst);
    __tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
    __tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), 0);
} else {
    if (ctx->end - ctx->addr < granule)
        return -EINVAL;
@@ -812,8 +809,7 @@ static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
    return true;
}

static void stage2_make_pte(const struct kvm_pgtable_visit_ctx *ctx,
                            kvm_pte_t new, struct kvm_s2_mmu *mmu)
static void stage2_make_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
{
    struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
    struct kvm_pgtable_pte_ops *pte_ops = ctx->pte_ops;
@@ -822,8 +818,6 @@ static void stage2_make_pte(const struct kvm_pgtable_visit_ctx *ctx,

    if (pte_ops->pte_is_counted_cb(new, ctx->level))
        mm_ops->get_page(ctx->ptep);
    else
        stage2_unmap_clear_pte(ctx, mmu);

    smp_store_release(ctx->ptep, new);
}
@@ -848,9 +842,13 @@ static void stage2_unmap_clear_pte(const struct kvm_pgtable_visit_ctx *ctx,
    if (kvm_pte_valid(ctx->old)) {
        kvm_clear_pte(ctx->ptep);

        if (!stage2_unmap_defer_tlb_flush(pgt))
            kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
                         ctx->addr, ctx->level);
        if (kvm_pte_table(ctx->old, ctx->level)) {
            kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr,
                         0);
        } else if (!stage2_unmap_defer_tlb_flush(pgt)) {
            kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr,
                         ctx->level);
        }
    }
}

@@ -973,7 +971,7 @@ static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
        stage2_pte_executable(new))
        mm_ops->icache_inval_pou(kvm_pte_follow(new, mm_ops), granule);

    stage2_make_pte(ctx, new, data->mmu);
    stage2_make_pte(ctx, new);

    return 0;
}
@@ -1075,7 +1073,7 @@ static int stage2_map_walk_leaf(const struct kvm_pgtable_visit_ctx *ctx,
     * will be mapped lazily.
     */
    new = kvm_init_table_pte(childp, mm_ops);
    stage2_make_pte(ctx, new, data->mmu);
    stage2_make_pte(ctx, new);
    return 0;
}

@@ -1139,7 +1137,7 @@ static int stage2_coalesce_walk_table_post(const struct kvm_pgtable_visit_ctx *c
    /* Host doesn't require CMOs. */
    WARN_ON(mm_ops->dcache_clean_inval_poc || mm_ops->icache_inval_pou);

    stage2_make_pte(ctx, new, data->mmu);
    stage2_make_pte(ctx, new);

    /* Finally, free the unlinked table. */
    mm_ops->put_page(childp);
@@ -1291,6 +1289,18 @@ int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
        .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
    };

    /*
     * stage2_unmap_walker's TLBI logic is unsafe for the pKVM host stage-2
     * table because a child table may have a refcount of 1 while still
     * containing valid mappings. The use of __kvm_tlb_flush_vmid_ipa in
     * stage2_unmap_clear_pte is then insufficient to invalidate all leaf
     * mappings reachable from the child table. All other stage-2 tables
     * hold a reference for every non-zero PTE, and are thus guaranteed to
     * be completely empty when refcount is 1.
     */
    if (WARN_ON(pgt->flags & KVM_PGTABLE_S2_IDMAP))
        return -EINVAL;

    ret = kvm_pgtable_walk(pgt, addr, size, &walker);
    if (stage2_unmap_defer_tlb_flush(pgt))
        /* Perform the deferred TLB invalidations */
@@ -1694,7 +1704,7 @@ static int stage2_split_walker(const struct kvm_pgtable_visit_ctx *ctx,
     * writes the PTE using smp_store_release().
     */
    new = kvm_init_table_pte(childp, mm_ops);
    stage2_make_pte(ctx, new, mmu);
    stage2_make_pte(ctx, new);
    dsb(ishst);
    return 0;
}
@@ -1804,7 +1804,7 @@ int pkvm_mem_abort_range(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, size_t si
    read_lock(&vcpu->kvm->mmu_lock);
    ppage = find_ppage_or_above(vcpu->kvm, fault_ipa);

    while (size) {
    while (fault_ipa < ipa_end) {
        if (ppage && ppage->ipa == fault_ipa) {
            page_size = PAGE_SIZE << ppage->order;
            ppage = mt_next(&vcpu->kvm->arch.pkvm.pinned_pages,
@@ -1832,11 +1832,10 @@ int pkvm_mem_abort_range(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, size_t si
             * We had to release the mmu_lock so let's update the
             * reference.
             */
            ppage = find_ppage_or_above(vcpu->kvm, fault_ipa + PAGE_SIZE);
            ppage = find_ppage_or_above(vcpu->kvm, fault_ipa + page_size);
        }

        size = size_sub(size, PAGE_SIZE);
        fault_ipa += PAGE_SIZE;
        fault_ipa += page_size;
    }
end:
    read_unlock(&vcpu->kvm->mmu_lock);
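The fast-forward fix advances the cursor by the size of the block actually covered (PAGE_SIZE << order for a pinned page) instead of always stepping one page, and terminates on the IPA cursor rather than a separately maintained size. A self-contained sketch of the corrected walk, with hypothetical inputs:

#include <stdio.h>

#define PAGE_SIZE 4096UL

struct pinned { unsigned long ipa; unsigned int order; };

/* Mirrors the fixed pkvm_mem_abort_range() loop: when a pinned page covers
 * the cursor, fast-forward by its full block size, not one PAGE_SIZE. */
static void walk(unsigned long ipa, unsigned long ipa_end,
                 const struct pinned *p, int n)
{
    int i = 0;

    while (ipa < ipa_end) {
        unsigned long step = PAGE_SIZE;

        if (i < n && p[i].ipa == ipa) {
            step = PAGE_SIZE << p[i].order; /* skip the whole pinned block */
            i++;
        } else {
            printf("handle fault at %#lx\n", ipa);
        }
        ipa += step; /* the bug was always stepping PAGE_SIZE here */
    }
}

int main(void)
{
    const struct pinned pins[] = { { 0x0, 2 } }; /* order-2 block at IPA 0 */

    walk(0x0, 8 * PAGE_SIZE, pins, 1); /* faults only for pages 4..7 */
    return 0;
}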
@@ -255,5 +255,15 @@
        }
      ]
    }
  ],
  "kernel-presubmit": [
    {
      "name": "CtsCameraTestCases",
      "options": [
        {
          "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
        }
      ]
    }
  ]
}
@@ -255,5 +255,15 @@
        }
      ]
    }
  ],
  "kernel-presubmit": [
    {
      "name": "CtsCameraTestCases",
      "options": [
        {
          "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
        }
      ]
    }
  ]
}
@@ -255,5 +255,15 @@
        }
      ]
    }
  ],
  "kernel-presubmit": [
    {
      "name": "CtsCameraTestCases",
      "options": [
        {
          "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
        }
      ]
    }
  ]
}
@@ -263,5 +263,15 @@
        }
      ]
    }
  ],
  "kernel-presubmit": [
    {
      "name": "CtsCameraTestCases",
      "options": [
        {
          "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
        }
      ]
    }
  ]
}
@@ -255,5 +255,15 @@
        }
      ]
    }
  ],
  "kernel-presubmit": [
    {
      "name": "CtsCameraTestCases",
      "options": [
        {
          "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
        }
      ]
    }
  ]
}
@@ -3634,6 +3634,7 @@ static void binder_transaction(struct binder_proc *proc,
         */
        copy_size = object_offset - user_offset;
        if (copy_size && (user_offset > object_offset ||
                          object_offset > tr->data_size ||
                          binder_alloc_copy_user_to_buffer(
                                &target_proc->alloc,
                                t->buffer, user_offset,
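The added comparisons reject a transaction whose next object offset rewinds behind data already copied or points past the end of the buffer; without them a crafted offset list could overwrite an already-validated object, which is the root cause of the UAF this commit fixes. A standalone sketch of that bounds check, using hypothetical sizes:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Enforce the sorted-offsets invariant before copying each chunk: the next
 * object offset may never rewind behind the copy cursor, nor point past the
 * end of the transaction data. */
static bool copy_chunk_ok(size_t user_offset, size_t object_offset,
                          size_t data_size)
{
    size_t copy_size = object_offset - user_offset; /* unsigned: wraps on rewind */

    if (copy_size && (user_offset > object_offset || object_offset > data_size))
        return false; /* rewind or out of bounds: reject the transaction */
    return true;
}

int main(void)
{
    printf("forward ok: %d\n", copy_chunk_ok(16, 32, 64)); /* 1 */
    printf("rewind bad: %d\n", copy_chunk_ok(32, 16, 64)); /* 0 */
    printf("oob bad:    %d\n", copy_chunk_ok(16, 96, 64)); /* 0 */
    return 0;
}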
@@ -937,7 +937,6 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
    int i;

    for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
        unsigned long page_addr;
        bool on_lru;

        if (!alloc->pages[i].page_ptr)
@@ -945,7 +944,6 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)

        on_lru = list_lru_del(&binder_freelist,
                              &alloc->pages[i].lru);
        page_addr = alloc->buffer + i * PAGE_SIZE;
        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%s: %d: page %d %s\n",
                           __func__, alloc->pid, i,
@@ -26,6 +26,7 @@
#include <trace/hooks/fips140.h>
#include <trace/hooks/ufshcd.h>
#include <trace/hooks/mmc.h>
#include <trace/hooks/ogki_honor.h>
#include <trace/hooks/cgroup.h>
#include <trace/hooks/sys.h>
#include <trace/hooks/madvise.h>
@@ -98,6 +99,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_udp_enqueue_schedule_skb);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_build_skb_around);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_refrigerator);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_send_sig_info);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_killed_process);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_wait_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_wait_finish);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_init);
@@ -127,6 +129,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpu_idle_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mpam_set);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_wq_lockup_pool);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_alloc_and_link_pwqs);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_create_worker);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ipi_stop);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sysrq_crash);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_printk_hotplug);
@@ -236,6 +239,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_irqs_disable);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_irqs_enable);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_is_fpsimd_save);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_slab_folio_alloced);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_process_madvise_begin);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_process_madvise_iter);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_kmalloc_large_alloced);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_swappiness);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sched_show_task);
@@ -289,6 +294,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gzvm_vcpu_exit_reason);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alter_mutex_list_add);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_unlock_slowpath);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_save_track_hash);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_should_fault_around);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mem_cgroup_id_remove);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mem_cgroup_css_offline);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mem_cgroup_css_online);
@@ -410,6 +416,9 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_swapmem_gather_finish);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_oom_swapmem_gather_init);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_oom_swapmem_gather_finish);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_f2fs_file_open);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_uplink_send_msg);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_inet_create);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sock_create);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_uid);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_user);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_save_vmalloc_stack);
@@ -492,3 +501,38 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_dma_heap_buffer_alloc_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_dma_heap_buffer_alloc_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_zs_shrinker_adjust);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_zs_shrinker_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ogki_get_log_usertype);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ogki_hievent_to_jank);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_ogki_hievent_create);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_ogki_hievent_put_string);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_ogki_hievent_put_integral);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_ogki_hievent_report);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_ogki_hievent_destroy);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ogki_check_vip_status);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_ogki_task_util);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_ogki_uclamp_task_util);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_ogki_get_task_tags);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_ogki_get_task_rsum);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_ogki_check_task_tags);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ogki_ufs_dsm);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ogki_f2fs_dsm);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ogki_f2fs_dsm_get);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ogki_set_wifi_state_connect);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ogki_set_wifi_state_disconnect);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ogki_audit_log_setid);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ogki_audit_log_cfi);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_ogki_audit_log_usercopy);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_ogki_audit_log_module_sign);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ogki_cma_alloc_retry);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ogki_async_psi_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_ogki_vmalloc_node_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_ogki_vfree_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ogki_tcp_srtt_estimator);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ogki_tcp_rcv_estab_fastpath);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ogki_tcp_rcv_estab_slowpath);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ogki_tcp_rcv_rtt_update);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ogki_tcp_retransmit_timer);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ogki_udp_unicast_rcv_skb);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ogki_udp6_unicast_rcv_skb);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ogki_kmem_cache_create_usercopy);
@@ -255,5 +255,15 @@
        }
      ]
    }
  ],
  "kernel-presubmit": [
    {
      "name": "CtsCameraTestCases",
      "options": [
        {
          "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
        }
      ]
    }
  ]
}
@@ -204,5 +204,15 @@
        }
      ]
    }
  ],
  "kernel-presubmit": [
    {
      "name": "CtsCameraTestCases",
      "options": [
        {
          "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
        }
      ]
    }
  ]
}
@@ -255,5 +255,15 @@
        }
      ]
    }
  ],
  "kernel-presubmit": [
    {
      "name": "CtsCameraTestCases",
      "options": [
        {
          "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
        }
      ]
    }
  ]
}
@@ -228,5 +228,15 @@
        }
      ]
    }
  ],
  "kernel-presubmit": [
    {
      "name": "CtsCameraTestCases",
      "options": [
        {
          "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
        }
      ]
    }
  ]
}
@@ -255,5 +255,15 @@
        }
      ]
    }
  ],
  "kernel-presubmit": [
    {
      "name": "CtsCameraTestCases",
      "options": [
        {
          "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
        }
      ]
    }
  ]
}
@@ -207,6 +207,7 @@ static int kvm_arm_smmu_domain_finalize(struct kvm_arm_smmu_domain *kvm_smmu_dom
    int ret = 0;
    struct arm_smmu_device *smmu = master->smmu;
    struct host_arm_smmu_device *host_smmu = smmu_to_host(smmu);
    unsigned int max_domains;

    if (kvm_smmu_domain->smmu) {
        if (kvm_smmu_domain->smmu != smmu)
@@ -225,25 +226,34 @@ static int kvm_arm_smmu_domain_finalize(struct kvm_arm_smmu_domain *kvm_smmu_dom
        return 0;
    }

    ret = ida_alloc_range(&kvm_arm_smmu_domain_ida, KVM_IOMMU_DOMAIN_NR_START,
                          KVM_IOMMU_MAX_DOMAINS, GFP_KERNEL);
    if (ret < 0)
        return ret;

    kvm_smmu_domain->id = ret;

    /* Default to stage-1. */
    if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) {
        kvm_smmu_domain->type = KVM_ARM_SMMU_DOMAIN_S1;
        kvm_smmu_domain->domain.pgsize_bitmap = host_smmu->cfg_s1.pgsize_bitmap;
        kvm_smmu_domain->domain.geometry.aperture_end = (1UL << host_smmu->cfg_s1.ias) - 1;
        max_domains = 1 << smmu->asid_bits;
    } else {
        kvm_smmu_domain->type = KVM_ARM_SMMU_DOMAIN_S2;
        kvm_smmu_domain->domain.pgsize_bitmap = host_smmu->cfg_s2.pgsize_bitmap;
        kvm_smmu_domain->domain.geometry.aperture_end = (1UL << host_smmu->cfg_s2.ias) - 1;
        max_domains = 1 << smmu->vmid_bits;
    }
    kvm_smmu_domain->domain.geometry.force_aperture = true;

    /*
     * The hypervisor uses the domain_id for asid/vmid so it has to be
     * unique, and it has to be in range of this smmu, which can be
     * either 8 or 16 bits, this can be improved a bit to make
     * 16 bit asids or vmids allocate from the end of the range to
     * give more chance to the smmus with 8 bits.
     */
    ret = ida_alloc_range(&kvm_arm_smmu_domain_ida, KVM_IOMMU_DOMAIN_NR_START,
                          min(KVM_IOMMU_MAX_DOMAINS, max_domains), GFP_KERNEL);
    if (ret < 0)
        return ret;

    kvm_smmu_domain->id = ret;

    ret = kvm_call_hyp_nvhe_mc(__pkvm_host_iommu_alloc_domain,
                               kvm_smmu_domain->id, kvm_smmu_domain->type);

@@ -442,8 +452,24 @@ static int kvm_arm_smmu_def_domain_type(struct device *dev)
    return 0;
}

static bool kvm_arm_smmu_capable(struct device *dev, enum iommu_cap cap)
{
    struct kvm_arm_smmu_master *master = dev_iommu_priv_get(dev);

    switch (cap) {
    case IOMMU_CAP_CACHE_COHERENCY:
        /* Assume that a coherent TCU implies coherent TBUs */
        return master->smmu->features & ARM_SMMU_FEAT_COHERENCY;
    case IOMMU_CAP_NOEXEC:
    case IOMMU_CAP_DEFERRED_FLUSH:
        return true;
    default:
        return false;
    }
}

static struct iommu_ops kvm_arm_smmu_ops = {
    .capable = arm_smmu_capable,
    .capable = kvm_arm_smmu_capable,
    .device_group = arm_smmu_device_group,
    .of_xlate = arm_smmu_of_xlate,
    .get_resv_regions = arm_smmu_get_resv_regions,
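Because the hypervisor reuses the domain ID as the ASID or VMID, the allocator's ceiling now has to fit the SMMU's ID width as well as the global maximum. A small sketch of the clamping arithmetic, assuming hypothetical 8- and 16-bit widths:

#include <stdio.h>

/* Domain IDs double as ASID/VMID, so the allocator ceiling must fit the
 * SMMU's ID width (8 or 16 bits), mirroring max_domains in the diff. */
static unsigned int max_domains(unsigned int id_bits, unsigned int global_max)
{
    unsigned int hw_max = 1u << id_bits;

    return hw_max < global_max ? hw_max : global_max; /* min() */
}

int main(void)
{
    printf("8-bit SMMU:  %u\n", max_domains(8, 1u << 16));  /* 256 */
    printf("16-bit SMMU: %u\n", max_domains(16, 1u << 16)); /* 65536 */
    return 0;
}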
@@ -530,13 +530,11 @@ static struct hyp_arm_smmu_v3_device *to_smmu(struct kvm_hyp_iommu *iommu)
    return container_of(iommu, struct hyp_arm_smmu_v3_device, iommu);
}

static void smmu_tlb_flush_all(void *cookie)
static void smmu_inv_domain(struct hyp_arm_smmu_v3_device *smmu,
                            struct hyp_arm_smmu_v3_domain *smmu_domain)
{
    struct kvm_hyp_iommu_domain *domain = cookie;
    struct hyp_arm_smmu_v3_domain *smmu_domain = domain->priv;
    struct hyp_arm_smmu_v3_device *smmu;
    struct domain_iommu_node *iommu_node;
    struct arm_smmu_cmdq_ent cmd;
    struct kvm_hyp_iommu_domain *domain = smmu_domain->domain;
    struct arm_smmu_cmdq_ent cmd = {};

    if (smmu_domain->pgtable->cfg.fmt == ARM_64_LPAE_S2) {
        cmd.opcode = CMDQ_OP_TLBI_S12_VMALL;
@@ -544,19 +542,26 @@ static void smmu_tlb_flush_all(void *cookie)
    } else {
        cmd.opcode = CMDQ_OP_TLBI_NH_ASID;
        cmd.tlbi.asid = domain->domain_id;
        /* Domain ID is unique across all VMs. */
        cmd.tlbi.vmid = 0;
    }

    if (smmu->iommu.power_is_off && smmu->caches_clean_on_power_on)
        return;

    WARN_ON(smmu_send_cmd(smmu, &cmd));
}

static void smmu_tlb_flush_all(void *cookie)
{
    struct kvm_hyp_iommu_domain *domain = cookie;
    struct hyp_arm_smmu_v3_domain *smmu_domain = domain->priv;
    struct hyp_arm_smmu_v3_device *smmu;
    struct domain_iommu_node *iommu_node;

    hyp_read_lock(&smmu_domain->lock);
    list_for_each_entry(iommu_node, &smmu_domain->iommu_list, list) {
        smmu = to_smmu(iommu_node->iommu);
        kvm_iommu_lock(&smmu->iommu);
        if (smmu->iommu.power_is_off && smmu->caches_clean_on_power_on) {
            kvm_iommu_unlock(&smmu->iommu);
            continue;
        }
        WARN_ON(smmu_send_cmd(smmu, &cmd));
        smmu_inv_domain(smmu, smmu_domain);
        kvm_iommu_unlock(&smmu->iommu);
    }
    hyp_read_unlock(&smmu_domain->lock);
@@ -1082,11 +1087,15 @@ static int smmu_attach_dev(struct kvm_hyp_iommu *iommu, struct kvm_hyp_iommu_dom
    WRITE_ONCE(dst[0], cpu_to_le64(ent[0]));
    ret = smmu_sync_ste(smmu, dst, sid);
    WARN_ON(ret);
    if (iommu_node)
        list_add_tail(&iommu_node->list, &smmu_domain->iommu_list);

out_unlock:
    if (ret && iommu_node)
    if (iommu_node) {
        if (ret)
            hyp_free(iommu_node);
        else
            list_add_tail(&iommu_node->list, &smmu_domain->iommu_list);
    }

    kvm_iommu_unlock(iommu);
    hyp_write_unlock(&smmu_domain->lock);
    return ret;
@@ -1143,6 +1152,12 @@ static int smmu_detach_dev(struct kvm_hyp_iommu *iommu, struct kvm_hyp_iommu_dom
        ret = smmu_sync_ste(smmu, dst, sid);
    }

    /*
     * Ensure no stale tlb enteries when domain_id
     * is re-used for this SMMU.
     */
    smmu_inv_domain(smmu, smmu_domain);

    smmu_put_ref_domain(smmu, smmu_domain);
out_unlock:
    kvm_iommu_unlock(iommu);
|
|||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"kernel-presubmit": [
|
||||
{
|
||||
"name": "CtsCameraTestCases",
|
||||
"options": [
|
||||
{
|
||||
"include-filter": "android.hardware.camera2.cts.FastBasicsTest"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
|
@ -255,5 +255,15 @@
|
|||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"kernel-presubmit": [
|
||||
{
|
||||
"name": "CtsCameraTestCases",
|
||||
"options": [
|
||||
{
|
||||
"include-filter": "android.hardware.camera2.cts.FastBasicsTest"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
|
@ -35,7 +35,7 @@ scmi_pd_set_perf_state(struct generic_pm_domain *genpd, unsigned int state)
|
|||
if (!state)
|
||||
return -EINVAL;
|
||||
|
||||
ret = pd->perf_ops->level_set(pd->ph, pd->domain_id, state, true);
|
||||
ret = pd->perf_ops->level_set(pd->ph, pd->domain_id, state, false);
|
||||
if (ret)
|
||||
dev_warn(&genpd->dev, "Failed with %d when trying to set %d perf level",
|
||||
ret, state);
|
||||
|
@ -159,6 +159,9 @@ static void scmi_perf_domain_remove(struct scmi_device *sdev)
|
|||
struct genpd_onecell_data *scmi_pd_data = dev_get_drvdata(dev);
|
||||
int i;
|
||||
|
||||
if (!scmi_pd_data)
|
||||
return;
|
||||
|
||||
of_genpd_del_provider(dev->of_node);
|
||||
|
||||
for (i = 0; i < scmi_pd_data->num_domains; i++)
|
||||
|
|
|
@@ -658,6 +658,8 @@ static int power_allocator_bind(struct thermal_zone_device *tz)

    if (!tz->tzp->sustainable_power)
        dev_warn(&tz->device, "power_allocator: sustainable_power will be estimated\n");
    else
        params->sustainable_power = tz->tzp->sustainable_power;

    get_governor_trips(tz, params);
@@ -1411,7 +1411,7 @@ static int ufshcd_devfreq_target(struct device *dev,
    int ret = 0;
    struct ufs_hba *hba = dev_get_drvdata(dev);
    ktime_t start;
    bool scale_up, sched_clk_scaling_suspend_work = false;
    bool scale_up = false, sched_clk_scaling_suspend_work = false;
    struct list_head *clk_list = &hba->clk_list_head;
    struct ufs_clk_info *clki;
    unsigned long irq_flags;
@@ -2459,6 +2459,7 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
    ufshcd_hold(hba);
    mutex_lock(&hba->uic_cmd_mutex);
    ufshcd_add_delay_before_dme_cmd(hba);
    WARN_ON(hba->uic_async_done);

    ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
    if (!ret)
@@ -4163,7 +4164,6 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
    unsigned long flags;
    u8 status;
    int ret;
    bool reenable_intr = false;

    mutex_lock(&hba->uic_cmd_mutex);
    ufshcd_add_delay_before_dme_cmd(hba);
@@ -4174,15 +4174,6 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
        goto out_unlock;
    }
    hba->uic_async_done = &uic_async_done;
    if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
        ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
        /*
         * Make sure UIC command completion interrupt is disabled before
         * issuing UIC command.
         */
        ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
        reenable_intr = true;
    }
    spin_unlock_irqrestore(hba->host->host_lock, flags);
    ret = __ufshcd_send_uic_cmd(hba, cmd, false);
    if (ret) {
@@ -4226,8 +4217,6 @@ out:
    spin_lock_irqsave(hba->host->host_lock, flags);
    hba->active_uic_cmd = NULL;
    hba->uic_async_done = NULL;
    if (reenable_intr)
        ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
    if (ret) {
        ufshcd_set_link_broken(hba);
        ufshcd_schedule_eh_work(hba);
@@ -5386,31 +5375,31 @@ static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
    irqreturn_t retval = IRQ_NONE;
    struct uic_command *cmd;

    spin_lock(hba->host->host_lock);
    cmd = hba->active_uic_cmd;
    if (ufshcd_is_auto_hibern8_error(hba, intr_status))
        hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);

    if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
        hba->active_uic_cmd->argument2 |=
            ufshcd_get_uic_cmd_result(hba);
        hba->active_uic_cmd->argument3 =
            ufshcd_get_dme_attr_val(hba);
        if (!hba->uic_async_done)
            hba->active_uic_cmd->cmd_active = 0;
        complete(&hba->active_uic_cmd->done);
    if (intr_status & UIC_COMMAND_COMPL && cmd) {
        if (!hba->uic_async_done) {
            cmd->argument2 |= ufshcd_get_uic_cmd_result(hba);
            cmd->argument3 = ufshcd_get_dme_attr_val(hba);
            cmd->cmd_active = 0;
            complete(&cmd->done);
        }
        retval = IRQ_HANDLED;
    }

    if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
        hba->active_uic_cmd->cmd_active = 0;
    if (intr_status & UFSHCD_UIC_PWR_MASK && hba->uic_async_done) {
        cmd->cmd_active = 0;
        complete(hba->uic_async_done);
        retval = IRQ_HANDLED;
    }

    if (retval == IRQ_HANDLED)
        ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
                                     UFS_CMD_COMP);
        ufshcd_add_uic_command_trace(hba, cmd, UFS_CMD_COMP);
    spin_unlock(hba->host->host_lock);
    return retval;
}
|
|||
* Put request back in req_free for it to be cleaned
|
||||
* up later.
|
||||
*/
|
||||
uvcg_queue_cancel(queue, 0);
|
||||
list_add_tail(&to_queue->list, &video->req_free);
|
||||
}
|
||||
} else {
|
||||
uvc_video_free_request(ureq, ep);
|
||||
ret = 0;
|
||||
}
|
||||
spin_unlock_irqrestore(&video->req_lock, flags);
|
||||
if (ret < 0)
|
||||
uvcg_queue_cancel(queue, 0);
|
||||
}
|
||||
|
||||
static int
|
||||
|
|
|
@ -255,5 +255,15 @@
|
|||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"kernel-presubmit": [
|
||||
{
|
||||
"name": "CtsCameraTestCases",
|
||||
"options": [
|
||||
{
|
||||
"include-filter": "android.hardware.camera2.cts.FastBasicsTest"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
|
@ -242,6 +242,14 @@
|
|||
}
|
||||
],
|
||||
"kernel-presubmit": [
|
||||
{
|
||||
"name": "CtsCameraTestCases",
|
||||
"options": [
|
||||
{
|
||||
"include-filter": "android.hardware.camera2.cts.FastBasicsTest"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "libdm_test"
|
||||
},
|
||||
|
|
|
@ -78,6 +78,8 @@ struct inode *ceph_new_inode(struct inode *dir, struct dentry *dentry,
|
|||
if (!inode)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
inode->i_blkbits = CEPH_FSCRYPT_BLOCK_SHIFT;
|
||||
|
||||
if (!S_ISLNK(*mode)) {
|
||||
err = ceph_pre_init_acls(dir, mode, as_ctx);
|
||||
if (err < 0)
|
||||
|
|
|
@ -255,5 +255,15 @@
|
|||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"kernel-presubmit": [
|
||||
{
|
||||
"name": "CtsCameraTestCases",
|
||||
"options": [
|
||||
{
|
||||
"include-filter": "android.hardware.camera2.cts.FastBasicsTest"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
|
@ -255,5 +255,15 @@
|
|||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"kernel-presubmit": [
|
||||
{
|
||||
"name": "CtsCameraTestCases",
|
||||
"options": [
|
||||
{
|
||||
"include-filter": "android.hardware.camera2.cts.FastBasicsTest"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
|
@ -263,5 +263,15 @@
|
|||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"kernel-presubmit": [
|
||||
{
|
||||
"name": "CtsCameraTestCases",
|
||||
"options": [
|
||||
{
|
||||
"include-filter": "android.hardware.camera2.cts.FastBasicsTest"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
|
@ -263,5 +263,15 @@
|
|||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"kernel-presubmit": [
|
||||
{
|
||||
"name": "CtsCameraTestCases",
|
||||
"options": [
|
||||
{
|
||||
"include-filter": "android.hardware.camera2.cts.FastBasicsTest"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
|
@ -87,7 +87,7 @@ static inline void ksm_exit(struct mm_struct *mm)
|
|||
* We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
|
||||
* but what if the vma was unmerged while the page was swapped out?
|
||||
*/
|
||||
struct page *ksm_might_need_to_copy(struct page *page,
|
||||
struct folio *ksm_might_need_to_copy(struct folio *folio,
|
||||
struct vm_area_struct *vma, unsigned long addr);
|
||||
|
||||
void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
|
||||
|
@ -140,10 +140,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static inline struct page *ksm_might_need_to_copy(struct page *page,
|
||||
static inline struct folio *ksm_might_need_to_copy(struct folio *folio,
|
||||
struct vm_area_struct *vma, unsigned long addr)
|
||||
{
|
||||
return page;
|
||||
return folio;
|
||||
}
|
||||
|
||||
static inline void rmap_walk_ksm(struct folio *folio,
|
||||
|
|
|
@ -112,4 +112,6 @@ extern void oom_killer_enable(void);
|
|||
|
||||
extern struct task_struct *find_lock_task_mm(struct task_struct *p);
|
||||
|
||||
/* call for adding killed process to reaper. */
|
||||
extern void add_to_oom_reaper(struct task_struct *p);
|
||||
#endif /* _INCLUDE_LINUX_OOM_H */
|
||||
|
|
|
@ -243,10 +243,8 @@ void folio_add_anon_rmap_ptes(struct folio *, struct page *, int nr_pages,
|
|||
folio_add_anon_rmap_ptes(folio, page, 1, vma, address, flags)
|
||||
void folio_add_anon_rmap_pmd(struct folio *, struct page *,
|
||||
struct vm_area_struct *, unsigned long address, rmap_t flags);
|
||||
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
|
||||
unsigned long address);
|
||||
void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
|
||||
unsigned long address);
|
||||
unsigned long address, rmap_t flags);
|
||||
void folio_add_file_rmap_ptes(struct folio *, struct page *, int nr_pages,
|
||||
struct vm_area_struct *);
|
||||
#define folio_add_file_rmap_pte(folio, page, vma) \
|
||||
|
|
|
@ -421,9 +421,6 @@ void folio_deactivate(struct folio *folio);
|
|||
void folio_mark_lazyfree(struct folio *folio);
|
||||
extern void swap_setup(void);
|
||||
|
||||
extern void lru_cache_add_inactive_or_unevictable(struct page *page,
|
||||
struct vm_area_struct *vma);
|
||||
|
||||
/* linux/mm/vmscan.c */
|
||||
extern unsigned long zone_reclaimable_pages(struct zone *zone);
|
||||
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
|
||||
|
|
|
@ -15,6 +15,7 @@ bool zswap_load(struct folio *folio);
|
|||
void zswap_invalidate(int type, pgoff_t offset);
|
||||
void zswap_swapon(int type);
|
||||
void zswap_swapoff(int type);
|
||||
bool zswap_never_enabled(void);
|
||||
|
||||
#else
|
||||
|
||||
|
@ -32,6 +33,11 @@ static inline void zswap_invalidate(int type, pgoff_t offset) {}
|
|||
static inline void zswap_swapon(int type) {}
|
||||
static inline void zswap_swapoff(int type) {}
|
||||
|
||||
static inline bool zswap_never_enabled(void)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* _LINUX_ZSWAP_H */
|
||||
|
|
|
@ -263,5 +263,15 @@
|
|||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"kernel-presubmit": [
|
||||
{
|
||||
"name": "CtsCameraTestCases",
|
||||
"options": [
|
||||
{
|
||||
"include-filter": "android.hardware.camera2.cts.FastBasicsTest"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
|
@ -263,5 +263,15 @@
|
|||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"kernel-presubmit": [
|
||||
{
|
||||
"name": "CtsCameraTestCases",
|
||||
"options": [
|
||||
{
|
||||
"include-filter": "android.hardware.camera2.cts.FastBasicsTest"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
|
@ -54,6 +54,12 @@ DECLARE_HOOK(android_vh_si_meminfo_adjust,
|
|||
DECLARE_HOOK(android_vh_slab_folio_alloced,
|
||||
TP_PROTO(unsigned int order, gfp_t flags),
|
||||
TP_ARGS(order, flags));
|
||||
DECLARE_HOOK(android_vh_process_madvise_begin,
|
||||
TP_PROTO(struct task_struct *task, int behavior),
|
||||
TP_ARGS(task, behavior));
|
||||
DECLARE_HOOK(android_vh_process_madvise_iter,
|
||||
TP_PROTO(struct task_struct *task, int behavior, ssize_t *ret),
|
||||
TP_ARGS(task, behavior, ret));
|
||||
DECLARE_HOOK(android_vh_kmalloc_large_alloced,
|
||||
TP_PROTO(struct page *page, unsigned int order, gfp_t flags),
|
||||
TP_ARGS(page, order, flags));
|
||||
|
@ -127,6 +133,9 @@ DECLARE_HOOK(android_vh_mem_cgroup_css_offline,
|
|||
DECLARE_HOOK(android_vh_save_track_hash,
|
||||
TP_PROTO(bool alloc, struct track *p),
|
||||
TP_ARGS(alloc, p));
|
||||
DECLARE_HOOK(android_vh_should_fault_around,
|
||||
TP_PROTO(struct vm_fault *vmf, bool *should_around),
|
||||
TP_ARGS(vmf, should_around));
|
||||
DECLARE_HOOK(android_vh_kmalloc_slab,
|
||||
TP_PROTO(unsigned int index, gfp_t flags, struct kmem_cache **s),
|
||||
TP_ARGS(index, flags, s));
|
||||
|
|
|
@ -83,6 +83,12 @@ DECLARE_HOOK(android_vh_udp_v4_connect,
|
|||
TP_ARGS(sk, daddr, dport, family));
|
||||
DECLARE_HOOK(android_vh_udp_v6_connect,
|
||||
TP_PROTO(struct sock *sk, struct sockaddr_in6 *sin6), TP_ARGS(sk, sin6));
|
||||
DECLARE_HOOK(android_vh_inet_create,
|
||||
TP_PROTO(struct sock *sk, bool err), TP_ARGS(sk, err));
|
||||
DECLARE_HOOK(android_vh_uplink_send_msg,
|
||||
TP_PROTO(struct sock *sk), TP_ARGS(sk));
|
||||
DECLARE_HOOK(android_vh_sock_create,
|
||||
TP_PROTO(struct sock *sk), TP_ARGS(sk));
|
||||
DECLARE_HOOK(android_vh_tcp_rtt_estimator,
|
||||
TP_PROTO(struct sock *sk, long mrtt_us), TP_ARGS(sk, mrtt_us));
|
||||
DECLARE_HOOK(android_vh_udp_enqueue_schedule_skb,
|
||||
|
|
include/trace/hooks/ogki_honor.h | 123 (new file)
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ogki_honor
+#ifdef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_PATH
+#endif
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_OGKI_HONOR_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_OGKI_HONOR_H
+
+#include <trace/hooks/vendor_hooks.h>
+
+struct task_struct;
+struct sock;
+struct sk_buff;
+struct ufs_hba;
+struct tcp_sock;
+struct net_device;
+struct cfg80211_registered_device;
+DECLARE_HOOK(android_vh_ogki_async_psi_bypass,
+	TP_PROTO(bool *bypass),
+	TP_ARGS(bypass));
+DECLARE_HOOK(android_vh_ogki_ufs_clock_scaling,
+	TP_PROTO(struct ufs_hba *hba, bool *force_out, bool *force_scaling, bool *scale_up),
+	TP_ARGS(hba, force_out, force_scaling, scale_up));
+DECLARE_HOOK(android_vh_ogki_ufs_dsm,
+	TP_PROTO(struct ufs_hba *hba, unsigned long code, char *err_msg),
+	TP_ARGS(hba, code, err_msg));
+DECLARE_HOOK(android_vh_ogki_audit_log_setid,
+	TP_PROTO(u32 type, u32 old_id, u32 new_id),
+	TP_ARGS(type, old_id, new_id));
+DECLARE_HOOK(android_vh_ogki_audit_log_cfi,
+	TP_PROTO(unsigned long addr, unsigned long* target),
+	TP_ARGS(addr, target));
+DECLARE_RESTRICTED_HOOK(android_rvh_ogki_audit_log_usercopy,
+	TP_PROTO(bool to_user, const char* name, unsigned long len),
+	TP_ARGS(to_user, name, len), 1);
+DECLARE_RESTRICTED_HOOK(android_rvh_ogki_audit_log_module_sign,
+	TP_PROTO(int err),
+	TP_ARGS(err), 1);
+DECLARE_HOOK(android_vh_ogki_check_vip_status,
+	TP_PROTO(int cur_pid, int cur_tgid, struct task_struct* task, int* ret),
+	TP_ARGS(cur_pid, cur_tgid, task, ret));
+DECLARE_RESTRICTED_HOOK(android_rvh_ogki_task_util,
+	TP_PROTO(struct task_struct* p, unsigned long* ret),
+	TP_ARGS(p, ret), 1);
+DECLARE_RESTRICTED_HOOK(android_rvh_ogki_uclamp_task_util,
+	TP_PROTO(struct task_struct* p, unsigned long* ret),
+	TP_ARGS(p, ret), 1);
+DECLARE_RESTRICTED_HOOK(android_rvh_ogki_get_task_tags,
+	TP_PROTO(struct task_struct* p, unsigned long long* ret),
+	TP_ARGS(p, ret), 1);
+DECLARE_RESTRICTED_HOOK(android_rvh_ogki_get_task_rsum,
+	TP_PROTO(struct task_struct* p, unsigned long long* ret),
+	TP_ARGS(p, ret), 1);
+DECLARE_HOOK(android_rvh_ogki_check_task_tags,
+	TP_PROTO(struct task_struct *p, int *ret),
+	TP_ARGS(p, ret));
+DECLARE_HOOK(android_vh_ogki_tcp_srtt_estimator,
+	TP_PROTO(struct sock *sk), TP_ARGS(sk));
+DECLARE_HOOK(android_vh_ogki_tcp_rcv_estab_fastpath,
+	TP_PROTO(struct sock *sk), TP_ARGS(sk));
+DECLARE_HOOK(android_vh_ogki_tcp_rcv_estab_slowpath,
+	TP_PROTO(struct sock *sk), TP_ARGS(sk));
+DECLARE_HOOK(android_vh_ogki_set_wifi_state_connect,
+	TP_PROTO(const char *name, struct cfg80211_registered_device *rdev, struct net_device *dev, u8 *mac_addr),
+	TP_ARGS(name, rdev, dev, mac_addr));
+DECLARE_HOOK(android_vh_ogki_set_wifi_state_disconnect,
+	TP_PROTO(const char *name), TP_ARGS(name));
+DECLARE_HOOK(android_vh_ogki_tcp_rcv_rtt_update,
+	TP_PROTO(struct tcp_sock *tp, u32 sample, int win_dep), TP_ARGS(tp, sample, win_dep));
+DECLARE_HOOK(android_vh_ogki_tcp_retransmit_timer,
+	TP_PROTO(struct sock *sk), TP_ARGS(sk));
+DECLARE_HOOK(android_vh_ogki_udp_unicast_rcv_skb,
+	TP_PROTO(struct sk_buff *skb, struct sock *sk),
+	TP_ARGS(skb, sk));
+DECLARE_HOOK(android_vh_ogki_udp6_unicast_rcv_skb,
+	TP_PROTO(struct sk_buff *skb, struct sock *sk),
+	TP_ARGS(skb, sk));
+DECLARE_HOOK(android_vh_ogki_get_log_usertype,
+	TP_PROTO(unsigned int *type),
+	TP_ARGS(type));
+DECLARE_HOOK(android_vh_ogki_hievent_to_jank,
+	TP_PROTO(int tag, int prio, const char *buf, int *ret),
+	TP_ARGS(tag, prio, buf, ret));
+DECLARE_RESTRICTED_HOOK(android_rvh_ogki_hievent_create,
+	TP_PROTO(unsigned int event_id, void **event),
+	TP_ARGS(event_id, event), 1);
+DECLARE_RESTRICTED_HOOK(android_rvh_ogki_hievent_put_string,
+	TP_PROTO(void *event, const char *key, const char *value, int *ret),
+	TP_ARGS(event, key, value, ret), 1);
+DECLARE_RESTRICTED_HOOK(android_rvh_ogki_hievent_put_integral,
+	TP_PROTO(void *event, const char *key, long long value, int *ret),
+	TP_ARGS(event, key, value, ret), 1);
+DECLARE_RESTRICTED_HOOK(android_rvh_ogki_hievent_report,
+	TP_PROTO(void *event, int *ret),
+	TP_ARGS(event, ret), 1);
+DECLARE_RESTRICTED_HOOK(android_rvh_ogki_hievent_destroy,
+	TP_PROTO(void *event),
+	TP_ARGS(event), 1);
+DECLARE_HOOK(android_vh_ogki_f2fs_dsm,
+	TP_PROTO(char *name, int len),
+	TP_ARGS(name, len));
+DECLARE_HOOK(android_vh_ogki_f2fs_dsm_get,
+	TP_PROTO(unsigned long code, char *err_msg),
+	TP_ARGS(code, err_msg));
+DECLARE_HOOK(android_vh_ogki_cma_alloc_retry,
+	TP_PROTO(char *name, int *retry),
+	TP_ARGS(name, retry));
+DECLARE_RESTRICTED_HOOK(android_rvh_ogki_vmalloc_node_bypass,
+	TP_PROTO(unsigned long size, gfp_t gfp_mask, void **addr),
+	TP_ARGS(size, gfp_mask, addr), 1);
+DECLARE_RESTRICTED_HOOK(android_rvh_ogki_vfree_bypass,
+	TP_PROTO(const void *addr, bool *bypass),
+	TP_ARGS(addr, bypass), 1);
+DECLARE_HOOK(android_vh_ogki_kmem_cache_create_usercopy,
+	TP_PROTO(unsigned int flags),
+	TP_ARGS(flags));
+#endif /* _TRACE_HOOK_OGKI_HONOR_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
@@ -14,6 +14,9 @@ DECLARE_HOOK(android_vh_do_send_sig_info,
 DECLARE_HOOK(android_vh_exit_signal,
 	TP_PROTO(struct task_struct *task),
 	TP_ARGS(task));
+DECLARE_HOOK(android_vh_killed_process,
+	TP_PROTO(struct task_struct *killer, struct task_struct *dst, bool *reap),
+	TP_ARGS(killer, dst, reap));
 #endif /* _TRACE_HOOK_SIGNAL_H */
 /* This part must be outside protection */
 #include <trace/define_trace.h>
@@ -10,13 +10,23 @@
  * Following tracepoints are not exported in tracefs and provide a
  * mechanism for vendor modules to hook and extend functionality
  */
+DECLARE_RESTRICTED_HOOK(android_rvh_create_worker,
+	TP_PROTO(struct task_struct *p, struct workqueue_attrs *attrs),
+	TP_ARGS(p, attrs), 1);
+
 DECLARE_HOOK(android_vh_wq_lockup_pool,
 	TP_PROTO(int cpu, unsigned long pool_ts),
 	TP_ARGS(cpu, pool_ts));
 
+#ifndef __GENKSYMS__
+DECLARE_RESTRICTED_HOOK(android_rvh_alloc_and_link_pwqs,
+	TP_PROTO(struct workqueue_struct *wq, int *ret, bool *skip),
+	TP_ARGS(wq, ret, skip), 1);
+#else
 DECLARE_HOOK(android_rvh_alloc_and_link_pwqs,
 	TP_PROTO(struct workqueue_struct *wq, int *ret, bool *skip),
 	TP_ARGS(wq, ret, skip));
+#endif
 
 #endif /* _TRACE_HOOK_WQLOCKUP_H */
 /* This part must be outside protection */
@@ -876,9 +876,10 @@ enum ufshcd_mcq_opr {
  * @tmf_tag_set: TMF tag set.
  * @tmf_queue: Used to allocate TMF tags.
  * @tmf_rqs: array with pointers to TMF requests while these are in progress.
- * @active_uic_cmd: handle of active UIC command
- * @uic_cmd_mutex: mutex for UIC command
- * @uic_async_done: completion used during UIC processing
+ * @active_uic_cmd: active UIC command pointer.
+ * @uic_cmd_mutex: mutex used to serialize UIC command processing.
+ * @uic_async_done: completion used to wait for power mode or hibernation state
+ *	changes.
  * @ufshcd_state: UFSHCD state
  * @eh_flags: Error handling flags
  * @intr_mask: Interrupt Mask Bits
@@ -255,5 +255,15 @@
         }
       ]
     }
-  ]
+  ],
+  "kernel-presubmit": [
+    {
+      "name": "CtsCameraTestCases",
+      "options": [
+        {
+          "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+        }
+      ]
+    }
+  ]
 }
@@ -255,5 +255,15 @@
         }
       ]
     }
-  ]
+  ],
+  "kernel-presubmit": [
+    {
+      "name": "CtsCameraTestCases",
+      "options": [
+        {
+          "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+        }
+      ]
+    }
+  ]
 }
@@ -255,5 +255,15 @@
         }
       ]
     }
-  ]
+  ],
+  "kernel-presubmit": [
+    {
+      "name": "CtsCameraTestCases",
+      "options": [
+        {
+          "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+        }
+      ]
+    }
+  ]
 }
@@ -184,7 +184,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 
 	if (new_page) {
 		folio_get(new_folio);
-		folio_add_new_anon_rmap(new_folio, vma, addr);
+		folio_add_new_anon_rmap(new_folio, vma, addr, RMAP_EXCLUSIVE);
 		folio_add_lru_vma(new_folio, vma);
 		trace_android_vh_uprobes_replace_page(new_folio, old_folio);
 	} else
@@ -238,6 +238,9 @@ static bool __wake_nocb_gp(struct rcu_data *rdp_gp,
 	raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
 	if (needwake) {
 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
-		wake_up_process(rdp_gp->nocb_gp_kthread);
+		if (cpu_is_offline(raw_smp_processor_id()))
+			swake_up_one_online(&rdp_gp->nocb_gp_wq);
+		else
+			wake_up_process(rdp_gp->nocb_gp_kthread);
 	}
 
@@ -247,5 +247,15 @@
         }
       ]
     }
-  ]
+  ],
+  "kernel-presubmit": [
+    {
+      "name": "CtsCameraTestCases",
+      "options": [
+        {
+          "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+        }
+      ]
+    }
+  ]
 }
@@ -47,6 +47,7 @@
 #include <linux/cgroup.h>
 #include <linux/audit.h>
 #include <linux/sysctl.h>
+#include <linux/oom.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/signal.h>
@@ -1465,8 +1466,16 @@ int group_send_sig_info(int sig, struct kernel_siginfo *info,
 	ret = check_kill_permission(sig, info, p);
 	rcu_read_unlock();
 
-	if (!ret && sig)
+	if (!ret && sig) {
 		ret = do_send_sig_info(sig, info, p, type);
+		if (!ret && sig == SIGKILL) {
+			bool reap = false;
+
+			trace_android_vh_killed_process(current, p, &reap);
+			if (reap)
+				add_to_oom_reaper(p);
+		}
+	}
 
 	return ret;
 }
@@ -85,7 +85,7 @@ int tick_is_oneshot_available(void)
  */
 static void tick_periodic(int cpu)
 {
-	if (tick_do_timer_cpu == cpu) {
+	if (READ_ONCE(tick_do_timer_cpu) == cpu) {
 		raw_spin_lock(&jiffies_lock);
 		write_seqcount_begin(&jiffies_seq);
 
@@ -199,8 +199,8 @@ static void tick_setup_device(struct tick_device *td,
 	 * If no cpu took the do_timer update, assign it to
 	 * this cpu:
 	 */
-	if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
-		tick_do_timer_cpu = cpu;
+	if (READ_ONCE(tick_do_timer_cpu) == TICK_DO_TIMER_BOOT) {
+		WRITE_ONCE(tick_do_timer_cpu, cpu);
 		tick_next_period = ktime_get();
 #ifdef CONFIG_NO_HZ_FULL
 		/*
@@ -185,7 +185,7 @@ static ktime_t tick_init_jiffy_update(void)
 
 static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
 {
-	int cpu = smp_processor_id();
+	int tick_cpu, cpu = smp_processor_id();
 
 #ifdef CONFIG_NO_HZ_COMMON
 	/*
@@ -198,16 +198,18 @@ static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
 	 * If nohz_full is enabled, this should not happen because the
 	 * tick_do_timer_cpu never relinquishes.
 	 */
-	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) {
+	tick_cpu = READ_ONCE(tick_do_timer_cpu);
+	if (unlikely(tick_cpu == TICK_DO_TIMER_NONE)) {
 #ifdef CONFIG_NO_HZ_FULL
 		WARN_ON_ONCE(tick_nohz_full_running);
 #endif
-		tick_do_timer_cpu = cpu;
+		WRITE_ONCE(tick_do_timer_cpu, cpu);
+		tick_cpu = cpu;
 	}
 #endif
 
 	/* Check, if the jiffies need an update */
-	if (tick_do_timer_cpu == cpu) {
+	if (tick_cpu == cpu) {
 		tick_do_update_jiffies64(now);
 		trace_android_vh_jiffies_update(NULL);
 	}
@@ -553,7 +555,7 @@ bool tick_nohz_cpu_hotpluggable(unsigned int cpu)
 	 * timers, workqueues, timekeeping, ...) on behalf of full dynticks
 	 * CPUs. It must remain online when nohz full is enabled.
 	 */
-	if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
+	if (tick_nohz_full_running && READ_ONCE(tick_do_timer_cpu) == cpu)
 		return false;
 	return true;
 }
@@ -806,6 +808,7 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
 	u64 basemono, next_tick, delta, expires;
 	unsigned long basejiff;
 	unsigned int seq;
+	int tick_cpu;
 
 	/* Read jiffies and the time when jiffies were updated last */
 	do {
@@ -868,8 +871,9 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
 	 * Otherwise we can sleep as long as we want.
 	 */
 	delta = timekeeping_max_deferment();
-	if (cpu != tick_do_timer_cpu &&
-	    (tick_do_timer_cpu != TICK_DO_TIMER_NONE || !ts->do_timer_last))
+	tick_cpu = READ_ONCE(tick_do_timer_cpu);
+	if (tick_cpu != cpu &&
+	    (tick_cpu != TICK_DO_TIMER_NONE || !ts->do_timer_last))
 		delta = KTIME_MAX;
 
 	/* Calculate the next expiry time */
@@ -890,6 +894,7 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
 	u64 basemono = ts->timer_expires_base;
 	u64 expires = ts->timer_expires;
 	ktime_t tick = expires;
+	int tick_cpu;
 
 	/* Make sure we won't be trying to stop it twice in a row. */
 	ts->timer_expires_base = 0;
@@ -902,10 +907,11 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
 	 * do_timer() never invoked. Keep track of the fact that it
 	 * was the one which had the do_timer() duty last.
 	 */
-	if (cpu == tick_do_timer_cpu) {
-		tick_do_timer_cpu = TICK_DO_TIMER_NONE;
+	tick_cpu = READ_ONCE(tick_do_timer_cpu);
+	if (tick_cpu == cpu) {
+		WRITE_ONCE(tick_do_timer_cpu, TICK_DO_TIMER_NONE);
 		ts->do_timer_last = 1;
-	} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
+	} else if (tick_cpu != TICK_DO_TIMER_NONE) {
 		ts->do_timer_last = 0;
 	}
 
@@ -1068,8 +1074,10 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
 	 * invoked.
 	 */
 	if (unlikely(!cpu_online(cpu))) {
-		if (cpu == tick_do_timer_cpu)
-			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
+		int tick_cpu = READ_ONCE(tick_do_timer_cpu);
+
+		if (tick_cpu == cpu)
+			WRITE_ONCE(tick_do_timer_cpu, TICK_DO_TIMER_NONE);
 		/*
 		 * Make sure the CPU doesn't get fooled by obsolete tick
 		 * deadline if it comes back online later.
@@ -1088,15 +1096,16 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
 		return false;
 
 	if (tick_nohz_full_enabled()) {
+		int tick_cpu = READ_ONCE(tick_do_timer_cpu);
 		/*
 		 * Keep the tick alive to guarantee timekeeping progression
 		 * if there are full dynticks CPUs around
 		 */
-		if (tick_do_timer_cpu == cpu)
+		if (tick_cpu == cpu)
 			return false;
 
 		/* Should not happen for nohz-full */
-		if (WARN_ON_ONCE(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
+		if (WARN_ON_ONCE(tick_cpu == TICK_DO_TIMER_NONE))
 			return false;
 	}
 
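The timekeeping hunks above all apply one pattern: tick_do_timer_cpu is written by one CPU and read locklessly by others, so every access becomes READ_ONCE()/WRITE_ONCE() and readers work on a single snapshot (tick_cpu) instead of re-reading the shared variable. A reduced sketch of that pattern, with illustrative names rather than the kernel's:

#include <linux/compiler.h>

static int do_timer_cpu;	/* shared: one writer, many lockless readers */

static bool is_timekeeper(int cpu)
{
	int tick_cpu = READ_ONCE(do_timer_cpu);	/* one marked load, one consistent value */

	return tick_cpu == cpu;
}

static void hand_over_duty(int new_cpu)
{
	WRITE_ONCE(do_timer_cpu, new_cpu);	/* marked store: no tearing, KCSAN-clean */
}

Without the marked accesses the compiler is free to reload the variable between comparisons, so a concurrent update could change the answer in the middle of a function.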
@@ -2220,6 +2220,7 @@ static struct worker *create_worker(struct worker_pool *pool)
 	}
 
 	set_user_nice(worker->task, pool->attrs->nice);
+	trace_android_rvh_create_worker(worker->task, pool->attrs);
 	kthread_bind_mask(worker->task, pool_allowed_cpus(pool));
 
 	/* successful, attach the worker to the pool */
@@ -6353,6 +6354,9 @@ static struct timer_list wq_watchdog_timer;
 static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
 static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
 
+static unsigned int wq_panic_on_stall;
+module_param_named(panic_on_stall, wq_panic_on_stall, uint, 0644);
+
 /*
  * Show workers that might prevent the processing of pending work items.
  * The only candidates are CPU-bound workers in the running state.
@@ -6404,6 +6408,16 @@ static void show_cpu_pools_hogs(void)
 	rcu_read_unlock();
 }
 
+static void panic_on_wq_watchdog(void)
+{
+	static unsigned int wq_stall;
+
+	if (wq_panic_on_stall) {
+		wq_stall++;
+		BUG_ON(wq_stall >= wq_panic_on_stall);
+	}
+}
+
 static void wq_watchdog_reset_touched(void)
 {
 	int cpu;
@@ -6477,6 +6491,9 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
 	if (cpu_pool_stall)
 		show_cpu_pools_hogs();
 
+	if (lockup_detected)
+		panic_on_wq_watchdog();
+
 	wq_watchdog_reset_touched();
 	mod_timer(&wq_watchdog_timer, jiffies + thresh);
 }
@@ -226,5 +226,15 @@
         }
       ]
     }
-  ]
+  ],
+  "kernel-presubmit": [
+    {
+      "name": "CtsCameraTestCases",
+      "options": [
+        {
+          "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+        }
+      ]
+    }
+  ]
 }
@@ -77,12 +77,6 @@ bool redirty_page_for_writepage(struct writeback_control *wbc,
 }
 EXPORT_SYMBOL(redirty_page_for_writepage);
 
-void lru_cache_add_inactive_or_unevictable(struct page *page,
-		struct vm_area_struct *vma)
-{
-	folio_add_lru_vma(page_folio(page), vma);
-}
-
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 		pgoff_t index, gfp_t gfp)
 {
@@ -122,13 +116,3 @@ void putback_lru_page(struct page *page)
 {
 	folio_putback_lru(page_folio(page));
 }
-
-#ifdef CONFIG_MMU
-void page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma,
-		unsigned long address)
-{
-	VM_BUG_ON_PAGE(PageTail(page), page);
-
-	return folio_add_new_anon_rmap((struct folio *)page, vma, address);
-}
-#endif
@@ -939,7 +939,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 
 	entry = mk_huge_pmd(page, vma->vm_page_prot);
 	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-	folio_add_new_anon_rmap(folio, vma, haddr);
+	folio_add_new_anon_rmap(folio, vma, haddr, RMAP_EXCLUSIVE);
 	folio_add_lru_vma(folio, vma);
 	pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
 	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
@@ -1087,6 +1087,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 	pmd_t *pmd, _pmd;
 	pte_t *pte;
 	pgtable_t pgtable;
+	struct folio *folio;
 	struct page *hpage;
 	spinlock_t *pmd_ptl, *pte_ptl;
 	int result = SCAN_FAIL;
@@ -1209,13 +1210,13 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 	if (unlikely(result != SCAN_SUCCEED))
 		goto out_up_write;
 
+	folio = page_folio(hpage);
 	/*
-	 * spin_lock() below is not the equivalent of smp_wmb(), but
-	 * the smp_wmb() inside __SetPageUptodate() can be reused to
-	 * avoid the copy_huge_page writes to become visible after
-	 * the set_pmd_at() write.
+	 * The smp_wmb() inside __folio_mark_uptodate() ensures the
+	 * copy_huge_page writes become visible before the set_pmd_at()
+	 * write.
 	 */
-	__SetPageUptodate(hpage);
+	__folio_mark_uptodate(folio);
 	pgtable = pmd_pgtable(_pmd);
 
 	_pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
@@ -1223,8 +1224,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 
 	spin_lock(pmd_ptl);
 	BUG_ON(!pmd_none(*pmd));
-	page_add_new_anon_rmap(hpage, vma, address);
-	lru_cache_add_inactive_or_unevictable(hpage, vma);
+	folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);
+	folio_add_lru_vma(folio, vma);
 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
 	set_pmd_at(mm, address, pmd, _pmd);
 	update_mmu_cache_pmd(vma, address, pmd);
21
mm/ksm.c
@@ -2790,30 +2790,30 @@ void __ksm_exit(struct mm_struct *mm)
 	trace_ksm_exit(mm);
 }
 
-struct page *ksm_might_need_to_copy(struct page *page,
+struct folio *ksm_might_need_to_copy(struct folio *folio,
 			struct vm_area_struct *vma, unsigned long addr)
 {
-	struct folio *folio = page_folio(page);
+	struct page *page = folio_page(folio, 0);
 	struct anon_vma *anon_vma = folio_anon_vma(folio);
 	struct folio *new_folio;
 
 	if (folio_test_large(folio))
-		return page;
+		return folio;
 
 	if (folio_test_ksm(folio)) {
 		if (folio_stable_node(folio) &&
 		    !(ksm_run & KSM_RUN_UNMERGE))
-			return page;	/* no need to copy it */
+			return folio;	/* no need to copy it */
 	} else if (!anon_vma) {
-		return page;		/* no need to copy it */
+		return folio;		/* no need to copy it */
 	} else if (folio->index == linear_page_index(vma, addr) &&
 			anon_vma->root == vma->anon_vma->root) {
-		return page;		/* still no need to copy it */
+		return folio;		/* still no need to copy it */
 	}
 	if (PageHWPoison(page))
 		return ERR_PTR(-EHWPOISON);
 	if (!folio_test_uptodate(folio))
-		return page;		/* let do_swap_page report the error */
+		return folio;		/* let do_swap_page report the error */
 
 	new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
 	if (new_folio &&
@@ -2822,9 +2822,10 @@ struct page *ksm_might_need_to_copy(struct page *page,
 		new_folio = NULL;
 	}
 	if (new_folio) {
-		if (copy_mc_user_highpage(&new_folio->page, page, addr, vma)) {
+		if (copy_mc_user_highpage(folio_page(new_folio, 0), page,
+								addr, vma)) {
 			folio_put(new_folio);
-			memory_failure_queue(page_to_pfn(page), 0);
+			memory_failure_queue(folio_pfn(folio), 0);
 			return ERR_PTR(-EHWPOISON);
 		}
 		folio_set_dirty(new_folio);
@@ -2835,7 +2836,7 @@ struct page *ksm_might_need_to_copy(struct page *page,
 #endif
 	}
 
-	return new_folio ? &new_folio->page : NULL;
+	return new_folio;
 }
 
 void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
@@ -1591,8 +1591,12 @@ SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
 	}
 
 	total_len = iov_iter_count(&iter);
+	trace_android_vh_process_madvise_begin(task, behavior);
 
 	while (iov_iter_count(&iter)) {
+		trace_android_vh_process_madvise_iter(task, behavior, &ret);
+		if (ret < 0)
+			break;
 		ret = do_madvise(mm, (unsigned long)iter_iov_addr(&iter),
 					iter_iov_len(&iter), behavior);
 		if (ret < 0)
36
mm/memory.c
@@ -80,6 +80,7 @@
 #include <linux/sched/sysctl.h>
 
 #include <trace/events/kmem.h>
+#include <trace/hooks/mm.h>
 
 #undef CREATE_TRACE_POINTS
 #include <trace/hooks/mm.h>
@@ -925,7 +926,7 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
 	*prealloc = NULL;
 	copy_user_highpage(&new_folio->page, page, addr, src_vma);
 	__folio_mark_uptodate(new_folio);
-	folio_add_new_anon_rmap(new_folio, dst_vma, addr);
+	folio_add_new_anon_rmap(new_folio, dst_vma, addr, RMAP_EXCLUSIVE);
 	folio_add_lru_vma(new_folio, dst_vma);
 	rss[MM_ANONPAGES]++;
 
@@ -3357,7 +3358,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 		 * some TLBs while the old PTE remains in others.
 		 */
 		ptep_clear_flush(vma, vmf->address, vmf->pte);
-		folio_add_new_anon_rmap(new_folio, vma, vmf->address);
+		folio_add_new_anon_rmap(new_folio, vma, vmf->address, RMAP_EXCLUSIVE);
 		folio_add_lru_vma(new_folio, vma);
 		/*
 		 * We call the notify macro here because, when using secondary
@@ -4118,15 +4119,18 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		 * page->index of !PageKSM() pages would be nonlinear inside the
 		 * anon VMA -- PageKSM() is lost on actual swapout.
 		 */
-		page = ksm_might_need_to_copy(page, vma, vmf->address);
-		if (unlikely(!page)) {
+		folio = ksm_might_need_to_copy(folio, vma, vmf->address);
+		if (unlikely(!folio)) {
 			ret = VM_FAULT_OOM;
+			folio = swapcache;
 			goto out_page;
-		} else if (unlikely(PTR_ERR(page) == -EHWPOISON)) {
+		} else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
 			ret = VM_FAULT_HWPOISON;
+			folio = swapcache;
 			goto out_page;
 		}
-		folio = page_folio(page);
+		if (folio != swapcache)
+			page = folio_page(folio, 0);
 
 		/*
 		 * If we want to map a page that's in the swapcache writable, we
@@ -4282,8 +4286,17 @@ check_folio:
 
 	/* ksm created a completely new copy */
 	if (unlikely(folio != swapcache && swapcache)) {
-		folio_add_new_anon_rmap(folio, vma, address);
+		folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);
 		folio_add_lru_vma(folio, vma);
+	} else if (!folio_test_anon(folio)) {
+		/*
+		 * We currently only expect small !anon folios, which are either
+		 * fully exclusive or fully shared. If we ever get large folios
+		 * here, we have to be careful.
+		 */
+		VM_WARN_ON_ONCE(folio_test_large(folio));
+		VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
+		folio_add_new_anon_rmap(folio, vma, address, rmap_flags);
 	} else {
 		folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address,
 					rmap_flags);
@@ -4541,7 +4554,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC);
 #endif
-	folio_add_new_anon_rmap(folio, vma, addr);
+	folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
 	folio_add_lru_vma(folio, vma);
 setpte:
 	if (uffd_wp)
@@ -4741,7 +4754,7 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio,
 	if (write && !(vma->vm_flags & VM_SHARED)) {
 		add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr);
 		VM_BUG_ON_FOLIO(nr != 1, folio);
-		folio_add_new_anon_rmap(folio, vma, addr);
+		folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
 		folio_add_lru_vma(folio, vma);
 	} else {
 		add_mm_counter(vma->vm_mm, mm_counter_file(folio), nr);
@@ -4925,6 +4938,7 @@ static vm_fault_t do_fault_around(struct vm_fault *vmf)
 /* Return true if we should do read fault-around, false otherwise */
 static inline bool should_fault_around(struct vm_fault *vmf)
 {
+	bool should_around = true;
 	/* No ->map_pages? No way to fault around... */
 	if (!vmf->vma->vm_ops->map_pages)
 		return false;
@@ -4932,6 +4946,10 @@ static inline bool should_fault_around(struct vm_fault *vmf)
 	if (uffd_disable_fault_around(vmf->vma))
 		return false;
 
+	trace_android_vh_should_fault_around(vmf, &should_around);
+	if (!should_around)
+		return false;
+
 	/* A single page implies no faulting 'around' at all. */
 	return fault_around_pages > 1;
 }
@@ -485,21 +485,11 @@ void free_zone_device_page(struct page *page)
 		__ClearPageAnonExclusive(page);
 
 	/*
-	 * When a device managed page is freed, the page->mapping field
+	 * When a device managed page is freed, the folio->mapping field
 	 * may still contain a (stale) mapping value. For example, the
-	 * lower bits of page->mapping may still identify the page as an
-	 * anonymous page. Ultimately, this entire field is just stale
-	 * and wrong, and it will cause errors if not cleared. One
-	 * example is:
-	 *
-	 *  migrate_vma_pages()
-	 *    migrate_vma_insert_page()
-	 *      page_add_new_anon_rmap()
-	 *        __page_set_anon_rmap()
-	 *          ...checks page->mapping, via PageAnon(page) call,
-	 *            and incorrectly concludes that the page is an
-	 *            anonymous page. Therefore, it incorrectly,
-	 *            silently fails to set up the new anon rmap.
+	 * lower bits of folio->mapping may still identify the folio as an
+	 * anonymous folio. Ultimately, this entire field is just stale
+	 * and wrong, and it will cause errors if not cleared.
 	 *
 	 * For other types of ZONE_DEVICE pages, migration is either
 	 * handled differently or not done at all, so there is no need
@@ -567,6 +567,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
 				    struct page *page,
 				    unsigned long *src)
 {
+	struct folio *folio = page_folio(page);
 	struct vm_area_struct *vma = migrate->vma;
 	struct mm_struct *mm = vma->vm_mm;
 	bool flush = false;
@@ -599,17 +600,17 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
 		goto abort;
 	if (unlikely(anon_vma_prepare(vma)))
 		goto abort;
-	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
+	if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
 		goto abort;
 
 	/*
-	 * The memory barrier inside __SetPageUptodate makes sure that
-	 * preceding stores to the page contents become visible before
+	 * The memory barrier inside __folio_mark_uptodate makes sure that
+	 * preceding stores to the folio contents become visible before
 	 * the set_pte_at() write.
 	 */
-	__SetPageUptodate(page);
+	__folio_mark_uptodate(folio);
 
-	if (is_device_private_page(page)) {
+	if (folio_is_device_private(folio)) {
 		swp_entry_t swp_entry;
 
 		if (vma->vm_flags & VM_WRITE)
@@ -620,8 +621,8 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
 					page_to_pfn(page));
 		entry = swp_entry_to_pte(swp_entry);
 	} else {
-		if (is_zone_device_page(page) &&
-		    !is_device_coherent_page(page)) {
+		if (folio_is_zone_device(folio) &&
+		    !folio_is_device_coherent(folio)) {
 			pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
 			goto abort;
 		}
@@ -655,10 +656,10 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
 		goto unlock_abort;
 
 	inc_mm_counter(mm, MM_ANONPAGES);
-	page_add_new_anon_rmap(page, vma, addr);
-	if (!is_zone_device_page(page))
-		lru_cache_add_inactive_or_unevictable(page, vma);
-	get_page(page);
+	folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
+	if (!folio_is_zone_device(folio))
+		folio_add_lru_vma(folio, vma);
+	folio_get(folio);
 
 	if (flush) {
 		flush_cache_page(vma, addr, pte_pfn(orig_pte));
@@ -748,6 +748,19 @@ static inline void queue_oom_reaper(struct task_struct *tsk)
 }
 #endif /* CONFIG_MMU */
 
+/**
+ * tsk->mm has to be non NULL and caller has to guarantee it is stable (either
+ * under task_lock or operate on the current).
+ */
+static void __mark_oom_victim(struct task_struct *tsk)
+{
+	struct mm_struct *mm = tsk->mm;
+
+	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
+		mmgrab(tsk->signal->oom_mm);
+	}
+}
+
 /**
  * mark_oom_victim - mark the given task as OOM victim
  * @tsk: task to mark
@@ -761,7 +774,6 @@ static inline void queue_oom_reaper(struct task_struct *tsk)
 static void mark_oom_victim(struct task_struct *tsk)
 {
 	const struct cred *cred;
-	struct mm_struct *mm = tsk->mm;
 
 	WARN_ON(oom_killer_disabled);
 	/* OOM killer might race with memcg OOM */
@@ -769,8 +781,7 @@ static void mark_oom_victim(struct task_struct *tsk)
 		return;
 
 	/* oom_mm is bound to the signal struct life time. */
-	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
-		mmgrab(tsk->signal->oom_mm);
+	__mark_oom_victim(tsk);
 
 	/*
 	 * Make sure that the task is woken up from uninterruptible sleep
@@ -1263,3 +1274,16 @@ put_task:
 	return -ENOSYS;
 #endif /* CONFIG_MMU */
 }
+
+void add_to_oom_reaper(struct task_struct *p)
+{
+	p = find_lock_task_mm(p);
+	if (!p)
+		return;
+
+	if (task_will_free_mem(p)) {
+		__mark_oom_victim(p);
+		queue_oom_reaper(p);
+	}
+	task_unlock(p);
+}
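Together with the kernel/signal.c hunk earlier on this page, the new android_vh_killed_process hook lets a vendor module decide, at SIGKILL delivery, whether the victim should be handed to the OOM reaper via add_to_oom_reaper() so its anonymous memory is torn down promptly. A sketch of such a probe, assuming a hypothetical module (the policy shown is illustrative; a real probe would need to justify the unlocked dst->mm peek):

/* Hypothetical vendor module attaching to android_vh_killed_process. */
#include <linux/module.h>
#include <linux/sched.h>
#include <trace/hooks/signal.h>

static void demo_killed_process(void *data, struct task_struct *killer,
				struct task_struct *dst, bool *reap)
{
	/* illustrative policy: only expedite reaping of unrelated victims with an mm */
	if (dst->mm && killer->tgid != dst->tgid)
		*reap = true;
}

static int __init demo_init(void)
{
	return register_trace_android_vh_killed_process(demo_killed_process, NULL);
}
module_init(demo_init);
MODULE_LICENSE("GPL");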
41
mm/rmap.c
@@ -1282,9 +1282,9 @@ static void __page_check_anon_rmap(struct folio *folio, struct page *page,
 	 * We have exclusion against folio_add_anon_rmap_*() because the caller
 	 * always holds the page locked.
 	 *
-	 * We have exclusion against page_add_new_anon_rmap because those pages
+	 * We have exclusion against folio_add_new_anon_rmap because those pages
 	 * are initially only visible via the pagetables, and the pte is locked
-	 * over the call to page_add_new_anon_rmap.
+	 * over the call to folio_add_new_anon_rmap.
 	 */
 	VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
 			folio);
@@ -1298,27 +1298,16 @@ static __always_inline void __folio_add_anon_rmap(struct folio *folio,
 {
 	int i, nr, nr_pmdmapped = 0;
 
+	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
+
 	nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped);
 	if (nr_pmdmapped)
 		__lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped);
 	if (nr)
 		__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
 
-	if (unlikely(!folio_test_anon(folio))) {
-		VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
-		/*
-		 * For a PTE-mapped large folio, we only know that the single
-		 * PTE is exclusive. Further, __folio_set_anon() might not get
-		 * folio->index right when not given the address of the head
-		 * page.
-		 */
-		VM_WARN_ON_FOLIO(folio_test_large(folio) &&
-				 level != RMAP_LEVEL_PMD, folio);
-		__folio_set_anon(folio, vma, address,
-				 !!(flags & RMAP_EXCLUSIVE));
-	} else if (likely(!folio_test_ksm(folio))) {
+	if (likely(!folio_test_ksm(folio)))
 		__page_check_anon_rmap(folio, page, vma, address);
-	}
 
 	if (flags & RMAP_EXCLUSIVE) {
 		switch (level) {
@@ -1404,28 +1393,36 @@ void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page,
  * @folio:	The folio to add the mapping to.
  * @vma:	the vm area in which the mapping is added
  * @address:	the user virtual address mapped
+ * @flags:	The rmap flags
  *
  * Like folio_add_anon_rmap_*() but must only be called on *new* folios.
  * This means the inc-and-test can be bypassed.
- * The folio does not have to be locked.
+ * The folio doesn't necessarily need to be locked while it's exclusive
+ * unless two threads map it concurrently. However, the folio must be
+ * locked if it's shared.
  *
- * If the folio is pmd-mappable, it is accounted as a THP. As the folio
- * is new, it's assumed to be mapped exclusively by a single process.
+ * If the folio is pmd-mappable, it is accounted as a THP.
 */
 void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
-		unsigned long address)
+		unsigned long address, rmap_t flags)
 {
-	int nr = folio_nr_pages(folio);
+	const int nr = folio_nr_pages(folio);
+	const bool exclusive = flags & RMAP_EXCLUSIVE;
 
 	VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
+	VM_WARN_ON_FOLIO(!exclusive && !folio_test_locked(folio), folio);
 	VM_BUG_ON_VMA(address < vma->vm_start ||
 			address + (nr << PAGE_SHIFT) > vma->vm_end, vma);
 
 	if (!folio_test_swapbacked(folio))
 		__folio_set_swapbacked(folio);
-	__folio_set_anon(folio, vma, address, true);
+	__folio_set_anon(folio, vma, address, exclusive);
 
 	if (likely(!folio_test_large(folio))) {
 		/* increment count (starts at -1) */
 		atomic_set(&folio->_mapcount, 0);
+		if (exclusive)
+			SetPageAnonExclusive(&folio->page);
 	} else if (!folio_test_pmd_mappable(folio)) {
 		int i;
@@ -1435,6 +1432,7 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 
 			/* increment count (starts at -1) */
 			atomic_set(&page->_mapcount, 0);
+			if (exclusive)
 				SetPageAnonExclusive(page);
 		}
 
@@ -1443,6 +1441,7 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 		/* increment count (starts at -1) */
 		atomic_set(&folio->_entire_mapcount, 0);
 		atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED);
+		if (exclusive)
 			SetPageAnonExclusive(&folio->page);
 		__lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr);
 	}
@@ -1762,18 +1762,24 @@ static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, swp_entry_t entry, struct folio *folio)
 {
-	struct page *page = folio_file_page(folio, swp_offset(entry));
-	struct page *swapcache;
+	struct page *page;
+	struct folio *swapcache;
 	spinlock_t *ptl;
 	pte_t *pte, new_pte, old_pte;
-	bool hwpoisoned = PageHWPoison(page);
+	bool hwpoisoned = false;
 	int ret = 1;
 
-	swapcache = page;
-	page = ksm_might_need_to_copy(page, vma, addr);
-	if (unlikely(!page))
+	swapcache = folio;
+	folio = ksm_might_need_to_copy(folio, vma, addr);
+	if (unlikely(!folio))
 		return -ENOMEM;
-	else if (unlikely(PTR_ERR(page) == -EHWPOISON))
+	else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
 		hwpoisoned = true;
+		folio = swapcache;
+	}
+
+	page = folio_file_page(folio, swp_offset(entry));
+	if (PageHWPoison(page))
+		hwpoisoned = true;
 
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
@@ -1785,13 +1791,12 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 
 	old_pte = ptep_get(pte);
 
-	if (unlikely(hwpoisoned || !PageUptodate(page))) {
+	if (unlikely(hwpoisoned || !folio_test_uptodate(folio))) {
 		swp_entry_t swp_entry;
 
 		dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
 		if (hwpoisoned) {
-			swp_entry = make_hwpoison_entry(swapcache);
-			page = swapcache;
+			swp_entry = make_hwpoison_entry(page);
 		} else {
 			swp_entry = make_poisoned_swp_entry();
 		}
@@ -1807,29 +1812,35 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	 */
 	arch_swap_restore(folio_swap(entry, folio), folio);
 
-	/* See do_swap_page() */
-	BUG_ON(!PageAnon(page) && PageMappedToDisk(page));
-	BUG_ON(PageAnon(page) && PageAnonExclusive(page));
-
 	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
 	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
-	get_page(page);
-	if (page == swapcache) {
+	folio_get(folio);
+	if (folio == swapcache) {
 		rmap_t rmap_flags = RMAP_NONE;
 
 		/*
-		 * See do_swap_page(): PageWriteback() would be problematic.
-		 * However, we do a wait_on_page_writeback() just before this
-		 * call and have the page locked.
+		 * See do_swap_page(): writeback would be problematic.
+		 * However, we do a folio_wait_writeback() just before this
+		 * call and have the folio locked.
 		 */
-		VM_BUG_ON_PAGE(PageWriteback(page), page);
+		VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
 		if (pte_swp_exclusive(old_pte))
 			rmap_flags |= RMAP_EXCLUSIVE;
 
+		/*
+		 * We currently only expect small !anon folios, which are either
+		 * fully exclusive or fully shared. If we ever get large folios
+		 * here, we have to be careful.
+		 */
+		if (!folio_test_anon(folio)) {
+			VM_WARN_ON_ONCE(folio_test_large(folio));
+			VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
+			folio_add_new_anon_rmap(folio, vma, addr, rmap_flags);
+		} else {
+			folio_add_anon_rmap_pte(folio, page, vma, addr, rmap_flags);
+		}
 	} else { /* ksm created a completely new copy */
-		page_add_new_anon_rmap(page, vma, addr);
-		lru_cache_add_inactive_or_unevictable(page, vma);
+		folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
+		folio_add_lru_vma(folio, vma);
 	}
 	new_pte = pte_mkold(mk_pte(page, vma->vm_page_prot));
 	if (pte_swp_soft_dirty(old_pte))
@@ -1842,9 +1853,9 @@ setpte:
 out:
 	if (pte)
 		pte_unmap_unlock(pte, ptl);
-	if (page != swapcache) {
-		unlock_page(page);
-		put_page(page);
+	if (folio != swapcache) {
+		folio_unlock(folio);
+		folio_put(folio);
 	}
 	return ret;
 }
@@ -217,7 +217,7 @@ int mfill_atomic_install_pte(pmd_t *dst_pmd,
 		folio_add_lru(folio);
 		folio_add_file_rmap_pte(folio, page, dst_vma);
 	} else {
-		folio_add_new_anon_rmap(folio, dst_vma, dst_addr);
+		folio_add_new_anon_rmap(folio, dst_vma, dst_addr, RMAP_EXCLUSIVE);
 		folio_add_lru_vma(folio, dst_vma);
 	}
 
@@ -1029,8 +1029,8 @@ static int move_present_pte(struct mm_struct *mm,
 
 	double_pt_lock(dst_ptl, src_ptl);
 
-	if (!pte_same(*src_pte, orig_src_pte) ||
-	    !pte_same(*dst_pte, orig_dst_pte)) {
+	if (!pte_same(ptep_get(src_pte), orig_src_pte) ||
+	    !pte_same(ptep_get(dst_pte), orig_dst_pte)) {
 		err = -EAGAIN;
 		goto out;
 	}
@@ -1073,8 +1073,8 @@ static int move_swap_pte(struct mm_struct *mm,
 
 	double_pt_lock(dst_ptl, src_ptl);
 
-	if (!pte_same(*src_pte, orig_src_pte) ||
-	    !pte_same(*dst_pte, orig_dst_pte)) {
+	if (!pte_same(ptep_get(src_pte), orig_src_pte) ||
+	    !pte_same(ptep_get(dst_pte), orig_dst_pte)) {
 		double_pt_unlock(dst_ptl, src_ptl);
 		return -EAGAIN;
 	}
@@ -1170,7 +1170,7 @@ retry:
 	}
 
 	spin_lock(dst_ptl);
-	orig_dst_pte = *dst_pte;
+	orig_dst_pte = ptep_get(dst_pte);
 	spin_unlock(dst_ptl);
 	if (!pte_none(orig_dst_pte)) {
 		err = -EEXIST;
@@ -1178,7 +1178,7 @@ retry:
 	}
 
 	spin_lock(src_ptl);
-	orig_src_pte = *src_pte;
+	orig_src_pte = ptep_get(src_pte);
 	spin_unlock(src_ptl);
 	if (pte_none(orig_src_pte)) {
 		if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES))
@@ -1216,7 +1216,7 @@ retry:
 			 * page isn't freed under us
 			 */
 			spin_lock(src_ptl);
-			if (!pte_same(orig_src_pte, *src_pte)) {
+			if (!pte_same(orig_src_pte, ptep_get(src_pte))) {
 				spin_unlock(src_ptl);
 				err = -EAGAIN;
 				goto out;
@@ -2011,7 +2011,7 @@ retry:
 		 * try_to_unmap acquire PTL from the first PTE,
 		 * eliminating the influence of temporary PTE values.
 		 */
-		if (folio_test_large(folio) && list_empty(&folio->_deferred_list))
+		if (folio_test_large(folio))
 			flags |= TTU_SYNC;
 
 		if (!ignore_references)
10
mm/zswap.c
@@ -84,6 +84,7 @@ static bool zswap_pool_reached_full;
 static int zswap_setup(void);
 
 /* Enable/disable zswap */
+static DEFINE_STATIC_KEY_MAYBE(CONFIG_ZSWAP_DEFAULT_ON, zswap_ever_enabled);
 static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
 static int zswap_enabled_param_set(const char *,
 				   const struct kernel_param *);
@@ -144,6 +145,11 @@ module_param_named(exclusive_loads, zswap_exclusive_loads_enabled, bool, 0644);
 /* Number of zpools in zswap_pool (empirically determined for scalability) */
 #define ZSWAP_NR_ZPOOLS 32
 
+bool zswap_never_enabled(void)
+{
+	return !static_branch_maybe(CONFIG_ZSWAP_DEFAULT_ON, &zswap_ever_enabled);
+}
+
 /*********************************
 * data structures
 **********************************/
@@ -1410,6 +1416,9 @@ bool zswap_load(struct folio *folio)
 
 	VM_WARN_ON_ONCE(!folio_test_locked(folio));
 
+	if (zswap_never_enabled())
+		return false;
+
 	/* find */
 	spin_lock(&tree->lock);
 	entry = zswap_entry_find_get(&tree->rbroot, offset);
@@ -1611,6 +1620,7 @@ static int zswap_setup(void)
 			zpool_get_type(pool->zpools[0]));
 		list_add(&pool->list, &zswap_pools);
 		zswap_has_pool = true;
+		static_branch_enable(&zswap_ever_enabled);
 	} else {
 		pr_err("pool creation failed\n");
 		zswap_enabled = false;
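zswap_never_enabled() is built on a static branch so the swap-in path pays essentially nothing when zswap has never been switched on: DEFINE_STATIC_KEY_MAYBE() picks a true or false key based on the config default, and static_branch_maybe() compiles down to a run-time-patched jump rather than a load and compare. A self-contained sketch of the same shape, with illustrative names (using the plain false-key variants rather than the config-dependent MAYBE macros):

#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(feature_ever_enabled);

/* Hot path: with the key never enabled, this is a patched no-op, not a branch. */
static bool feature_never_enabled(void)
{
	return !static_branch_unlikely(&feature_ever_enabled);
}

static void feature_setup(void)
{
	static_branch_enable(&feature_ever_enabled);	/* one-way latch, as in zswap_setup() */
}

The key is deliberately a latch: it records "ever enabled", not "currently enabled", so stale entries written while zswap was on remain loadable after it is toggled off.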
@@ -242,5 +242,15 @@
     {
       "name": "vts_kernel_net_tests"
     }
-  ]
+  ],
+  "kernel-presubmit": [
+    {
+      "name": "CtsCameraTestCases",
+      "options": [
+        {
+          "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+        }
+      ]
+    }
+  ]
 }
@@ -252,7 +252,7 @@ EXPORT_SYMBOL(inet_listen);
 static int inet_create(struct net *net, struct socket *sock, int protocol,
 		       int kern)
 {
-	struct sock *sk;
+	struct sock *sk = NULL;
 	struct inet_protosw *answer;
 	struct inet_sock *inet;
 	struct proto *answer_prot;
@@ -401,6 +401,7 @@ lookup_protocol:
 	trace_android_rvh_inet_sock_create(sk);
 
 out:
+	trace_android_vh_inet_create(sk, err);
 	return err;
 out_rcu_unlock:
 	rcu_read_unlock();
@@ -1119,6 +1119,8 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
 		}
 	}
 
+	trace_android_vh_uplink_send_msg(sk);
+
 	/* This should be in poll */
 	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
@@ -244,9 +244,14 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
 	 */
 	if (unlikely(len != icsk->icsk_ack.rcv_mss)) {
 		u64 val = (u64)skb->len << TCP_RMEM_TO_WIN_SCALE;
+		u8 old_ratio = tcp_sk(sk)->scaling_ratio;
 
 		do_div(val, skb->truesize);
 		tcp_sk(sk)->scaling_ratio = val ? val : 1;
+
+		if (old_ratio != tcp_sk(sk)->scaling_ratio)
+			WRITE_ONCE(tcp_sk(sk)->window_clamp,
+				   tcp_win_from_space(sk, sk->sk_rcvbuf));
 	}
 	icsk->icsk_ack.rcv_mss = min_t(unsigned int, len,
 				       tcp_sk(sk)->advmss);
@@ -749,7 +754,8 @@ void tcp_rcv_space_adjust(struct sock *sk)
 	 * <prev RTT . ><current RTT .. ><next RTT .... >
 	 */
 
-	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf)) {
+	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) &&
+	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
 		u64 rcvwin, grow;
 		int rcvbuf;
 
@@ -765,7 +771,6 @@ void tcp_rcv_space_adjust(struct sock *sk)
 
 		rcvbuf = min_t(u64, tcp_space_from_win(sk, rcvwin),
 			       READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
-		if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
 		if (rcvbuf > sk->sk_rcvbuf) {
 			WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
 
@@ -773,15 +778,6 @@ void tcp_rcv_space_adjust(struct sock *sk)
 			WRITE_ONCE(tp->window_clamp,
 				   tcp_win_from_space(sk, rcvbuf));
 		}
-		} else {
-			/* Make the window clamp follow along while being bounded
-			 * by SO_RCVBUF.
-			 */
-			int clamp = tcp_win_from_space(sk, min(rcvbuf, sk->sk_rcvbuf));
-
-			if (clamp > tp->window_clamp)
-				WRITE_ONCE(tp->window_clamp, clamp);
-		}
 	}
 	tp->rcvq_space.space = copied;
 
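A worked example of the scaling_ratio arithmetic in the tcp_measure_rcv_mss() hunk above, as a userspace sketch (the kernel uses do_div() and stores the result in a u8; names here are illustrative). The ratio is payload/truesize scaled by 2^TCP_RMEM_TO_WIN_SCALE, i.e. expressed as a fraction of 256, and the new code re-derives window_clamp from sk_rcvbuf whenever that fraction changes:

#include <stdint.h>
#include <stdio.h>

#define TCP_RMEM_TO_WIN_SCALE 8

static uint8_t scaling_ratio(uint32_t len, uint32_t truesize)
{
	uint64_t val = (uint64_t)len << TCP_RMEM_TO_WIN_SCALE;

	val /= truesize;		/* truncating division, like do_div() */
	return val ? val : 1;		/* never report 0 */
}

int main(void)
{
	/* 1448 payload bytes in a 2304-byte skb -> 160/256, i.e. 62.5% of rcvbuf */
	printf("%u\n", scaling_ratio(1448, 2304));
	return 0;
}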
@@ -1090,6 +1090,8 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
 		return -EOPNOTSUPP;
 
+	trace_android_vh_uplink_send_msg(sk);
+
 	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
 
 	fl4 = &inet->cork.fl.u.ip4;
@@ -123,7 +123,7 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol,
 {
 	struct inet_sock *inet;
 	struct ipv6_pinfo *np;
-	struct sock *sk;
+	struct sock *sk = NULL;
 	struct inet_protosw *answer;
 	struct proto *answer_prot;
 	unsigned char answer_flags;
@@ -276,6 +276,7 @@ lookup_protocol:
 	trace_android_rvh_inet_sock_create(sk);
 
 out:
+	trace_android_vh_inet_create(sk, err);
 	return err;
 out_rcu_unlock:
 	rcu_read_unlock();
@@ -109,6 +109,7 @@
 #include <linux/errqueue.h>
 #include <linux/ptp_clock_kernel.h>
 #include <trace/events/sock.h>
+#include <trace/hooks/net.h>
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
 unsigned int sysctl_net_busy_read __read_mostly;
@@ -1589,6 +1590,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
 		goto out_sock_release;
 	*res = sock;
 
+	trace_android_vh_sock_create(sock->sk);
+
 	return 0;
 
 out_module_busy:
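The android_vh_sock_create hook added to __sock_create() above fires only after the socket is fully created, so a probe can safely inspect or account the sock. A minimal sketch of a consumer, assuming a hypothetical vendor module (the accounting policy is illustrative):

/* Hypothetical vendor module attaching to android_vh_sock_create. */
#include <linux/module.h>
#include <net/sock.h>
#include <trace/hooks/net.h>

static atomic_t demo_inet_socks = ATOMIC_INIT(0);

static void demo_sock_create(void *data, struct sock *sk)
{
	if (sk && sk->sk_family == AF_INET)
		atomic_inc(&demo_inet_socks);	/* e.g. per-family accounting */
}

static int __init demo_init(void)
{
	return register_trace_android_vh_sock_create(demo_sock_create, NULL);
}
module_init(demo_init);
MODULE_LICENSE("GPL");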