Merge remote-tracking branch 'aosp/android15-6.6' into nxp-linux-sdk/lf-6.6.y_android

Change-Id: Id0e4b6d21301b170108ed32123268df55ec6d1d5
Zhipeng Wang 2024-07-09 18:21:23 +09:00
commit 18e245b072
86 changed files with 1471 additions and 12282 deletions

View File

@ -1008,6 +1008,7 @@ ifdef CONFIG_RUST
# This addresses the problem that on e.g. i686, int != long, and Rust
# maps both to i32.
# See https://rcvalle.com/docs/rust-cfi-design-doc.pdf for details.
$(error "Enabling Rust and CFI silently changes the KMI.")
CC_FLAGS_CFI += -fsanitize-cfi-icall-experimental-normalize-integers
RS_FLAGS_CFI := -Zsanitizer=kcfi -Zsanitizer-cfi-normalize-integers
KBUILD_RUSTFLAGS += $(RS_FLAGS_CFI)
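# With kCFI, an indirect call checks a hash of the callee's prototype, so
# int f(int) and long f(long) hash differently even where both types are
# 32 bits wide; Rust maps both to i32, so the C and Rust flags above have
# to be enabled as a pair for same-width integer types to fold into one
# hash on both sides of the FFI boundary.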

File diff suppressed because it is too large

View File

@ -631,10 +631,12 @@
drm_crtc_handle_vblank
drm_crtc_init_with_planes
drm_crtc_send_vblank_event
drm_crtc_vblank_count
drm_crtc_vblank_get
drm_crtc_vblank_helper_get_vblank_timestamp
drm_crtc_vblank_off
drm_crtc_vblank_on
drm_crtc_vblank_put
___drm_dbg
drm_debugfs_create_files
drm_dev_alloc
@ -976,6 +978,8 @@
hex2bin
hex_asc
hex_dump_to_buffer
__hid_register_driver
hid_unregister_driver
high_memory
hrtimer_active
hrtimer_cancel
@ -2222,6 +2226,8 @@
__traceiter_mmap_lock_acquire_returned
__traceiter_mmap_lock_released
__traceiter_mmap_lock_start_locking
__traceiter_mm_page_alloc
__traceiter_mm_page_free
__traceiter_rwmmio_post_read
__traceiter_rwmmio_post_write
__traceiter_rwmmio_read
@ -2256,6 +2262,8 @@
__tracepoint_mmap_lock_acquire_returned
__tracepoint_mmap_lock_released
__tracepoint_mmap_lock_start_locking
__tracepoint_mm_page_alloc
__tracepoint_mm_page_free
tracepoint_probe_register
tracepoint_probe_unregister
__tracepoint_rwmmio_post_read

View File

@ -1527,6 +1527,10 @@
snd_usb_autoresume
snd_usb_autosuspend
snd_usb_register_platform_ops
__traceiter_android_rvh_usb_dev_suspend
__traceiter_android_vh_usb_dev_resume
__tracepoint_android_rvh_usb_dev_suspend
__tracepoint_android_vh_usb_dev_resume
usb_altnum_to_altsetting
usb_choose_configuration
usb_ifnum_to_if
@ -1610,6 +1614,8 @@
# required by exynos_thermal_v2.ko
devm_thermal_of_zone_register
kthread_flush_work
kunit_hooks
kunit_running
of_get_cpu_node
thermal_cdev_update
thermal_cooling_device_unregister

View File

@ -45,8 +45,11 @@
nvmem_device_find
device_match_of_node
drop_super
filp_open_block
mm_trace_rss_stat
__kfifo_len_r
__traceiter_android_vh_rwsem_write_wait_finish
__tracepoint_android_vh_rwsem_write_wait_finish
__tracepoint_android_rvh_cpuinfo_c_show
__traceiter_android_rvh_cpuinfo_c_show
__tracepoint_android_vh_dc_send_copy
@ -81,3 +84,7 @@
__tracepoint_android_rvh_hw_protection_shutdown
__traceiter_android_rvh_bpf_int_jit_compile_ro
__tracepoint_android_rvh_bpf_int_jit_compile_ro
__traceiter_android_vh_sk_alloc
__tracepoint_android_vh_sk_alloc
__traceiter_android_vh_sk_free
__tracepoint_android_vh_sk_free

View File

@ -141,6 +141,8 @@
clk_put
clk_set_rate
clk_unprepare
clockevent_delta2ns
clockevents_register_device
cma_alloc
cma_get_name
cma_release
@ -189,6 +191,9 @@
cpu_topology
crc32_le
css_next_child
css_task_iter_end
css_task_iter_next
css_task_iter_start
csum_partial
_ctype
debugfs_attr_read
@ -996,6 +1001,7 @@
rtnl_lock
rtnl_unlock
sched_clock
sched_clock_register
sched_feat_names
sched_set_fifo
sched_set_fifo_low
@ -1241,6 +1247,7 @@
__traceiter_android_rvh_update_cpu_capacity
__traceiter_android_rvh_wake_up_new_task
__traceiter_android_vh_dup_task_struct
__traceiter_android_vh_cpufreq_online
__traceiter_android_vh_update_topology_flags_workfn
__traceiter_binder_transaction_received
__traceiter_cpu_frequency_limits
@ -1266,6 +1273,7 @@
__tracepoint_android_rvh_update_cpu_capacity
__tracepoint_android_rvh_wake_up_new_task
__tracepoint_android_vh_dup_task_struct
__tracepoint_android_vh_cpufreq_online
__tracepoint_android_vh_update_topology_flags_workfn
__tracepoint_binder_transaction_received
__tracepoint_cpu_frequency_limits

View File

@ -8,6 +8,7 @@
blk_rq_map_user
blk_rq_map_user_iov
blk_start_plug
blk_fill_rwbs
__break_lease
cgroup_add_legacy_cftypes
config_item_init_type_name

View File

@ -63,6 +63,7 @@
__bitmap_or
bitmap_parse
bitmap_parselist
bitmap_parse_user
bitmap_print_to_pagebuf
__bitmap_set
bitmap_to_arr32
@ -423,6 +424,7 @@
devm_regulator_get_optional
devm_regulator_put
devm_regulator_register
devm_request_any_context_irq
__devm_request_region
devm_request_threaded_irq
devm_rtc_device_register
@ -554,6 +556,7 @@
down_write
dput
drain_workqueue
driver_for_each_device
driver_register
driver_unregister
drm_add_edid_modes
@ -830,6 +833,7 @@
frame_vector_destroy
frame_vector_to_pages
free_iova_fast
free_hyp_memcache
free_irq
free_netdev
__free_pages
@ -1083,6 +1087,7 @@
iommu_iova_to_phys
iommu_map
iommu_map_sg
io_pgtable_configure
iommu_register_device_fault_handler
iommu_report_device_fault
iommu_set_fault_handler
@ -1332,6 +1337,7 @@
__netdev_alloc_skb
netdev_err
netdev_info
netdev_refcnt_read
netdev_set_default_ethtool_ops
netdev_state_change
netdev_update_features
@ -1410,6 +1416,7 @@
of_genpd_add_provider_simple
of_get_child_by_name
of_get_cpu_node
of_get_drm_panel_display_mode
of_get_named_gpio
of_get_next_available_child
of_get_next_child
@ -1557,6 +1564,10 @@
pin_user_pages_fast
pin_user_pages_remote
pktgen_xfrm_outer_mode_output
pkvm_iommu_resume
pkvm_iommu_suspend
__pkvm_topup_hyp_alloc
__pkvm_topup_hyp_alloc_mgt
platform_bus_type
platform_device_add
platform_device_add_data
@ -1863,6 +1874,7 @@
seq_write
set_cpus_allowed_ptr
set_freezable
set_normalized_timespec64
set_page_dirty
set_page_dirty_lock
set_task_cpu
@ -2204,6 +2216,8 @@
__traceiter_android_rvh_set_user_nice_locked
__traceiter_android_rvh_tick_entry
__traceiter_android_rvh_uclamp_eff_get
__traceiter_android_rvh_ufs_complete_init
__traceiter_android_rvh_ufs_reprogram_all_keys
__traceiter_android_rvh_update_blocked_fair
__traceiter_android_rvh_update_load_avg
__traceiter_android_rvh_update_misfit_status
@ -2298,6 +2312,8 @@
__tracepoint_android_rvh_set_user_nice_locked
__tracepoint_android_rvh_tick_entry
__tracepoint_android_rvh_uclamp_eff_get
__tracepoint_android_rvh_ufs_complete_init
__tracepoint_android_rvh_ufs_reprogram_all_keys
__tracepoint_android_rvh_update_blocked_fair
__tracepoint_android_rvh_update_load_avg
__tracepoint_android_rvh_update_misfit_status

View File

@ -401,6 +401,7 @@
devm_reset_controller_register
devm_rtc_allocate_device
__devm_rtc_register_device
devm_snd_soc_register_component
devm_thermal_of_cooling_device_register
devm_thermal_of_zone_register
devm_usb_get_phy_by_node
@ -461,6 +462,7 @@
dma_buf_map_attachment
dma_buf_map_attachment_unlocked
dma_buf_put
dma_buf_set_name
dma_buf_unmap_attachment
dma_buf_unmap_attachment_unlocked
dma_contiguous_default_area
@ -516,11 +518,15 @@
driver_register
driver_set_override
driver_unregister
drm_add_edid_modes
drm_atomic_get_connector_state
drm_atomic_get_crtc_state
drm_atomic_get_new_private_obj_state
drm_atomic_get_old_private_obj_state
drm_atomic_get_private_obj_state
drm_atomic_helper_connector_destroy_state
drm_atomic_helper_connector_duplicate_state
drm_atomic_helper_connector_reset
__drm_atomic_helper_private_obj_duplicate_state
drm_atomic_helper_wait_for_vblanks
drm_atomic_private_obj_fini
@ -528,12 +534,18 @@
drm_atomic_state_default_clear
drm_atomic_state_default_release
drm_atomic_state_init
drm_bridge_add
drm_bridge_remove
drm_client_init
drm_client_modeset_commit_locked
drm_client_register
drm_connector_attach_encoder
drm_connector_cleanup
drm_connector_init
drm_connector_list_update
drm_connector_register
drm_connector_unregister
drm_connector_update_edid_property
drm_crtc_add_crc_entry
__drm_crtc_commit_free
drm_crtc_commit_wait
@ -544,6 +556,7 @@
__drm_dev_dbg
drm_dev_printk
drm_display_mode_from_cea_vic
drm_do_get_edid
drm_edid_dup
drm_edid_duplicate
drm_edid_free
@ -556,19 +569,24 @@
drm_framebuffer_unregister_private
drm_gem_mmap_obj
drm_get_connector_status_name
drm_helper_probe_single_connector_modes
drm_kms_helper_hotplug_event
drm_master_get
drm_master_put
drm_mode_convert_umode
drm_mode_copy
drm_mode_create_dp_colorspace_property
drm_mode_duplicate
drm_mode_is_420_only
drm_mode_object_put
drm_mode_probed_add
drm_mode_prune_invalid
drm_modeset_lock
drm_modeset_lock_single_interruptible
drm_mode_set_name
drm_modeset_unlock
drm_mode_sort
drm_mode_vrefresh
drm_object_property_set_value
drm_printf
__drm_printfn_debug
@ -729,6 +747,11 @@
gpio_free_array
gpio_request
gpio_to_desc
gunyah_rm_call
gunyah_rm_notifier_register
gunyah_rm_notifier_unregister
gunyah_rm_register_platform_ops
gunyah_rm_unregister_platform_ops
handle_bad_irq
handle_edge_irq
handle_fasteoi_ack_irq
@ -738,6 +761,7 @@
handle_simple_irq
handle_sysrq
hashlen_string
hdmi_audio_infoframe_init
hex_dump_to_buffer
housekeeping_cpumask
housekeeping_overridden
@ -1206,6 +1230,7 @@
of_drm_find_panel
of_find_compatible_node
of_find_device_by_node
of_find_mipi_dsi_host_by_node
of_find_node_by_name
of_find_node_by_phandle
of_find_node_opts_by_path
@ -1225,9 +1250,11 @@
of_get_property
of_get_regulator_init_data
of_get_required_opp_performance_state
of_graph_get_endpoint_by_regs
of_graph_get_next_endpoint
of_graph_get_port_parent
of_graph_get_remote_endpoint
of_graph_get_remote_port_parent
of_graph_is_present
of_graph_parse_endpoint
of_hwspin_lock_get_id
@ -1757,14 +1784,24 @@
skip_spaces
smp_call_function_single
smp_call_function_single_async
snd_ctl_add
snd_ctl_new1
snd_info_create_module_entry
snd_info_free_entry
snd_info_register
snd_pcm_add_chmap_ctls
snd_pcm_create_iec958_consumer_default
snd_pcm_fill_iec958_consumer
snd_pcm_fill_iec958_consumer_hw_params
snd_pcm_format_width
snd_pcm_hw_constraint_eld
_snd_pcm_hw_params_any
snd_soc_card_jack_new
snd_soc_component_exit_regmap
snd_soc_dapm_add_routes
snd_soc_dapm_new_widgets
snd_soc_get_pcm_runtime
snd_soc_jack_report
snd_soc_lookup_component
snd_soc_rtdcom_lookup
snd_usb_autoresume
@ -2000,6 +2037,7 @@
__traceiter_android_vh_scheduler_tick
__traceiter_android_vh_show_resume_epoch_val
__traceiter_android_vh_show_suspend_epoch_val
__traceiter_android_vh_thermal_pm_notify_suspend
__traceiter_android_vh_timer_calc_index
__traceiter_android_vh_try_fixup_sea
__traceiter_android_vh_try_to_unmap_one
@ -2102,6 +2140,7 @@
__tracepoint_android_vh_scheduler_tick
__tracepoint_android_vh_show_resume_epoch_val
__tracepoint_android_vh_show_suspend_epoch_val
__tracepoint_android_vh_thermal_pm_notify_suspend
__tracepoint_android_vh_timer_calc_index
__tracepoint_android_vh_try_fixup_sea
__tracepoint_android_vh_try_to_unmap_one
@ -2215,8 +2254,8 @@
up_read
up_write
usb_add_phy_dev
usb_alloc_dev
usb_alloc_coherent
usb_alloc_dev
usb_assign_descriptors
usb_composite_setup_continue
usb_decode_ctrl
@ -2267,6 +2306,7 @@
v4l2_m2m_register_media_controller
v4l2_m2m_request_queue
v4l2_m2m_unregister_media_controller
v4l2_s_ctrl
v4l2_subdev_call_wrappers
v4l2_subdev_init
vb2_create_bufs
@ -2332,8 +2372,3 @@
xhci_set_interrupter_moderation
xhci_stop_endpoint_sync
zap_vma_ptes
gunyah_rm_call
gunyah_rm_notifier_register
gunyah_rm_notifier_unregister
gunyah_rm_register_platform_ops
gunyah_rm_unregister_platform_ops

View File

@ -17,8 +17,13 @@
devm_extcon_register_notifier_all
devm_hwspin_lock_request_specific
_dev_info
dev_pm_opp_calc_power
dev_pm_opp_of_register_em
dev_set_name
__dynamic_netdev_dbg
em_dev_update_chip_binning
em_dev_update_perf_domain
em_pd_get
finish_wait
fortify_panic
idr_alloc

View File

@ -24,6 +24,7 @@
sched_setattr_nocheck
schedule_timeout_killable
set_blocksize
skb_orphan_partial
static_key_enable
submit_bh
__kmalloc_node
@ -49,6 +50,7 @@
__traceiter_android_vh_binder_restore_priority
__traceiter_android_vh_binder_special_task
__traceiter_android_vh_binder_wait_for_work
__traceiter_android_vh_blk_fill_rwbs
__traceiter_android_vh_cgroup_attach
__traceiter_android_vh_check_folio_look_around_ref
__traceiter_android_vh_check_nanosleep_syscall
@ -57,6 +59,7 @@
__traceiter_android_vh_configfs_uevent_work
__traceiter_android_vh_count_workingset_refault
__traceiter_android_vh_do_anonymous_page
__traceiter_android_vh_do_new_mount_fc
__traceiter_android_vh_do_swap_page
__traceiter_android_vh_do_wp_page
__traceiter_android_vh_dup_task_struct
@ -120,6 +123,7 @@
__traceiter_sched_waking
__traceiter_sys_exit
__traceiter_task_rename
__traceiter_tcp_retransmit_skb
__traceiter_workqueue_execute_end
__traceiter_workqueue_execute_start
__tracepoint_android_rvh_alloc_and_link_pwqs
@ -142,6 +146,7 @@
__tracepoint_android_vh_binder_restore_priority
__tracepoint_android_vh_binder_special_task
__tracepoint_android_vh_binder_wait_for_work
__tracepoint_android_vh_blk_fill_rwbs
__tracepoint_android_vh_cgroup_attach
__tracepoint_android_vh_check_folio_look_around_ref
__tracepoint_android_vh_check_nanosleep_syscall
@ -150,6 +155,7 @@
__tracepoint_android_vh_configfs_uevent_work
__tracepoint_android_vh_count_workingset_refault
__tracepoint_android_vh_do_anonymous_page
__tracepoint_android_vh_do_new_mount_fc
__tracepoint_android_vh_do_swap_page
__tracepoint_android_vh_do_wp_page
__tracepoint_android_vh_dup_task_struct
@ -213,6 +219,7 @@
__tracepoint_sched_waking
__tracepoint_sys_exit
__tracepoint_task_rename
__tracepoint_tcp_retransmit_skb
__tracepoint_workqueue_execute_end
__tracepoint_workqueue_execute_start
ucsi_send_command

View File

@ -88,6 +88,7 @@
__tracepoint_android_rvh_dequeue_task
cpuset_cpus_allowed
cpufreq_update_policy
cgroup_threadgroup_rwsem
# required by millet.ko
__traceiter_android_rvh_refrigerator

View File

@ -41,7 +41,7 @@ KBUILD_CFLAGS += -mgeneral-regs-only \
KBUILD_CFLAGS += $(call cc-disable-warning, psabi)
KBUILD_AFLAGS += $(compat_vdso)
KBUILD_RUSTFLAGS += --target=aarch64-unknown-none -Ctarget-feature="-neon"
KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json
KBUILD_CFLAGS += $(call cc-option,-mabi=lp64)
KBUILD_AFLAGS += $(call cc-option,-mabi=lp64)
@ -110,7 +110,6 @@ endif
ifeq ($(CONFIG_SHADOW_CALL_STACK), y)
KBUILD_CFLAGS += -ffixed-x18
KBUILD_RUSTFLAGS += -Ctarget-feature=+reserve-x18
endif
ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)

View File

@ -130,8 +130,8 @@ void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
#endif
#ifdef __KVM_NVHE_HYPERVISOR__
void __pkvm_init_switch_pgd(struct kvm_nvhe_init_params *params,
void (*finalize_fn)(void));
void __pkvm_init_switch_pgd(phys_addr_t pgd, unsigned long sp,
void (*fn)(void));
int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
unsigned long *per_cpu_base, u32 hyp_va_bits);
void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);

View File

@ -12,6 +12,7 @@ typedef void (*dyn_hcall_t)(struct user_pt_regs *);
struct kvm_hyp_iommu;
struct iommu_iotlb_gather;
struct kvm_hyp_iommu_domain;
struct kvm_iommu_paddr_cache;
#ifdef CONFIG_MODULES
enum pkvm_psci_notification {
@ -213,7 +214,7 @@ struct pkvm_module_ops {
void (*iommu_reclaim_pages_atomic)(void *p, u8 order);
int (*iommu_snapshot_host_stage2)(struct kvm_hyp_iommu_domain *domain);
int (*hyp_smp_processor_id)(void);
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_USE(1, void (*iommu_flush_unmap_cache)(struct kvm_iommu_paddr_cache *cache));
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);
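/*
 * Roughly why the ANDROID_KABI_USE() swap above is KMI-safe (simplified
 * sketch; the real macros live in include/linux/android_kabi.h):
 *
 *   #define _ANDROID_KABI_RESERVE(n)  u64 android_kabi_reserved##n
 *   #define ANDROID_KABI_USE(n, _new) union { _new; _ANDROID_KABI_RESERVE(n); }
 *
 * The anonymous union overlays the new callback on the reserved u64, so
 * sizeof(struct pkvm_module_ops) and the offsets of the remaining
 * reserved slots stay unchanged.
 */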

View File

@ -1188,7 +1188,7 @@ int __init early_brk64(unsigned long addr, unsigned long esr,
struct pt_regs *regs)
{
#ifdef CONFIG_CFI_CLANG
if ((esr_brk_comment(esr) & ~CFI_BRK_IMM_MASK) == CFI_BRK_IMM_BASE)
if (esr_is_cfi_brk(esr))
return cfi_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
#ifdef CONFIG_KASAN_SW_TAGS

View File

@ -469,10 +469,15 @@ void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
}
static void print_nvhe_hyp_panic(const char *name, u64 panic_addr)
{
kvm_err("nVHE hyp %s at: [<%016llx>] %pB!\n", name, panic_addr,
(void *)(panic_addr + kaslr_offset()));
}
static void kvm_nvhe_report_cfi_failure(u64 panic_addr)
{
kvm_err("nVHE hyp CFI failure at: [<%016llx>] %pB!\n", panic_addr,
(void *)(panic_addr + kaslr_offset()));
print_nvhe_hyp_panic("CFI failure", panic_addr);
if (IS_ENABLED(CONFIG_CFI_PERMISSIVE))
kvm_err(" (CONFIG_CFI_PERMISSIVE ignored for hyp failures)\n");
@ -484,17 +489,11 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
u64 far, u64 hpfar)
{
u64 elr_in_kimg = __phys_to_kimg(elr_phys);
u64 kaslr_off = kaslr_offset();
u64 hyp_offset = elr_in_kimg - kaslr_off - elr_virt;
u64 hyp_offset = elr_in_kimg - kaslr_offset() - elr_virt;
u64 mode = spsr & PSR_MODE_MASK;
u64 panic_addr = elr_virt + hyp_offset;
u64 mod_addr = pkvm_el2_mod_kern_va(elr_virt);
if (mod_addr) {
panic_addr = mod_addr;
kaslr_off = 0;
}
if (mode != PSR_MODE_EL2t && mode != PSR_MODE_EL2h) {
kvm_err("Invalid host exception to nVHE hyp!\n");
} else if (ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
@ -513,14 +512,18 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
if (file)
kvm_err("nVHE hyp BUG at: %s:%u!\n", file, line);
else if (mod_addr)
kvm_err("nVHE hyp BUG at: [<%016llx>] %pB!\n", mod_addr,
(void *)mod_addr);
else
kvm_err("nVHE hyp BUG at: [<%016llx>] %pB!\n", panic_addr,
(void *)(panic_addr + kaslr_off));
print_nvhe_hyp_panic("BUG", panic_addr);
} else if (IS_ENABLED(CONFIG_CFI_CLANG) && esr_is_cfi_brk(esr)) {
kvm_nvhe_report_cfi_failure(panic_addr);
} else if (mod_addr) {
kvm_err("nVHE hyp panic at: [<%016llx>] %pB!\n", mod_addr,
(void *)mod_addr);
} else {
kvm_err("nVHE hyp panic at: [<%016llx>] %pB!\n", panic_addr,
(void *)(panic_addr + kaslr_off));
print_nvhe_hyp_panic("panic", panic_addr);
}
/* Dump the nVHE hypervisor backtrace */

View File

@ -83,16 +83,15 @@ alternative_else_nop_endif
eret
sb
SYM_INNER_LABEL(__hyp_restore_elr_and_panic, SYM_L_GLOBAL)
// x0-x29,lr: hyp regs
SYM_INNER_LABEL(__guest_exit_restore_elr_and_panic, SYM_L_GLOBAL)
// x2-x29,lr: vcpu regs
// vcpu x0-x1 on the stack
stp x0, x1, [sp, #-16]!
adr_this_cpu x0, kvm_hyp_ctxt, x1
ldr x0, [x0, #CPU_ELR_EL2]
msr elr_el2, x0
ldp x0, x1, [sp], #16
SYM_INNER_LABEL(__hyp_panic, SYM_L_GLOBAL)
SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
// x2-x29,lr: vcpu regs
// vcpu x0-x1 on the stack
@ -110,7 +109,7 @@ SYM_INNER_LABEL(__hyp_panic, SYM_L_GLOBAL)
// accurate if the guest had been completely restored.
adr_this_cpu x0, kvm_hyp_ctxt, x1
adr_l x1, hyp_panic
str x1, [x0, #CPU_LR_OFFSET]
str x1, [x0, #CPU_XREG_OFFSET(30)]
get_vcpu_ptr x1, x0

View File

@ -122,10 +122,9 @@ el2_error:
eret
sb
.macro invalid_vector label, target = __hyp_panic
.macro invalid_vector label, target = __guest_exit_panic
.align 2
SYM_CODE_START_LOCAL(\label)
stp x0, x1, [sp, #-16]!
b \target
SYM_CODE_END(\label)
.endm

View File

@ -720,7 +720,7 @@ guest:
static inline void __kvm_unexpected_el2_exception(void)
{
extern char __hyp_restore_elr_and_panic[];
extern char __guest_exit_restore_elr_and_panic[];
unsigned long addr, fixup;
struct kvm_exception_table_entry *entry, *end;
unsigned long elr_el2 = read_sysreg(elr_el2);
@ -743,7 +743,7 @@ static inline void __kvm_unexpected_el2_exception(void)
/* Trigger a panic after restoring the hyp context. */
this_cpu_ptr(&kvm_hyp_ctxt)->sys_regs[ELR_EL2] = elr_el2;
write_sysreg(__hyp_restore_elr_and_panic, elr_el2);
write_sysreg(__guest_exit_restore_elr_and_panic, elr_el2);
}
#endif /* __ARM64_KVM_HYP_SWITCH_H__ */

View File

@ -6,6 +6,7 @@
#include <kvm/iommu.h>
#include <linux/io-pgtable.h>
#include <nvhe/spinlock.h>
#if IS_ENABLED(CONFIG_ARM_SMMU_V3_PKVM)
#include <linux/io-pgtable-arm.h>
@ -70,6 +71,30 @@ struct kvm_iommu_paddr_cache {
size_t pgsize[KVM_IOMMU_PADDR_CACHE_MAX];
};
void kvm_iommu_flush_unmap_cache(struct kvm_iommu_paddr_cache *cache);
static inline hyp_spinlock_t *kvm_iommu_get_lock(struct kvm_hyp_iommu *iommu)
{
/* See struct kvm_hyp_iommu */
BUILD_BUG_ON(sizeof(iommu->lock) != sizeof(hyp_spinlock_t));
return (hyp_spinlock_t *)(&iommu->lock);
}
static inline void kvm_iommu_lock_init(struct kvm_hyp_iommu *iommu)
{
hyp_spin_lock_init(kvm_iommu_get_lock(iommu));
}
static inline void kvm_iommu_lock(struct kvm_hyp_iommu *iommu)
{
hyp_spin_lock(kvm_iommu_get_lock(iommu));
}
static inline void kvm_iommu_unlock(struct kvm_hyp_iommu *iommu)
{
hyp_spin_unlock(kvm_iommu_get_lock(iommu));
}
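/*
 * These wrappers let EL2 code use the lock field of the shared
 * struct kvm_hyp_iommu (kept as a plain integer so the kernel/hyp ABI
 * stays fixed) as a hyp_spinlock_t; the BUILD_BUG_ON above enforces
 * that the two types remain the same size.
 */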
/**
* struct kvm_iommu_ops - KVM iommu ops
* @init: init the driver called once before the kernel de-privilege

View File

@ -183,7 +183,7 @@ SYM_FUNC_END(__host_hvc)
.endif
.endm
.macro __host_el2_vect handler:req
.macro invalid_host_el2_vect
.align 7
/*
@ -196,14 +196,13 @@ SYM_FUNC_END(__host_hvc)
tbz x0, #PAGE_SHIFT, .L__hyp_sp_overflow\@
sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0
sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp
/* If a guest is loaded, panic out of it. */
/*
* The panic may not be clean if the exception is taken before the host
* context has been saved by __host_exit or after the hyp context has
* been partially clobbered by __host_enter.
*/
stp x0, x1, [sp, #-16]!
b \handler
b hyp_panic
.L__hyp_sp_overflow\@:
/* Switch to the overflow stack */
@ -213,10 +212,6 @@ SYM_FUNC_END(__host_hvc)
ASM_BUG()
.endm
.macro host_el2_sync_vect
__host_el2_vect __hyp_panic
.endm
.macro invalid_host_el1_vect
.align 7
mov x0, xzr /* restore_host = false */
@ -226,10 +221,6 @@ SYM_FUNC_END(__host_hvc)
b __hyp_do_panic
.endm
.macro invalid_host_el2_vect
__host_el2_vect __hyp_panic
.endm
/*
* The host vector does not use an ESB instruction in order to avoid consuming
* SErrors that should only be consumed by the host. Guest entry is deferred by
@ -247,7 +238,7 @@ SYM_CODE_START(__kvm_hyp_host_vector)
invalid_host_el2_vect // FIQ EL2t
invalid_host_el2_vect // Error EL2t
host_el2_sync_vect // Synchronous EL2h
invalid_host_el2_vect // Synchronous EL2h
invalid_host_el2_vect // IRQ EL2h
invalid_host_el2_vect // FIQ EL2h
invalid_host_el2_vect // Error EL2h

View File

@ -278,39 +278,35 @@ alternative_else_nop_endif
SYM_CODE_END(__kvm_handle_stub_hvc)
/*
* void __pkvm_init_switch_pgd(struct kvm_nvhe_init_params *params,
* void (*finalize_fn)(void));
* void __pkvm_init_switch_pgd(phys_addr_t pgd, unsigned long sp,
* void (*fn)(void));
*
* SYM_TYPED_FUNC_START() allows C to call this ID-mapped function indirectly
* using a physical pointer without triggering a kCFI failure.
*/
SYM_TYPED_FUNC_START(__pkvm_init_switch_pgd)
/* Load the inputs from the VA pointer before turning the MMU off */
ldr x5, [x0, #NVHE_INIT_PGD_PA]
ldr x0, [x0, #NVHE_INIT_STACK_HYP_VA]
/* Turn the MMU off */
pre_disable_mmu_workaround
mrs x2, sctlr_el2
bic x3, x2, #SCTLR_ELx_M
msr sctlr_el2, x3
mrs x3, sctlr_el2
bic x4, x3, #SCTLR_ELx_M
msr sctlr_el2, x4
isb
tlbi alle2
/* Install the new pgtables */
phys_to_ttbr x4, x5
phys_to_ttbr x5, x0
alternative_if ARM64_HAS_CNP
orr x4, x4, #TTBR_CNP_BIT
orr x5, x5, #TTBR_CNP_BIT
alternative_else_nop_endif
msr ttbr0_el2, x4
msr ttbr0_el2, x5
/* Set the new stack pointer */
mov sp, x0
mov sp, x1
/* And turn the MMU back on! */
set_sctlr_el2 x2
ret x1
set_sctlr_el2 x3
ret x2
SYM_FUNC_END(__pkvm_init_switch_pgd)
.popsection
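/*
 * __pkvm_init() invokes this function through an ID-mapped physical
 * pointer, fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd) (see the
 * __pkvm_init() hunk below), which is why the kCFI type annotation from
 * SYM_TYPED_FUNC_START() is needed for the indirect call to pass.
 */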

View File

@ -331,7 +331,7 @@ size_t kvm_iommu_map_pages(pkvm_handle_t domain_id, unsigned long iova,
* so far.
*/
if (pgcount)
__pkvm_host_unuse_dma(paddr, pgcount * pgsize);
__pkvm_host_unuse_dma(paddr + total_mapped, pgcount * pgsize);
domain_put(domain);
return total_mapped;
@ -380,7 +380,7 @@ void kvm_iommu_iotlb_gather_add_page(struct kvm_hyp_iommu_domain *domain,
kvm_iommu_iotlb_gather_add_range(gather, iova, size);
}
static void kvm_iommu_flush_unmap_cache(struct kvm_iommu_paddr_cache *cache)
void kvm_iommu_flush_unmap_cache(struct kvm_iommu_paddr_cache *cache)
{
while (cache->ptr) {
cache->ptr--;
@ -471,13 +471,13 @@ static int iommu_power_on(struct kvm_power_domain *pd)
bool prev;
int ret;
hyp_spin_lock(&iommu->lock);
kvm_iommu_lock(iommu);
prev = iommu->power_is_off;
iommu->power_is_off = false;
ret = kvm_iommu_ops->resume ? kvm_iommu_ops->resume(iommu) : 0;
if (ret)
iommu->power_is_off = prev;
hyp_spin_unlock(&iommu->lock);
kvm_iommu_unlock(iommu);
return ret;
}
@ -488,13 +488,13 @@ static int iommu_power_off(struct kvm_power_domain *pd)
bool prev;
int ret;
hyp_spin_lock(&iommu->lock);
kvm_iommu_lock(iommu);
prev = iommu->power_is_off;
iommu->power_is_off = true;
ret = kvm_iommu_ops->suspend ? kvm_iommu_ops->suspend(iommu) : 0;
if (ret)
iommu->power_is_off = prev;
hyp_spin_unlock(&iommu->lock);
kvm_iommu_unlock(iommu);
return ret;
}
@ -505,8 +505,7 @@ static const struct kvm_power_domain_ops iommu_power_ops = {
int kvm_iommu_init_device(struct kvm_hyp_iommu *iommu)
{
/* See struct kvm_hyp_iommu */
BUILD_BUG_ON(sizeof(u32) != sizeof(hyp_spinlock_t));
kvm_iommu_lock_init(iommu);
return pkvm_init_power_domain(&iommu->power_domain, &iommu_power_ops);
}

View File

@ -2826,7 +2826,7 @@ int __pkvm_host_reclaim_page(struct pkvm_hyp_vm *vm, u64 pfn, u64 ipa, u8 order)
case PKVM_PAGE_OWNED:
WARN_ON(__host_check_page_state_range(phys, page_size, PKVM_NOPAGE));
hyp_poison_page(phys);
psci_mem_protect_dec(order);
psci_mem_protect_dec(1 << order);
break;
case PKVM_PAGE_SHARED_BORROWED:
case PKVM_PAGE_SHARED_BORROWED | PKVM_PAGE_RESTRICTED_PROT:

View File

@ -152,6 +152,7 @@ const struct pkvm_module_ops module_ops = {
.iommu_reclaim_pages_atomic = kvm_iommu_reclaim_pages_atomic,
.iommu_snapshot_host_stage2 = kvm_iommu_snapshot_host_stage2,
.hyp_smp_processor_id = _hyp_smp_processor_id,
.iommu_flush_unmap_cache = kvm_iommu_flush_unmap_cache,
};
int __pkvm_init_module(void *module_init)

View File

@ -400,6 +400,7 @@ out:
int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
unsigned long *per_cpu_base, u32 hyp_va_bits)
{
struct kvm_nvhe_init_params *params;
void *virt = hyp_phys_to_virt(phys);
typeof(__pkvm_init_switch_pgd) *fn;
int ret;
@ -427,8 +428,9 @@ int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
update_nvhe_init_params();
/* Jump in the idmap page to switch to the new page-tables */
params = this_cpu_ptr(&kvm_init_params);
fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
fn(this_cpu_ptr(&kvm_init_params), __pkvm_init_finalise);
fn(params->pgd_pa, params->stack_hyp_va, __pkvm_init_finalise);
unreachable();
}

View File

@ -57,6 +57,7 @@ config CRYPTO_FIPS_VERSION
config CRYPTO_FIPS140_MOD
tristate "Enable FIPS 140 cryptographic module"
depends on ARM64
depends on CC_IS_CLANG
depends on m
select CRYPTO_FIPS140_MERGE_MOD_SECTIONS
help

View File

@ -696,18 +696,8 @@ static bool update_fips140_library_routines(void)
return ret == 0;
}
/*
* Initialize the FIPS 140 module.
*
* Note: this routine iterates over the contents of the initcall section, which
* consists of an array of function pointers that was emitted by the linker
* rather than the compiler. This means that these function pointers lack the
* usual CFI stubs that the compiler emits when CFI codegen is enabled. So
* let's disable CFI locally when handling the initcall array, to avoid
surprises.
*/
static int __init __attribute__((__no_sanitize__("cfi")))
fips140_init(void)
/* Initialize the FIPS 140 module */
static int __init fips140_init(void)
{
const initcall_entry_t *initcall;
@ -720,7 +710,7 @@ fips140_init(void)
for (initcall = fips140_initcalls_start + 1;
initcall < &__fips140_initcalls_end;
initcall++) {
int (*init)(void) = offset_to_ptr(initcall);
initcall_t init = offset_to_ptr(initcall);
int err = init();
/*

View File

@ -139,8 +139,6 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
int minor, ret;
struct dentry *dentry, *root;
rust_binder_device device = NULL;
char *name = NULL;
size_t name_len;
struct inode *inode = NULL;
struct super_block *sb = ref_inode->i_sb;
struct binderfs_info *info = sb->s_fs_info;
@ -168,13 +166,8 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
ret = -ENOMEM;
req->name[BINDERFS_MAX_NAME] = '\0'; /* NUL-terminate */
name_len = strlen(req->name);
/* Make sure to include terminating NUL byte */
name = kmemdup(req->name, name_len + 1, GFP_KERNEL);
if (!name)
goto err;
device = rust_binder_new_device(name);
device = rust_binder_new_device(req->name);
if (!device)
goto err;
@ -202,7 +195,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
inode_lock(d_inode(root));
/* look it up */
dentry = lookup_one_len(name, root, name_len);
dentry = lookup_one_len(req->name, root, strlen(req->name));
if (IS_ERR(dentry)) {
inode_unlock(d_inode(root));
ret = PTR_ERR(dentry);
@ -225,7 +218,6 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
return 0;
err:
kfree(name);
rust_binder_remove_device(device);
mutex_lock(&binderfs_minors_mutex);
--info->device_count;

View File

@ -848,9 +848,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
alloc->buffer = vma->vm_start;
alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
sizeof(alloc->pages[0]),
GFP_KERNEL);
alloc->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE,
sizeof(alloc->pages[0]),
GFP_KERNEL);
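/*
 * kvcalloc() tries kmalloc() first and falls back to vmalloc() when a
 * physically contiguous allocation of this size is unlikely to succeed,
 * so page arrays for large mmap regions survive memory fragmentation;
 * it must be paired with kvfree(), as the error and release paths below
 * now do.
 */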
if (alloc->pages == NULL) {
ret = -ENOMEM;
failure_string = "alloc page array";
@ -881,7 +881,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
return 0;
err_alloc_buf_struct_failed:
kfree(alloc->pages);
kvfree(alloc->pages);
alloc->pages = NULL;
err_alloc_pages_failed:
alloc->buffer = 0;
@ -953,7 +953,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
__free_page(alloc->pages[i].page_ptr);
page_count++;
}
kfree(alloc->pages);
kvfree(alloc->pages);
}
spin_unlock(&alloc->lock);
if (alloc->mm)

View File

@ -31,7 +31,6 @@
#include <trace/hooks/madvise.h>
#include <trace/hooks/iommu.h>
#include <trace/hooks/net.h>
#include <trace/hooks/pm_domain.h>
#include <trace/hooks/cpuidle_psci.h>
#include <trace/hooks/vmscan.h>
#include <trace/hooks/avc.h>
@ -80,8 +79,6 @@
* Export tracepoints that act as a bare tracehook (ie: have no trace event
* associated with them) to allow external modules to probe them.
*/
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sk_alloc);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sk_free);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_tcp_sendmsg);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_tcp_recvmsg);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_udp_sendmsg);
@ -138,6 +135,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_freq_table_limits);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpufreq_resolve_freq);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpufreq_fast_switch);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpufreq_target);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpufreq_online);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpu_cgroup_attach);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpu_cgroup_online);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpuinfo_c_show);
@ -146,6 +144,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_si_mem_available_adjust);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_si_meminfo_adjust);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_thermal_pm_notify_suspend);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_fill_prdt);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_ufs_reprogram_all_keys);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_ufs_complete_init);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_prepare_command);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_update_sysfs);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_send_command);
@ -161,7 +161,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_send_uic_command);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_send_tm_command);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_check_int_errors);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_update_sdev);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_clock_scaling);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cgroup_attach);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_iommu_setup_dma_ops);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_iommu_alloc_insert_iova);
@ -173,7 +172,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_psci_cpu_suspend);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_iommu_iovad_init_alloc_algo);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_iommu_limit_align_shift);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ptype_head);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_allow_domain_state);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpuidle_psci_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpuidle_psci_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpufreq_transition);
@ -264,7 +262,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sha256);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_aes_expandkey);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_aes_encrypt);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_aes_decrypt);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mmc_update_mmc_queue);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_downgrade_wake_finish);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_wake_finish);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_meminfo_proc_show);
@ -376,6 +373,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_percpu_rwsem_down_read);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_percpu_rwsem_up_write);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_percpu_rwsem_wait_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_read_trylock_failed);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sk_alloc);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sk_free);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sd_init_unmap_multi_segment);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sd_setup_unmap_multi_segment);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_scan_type);
@ -421,6 +420,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_tsk_need_resched_lazy);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_usb_dev_suspend);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_usb_dev_resume);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_bd_link_disk_holder);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_new_mount_fc);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_fill_rwbs);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_use_amu_fie);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_resume_begin);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_resume_end);

View File

@ -12,8 +12,6 @@
#include <linux/cpumask.h>
#include <linux/ktime.h>
#include <trace/hooks/pm_domain.h>
static int dev_update_qos_constraint(struct device *dev, void *data)
{
s64 *constraint_ns_p = data;
@ -181,11 +179,6 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
struct pm_domain_data *pdd;
s64 min_off_time_ns;
s64 off_on_time_ns;
bool allow = true;
trace_android_vh_allow_domain_state(genpd, state, &allow);
if (!allow)
return false;
off_on_time_ns = genpd->states[state].power_off_latency_ns +
genpd->states[state].power_on_latency_ns;

View File

@ -1497,6 +1497,8 @@ static int cpufreq_online(unsigned int cpu)
goto out_destroy_policy;
}
trace_android_vh_cpufreq_online(policy);
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_CREATE_POLICY, policy);
}

View File

@ -41,6 +41,7 @@ extern const struct pkvm_module_ops *mod_ops;
#define kvm_iommu_donate_pages_atomic(x) CALL_FROM_OPS(iommu_donate_pages_atomic, x)
#define kvm_iommu_reclaim_pages_atomic(x, y) CALL_FROM_OPS(iommu_reclaim_pages_atomic, x, y)
#define kvm_iommu_snapshot_host_stage2(x) CALL_FROM_OPS(iommu_snapshot_host_stage2, x)
#define kvm_iommu_flush_unmap_cache(x) CALL_FROM_OPS(iommu_flush_unmap_cache, x)
#endif
#endif /* __ARM_SMMU_V3_MODULE__ */

View File

@ -551,13 +551,13 @@ static void smmu_tlb_flush_all(void *cookie)
hyp_read_lock(&smmu_domain->lock);
list_for_each_entry(iommu_node, &smmu_domain->iommu_list, list) {
smmu = to_smmu(iommu_node->iommu);
hyp_spin_lock(&smmu->iommu.lock);
kvm_iommu_lock(&smmu->iommu);
if (smmu->iommu.power_is_off && smmu->caches_clean_on_power_on) {
hyp_spin_unlock(&smmu->iommu.lock);
kvm_iommu_unlock(&smmu->iommu);
continue;
}
WARN_ON(smmu_send_cmd(smmu, &cmd));
hyp_spin_unlock(&smmu->iommu.lock);
kvm_iommu_unlock(&smmu->iommu);
}
hyp_read_unlock(&smmu_domain->lock);
}
@ -572,7 +572,7 @@ static int smmu_tlb_inv_range_smmu(struct hyp_arm_smmu_v3_device *smmu,
size_t inv_range = granule;
struct hyp_arm_smmu_v3_domain *smmu_domain = domain->priv;
hyp_spin_lock(&smmu->iommu.lock);
kvm_iommu_lock(&smmu->iommu);
if (smmu->iommu.power_is_off && smmu->caches_clean_on_power_on)
goto out_ret;
@ -633,7 +633,7 @@ static int smmu_tlb_inv_range_smmu(struct hyp_arm_smmu_v3_device *smmu,
ret = smmu_sync_cmd(smmu);
out_ret:
hyp_spin_unlock(&smmu->iommu.lock);
kvm_iommu_unlock(&smmu->iommu);
return ret;
}
@ -997,7 +997,7 @@ static int smmu_attach_dev(struct kvm_hyp_iommu *iommu, struct kvm_hyp_iommu_dom
struct domain_iommu_node *iommu_node = NULL;
hyp_write_lock(&smmu_domain->lock);
hyp_spin_lock(&iommu->lock);
kvm_iommu_lock(iommu);
dst = smmu_get_ste_ptr(smmu, sid);
if (!dst)
goto out_unlock;
@ -1087,7 +1087,7 @@ static int smmu_attach_dev(struct kvm_hyp_iommu *iommu, struct kvm_hyp_iommu_dom
out_unlock:
if (ret && iommu_node)
hyp_free(iommu_node);
hyp_spin_unlock(&iommu->lock);
kvm_iommu_unlock(iommu);
hyp_write_unlock(&smmu_domain->lock);
return ret;
}
@ -1103,7 +1103,7 @@ static int smmu_detach_dev(struct kvm_hyp_iommu *iommu, struct kvm_hyp_iommu_dom
u64 *cd_table, *cd;
hyp_write_lock(&smmu_domain->lock);
hyp_spin_lock(&iommu->lock);
kvm_iommu_lock(iommu);
dst = smmu_get_ste_ptr(smmu, sid);
if (!dst)
goto out_unlock;
@ -1145,7 +1145,7 @@ static int smmu_detach_dev(struct kvm_hyp_iommu *iommu, struct kvm_hyp_iommu_dom
smmu_put_ref_domain(smmu, smmu_domain);
out_unlock:
hyp_spin_unlock(&iommu->lock);
kvm_iommu_unlock(iommu);
hyp_write_unlock(&smmu_domain->lock);
return ret;
}
@ -1300,14 +1300,20 @@ static void kvm_iommu_unmap_walker(struct io_pgtable_ctxt *ctxt)
struct kvm_iommu_walk_data *data = (struct kvm_iommu_walk_data *)ctxt->arg;
struct kvm_iommu_paddr_cache *cache = data->cache;
cache->paddr[cache->ptr] = ctxt->addr;
cache->pgsize[cache->ptr++] = ctxt->size;
/*
* It is guaranteed unmap is called with max of the cache size,
* see kvm_iommu_unmap_pages()
*/
WARN_ON(cache->ptr == KVM_IOMMU_PADDR_CACHE_MAX);
cache->paddr[cache->ptr] = ctxt->addr;
cache->pgsize[cache->ptr++] = ctxt->size;
/* Make more space. */
if (cache->ptr == KVM_IOMMU_PADDR_CACHE_MAX) {
/* Must invalidate TLB first. */
smmu_iotlb_sync(data->cookie, data->iotlb_gather);
iommu_iotlb_gather_init(data->iotlb_gather);
kvm_iommu_flush_unmap_cache(cache);
}
}
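/*
 * Draining the cache inside the walker (instead of WARN-ing once it is
 * full) lets a single unmap span more than KVM_IOMMU_PADDR_CACHE_MAX
 * pages; the IOTLB sync has to happen before the flush so pages are only
 * handed back once no stale translation can still reach them.
 */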
static size_t smmu_unmap_pages(struct kvm_hyp_iommu_domain *domain, unsigned long iova,

View File

@ -46,7 +46,6 @@
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <trace/hooks/mmc.h>
#include <linux/uaccess.h>
@ -3030,7 +3029,6 @@ static int mmc_blk_probe(struct mmc_card *card)
ret = PTR_ERR(md);
goto out_free;
}
trace_android_vh_mmc_update_mmc_queue(card, &md->queue);
ret = mmc_blk_alloc_parts(card, md);
if (ret)

View File

@ -1538,6 +1538,12 @@ static int thermal_pm_notify(struct notifier_block *nb,
list_for_each_entry(tz, &thermal_tz_list, node) {
mutex_lock(&tz->lock);
trace_android_vh_thermal_pm_notify_suspend(tz, &irq_wakeable);
if (irq_wakeable) {
mutex_unlock(&tz->lock);
continue;
}
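/*
 * Zones the vendor hook marks as IRQ-wakeable keep handling interrupts
 * across suspend, so they are neither flagged suspended here nor
 * reinitialised on resume below.
 */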
tz->suspended = true;
mutex_unlock(&tz->lock);
@ -1553,11 +1559,13 @@ static int thermal_pm_notify(struct notifier_block *nb,
list_for_each_entry(tz, &thermal_tz_list, node) {
mutex_lock(&tz->lock);
tz->suspended = false;
trace_android_vh_thermal_pm_notify_suspend(tz, &irq_wakeable);
if (irq_wakeable)
if (irq_wakeable) {
mutex_unlock(&tz->lock);
continue;
}
tz->suspended = false;
thermal_zone_device_init(tz);
__thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);

View File

@ -6,6 +6,9 @@
#include <ufs/ufshcd.h>
#include "ufshcd-crypto.h"
#undef CREATE_TRACE_POINTS
#include <trace/hooks/ufshcd.h>
/* Blk-crypto modes supported by UFS crypto */
static const struct ufs_crypto_alg_entry {
enum ufs_crypto_alg ufs_alg;
@ -122,7 +125,13 @@ bool ufshcd_crypto_enable(struct ufs_hba *hba)
return false;
/* Reset might clear all keys, so reprogram all the keys. */
blk_crypto_reprogram_all_keys(&hba->crypto_profile);
if (hba->crypto_profile.num_slots) {
int err = -EOPNOTSUPP;
trace_android_rvh_ufs_reprogram_all_keys(hba, &err);
if (err == -EOPNOTSUPP)
blk_crypto_reprogram_all_keys(&hba->crypto_profile);
}
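/*
 * The restricted vendor hook may override err; if no module handles it,
 * err stays -EOPNOTSUPP and the generic
 * blk_crypto_reprogram_all_keys() fallback runs, preserving the old
 * behaviour on platforms that do not take over key reprogramming.
 */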
if (hba->android_quirks & UFSHCD_ANDROID_QUIRK_BROKEN_CRYPTO_ENABLE)
return false;

View File

@ -1396,8 +1396,6 @@ static int ufshcd_devfreq_target(struct device *dev,
struct list_head *clk_list = &hba->clk_list_head;
struct ufs_clk_info *clki;
unsigned long irq_flags;
bool force_out = false;
bool force_scaling = false;
if (!ufshcd_is_clkscaling_supported(hba))
return -EINVAL;
@ -1430,11 +1428,8 @@ static int ufshcd_devfreq_target(struct device *dev,
scale_up = *freq == clki->max_freq;
if (!scale_up)
*freq = clki->min_freq;
trace_android_vh_ufs_clock_scaling(hba, &force_out, &force_scaling, &scale_up);
/* Update the frequency */
if (force_out || (!force_scaling && !ufshcd_is_devfreq_scaling_required(hba, scale_up))) {
if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
ret = 0;
goto out; /* no state change required */
@ -8850,6 +8845,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
trace_android_rvh_ufs_complete_init(hba);
out:
spin_lock_irqsave(hba->host->host_lock, flags);
if (ret)

View File

@ -2189,7 +2189,6 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
{
unsigned long flags;
int ret;
u32 reg;
@ -2247,9 +2246,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST) {
dwc3_otg_host_init(dwc);
} else if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
spin_lock_irqsave(&dwc->lock, flags);
dwc3_gadget_resume(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
}
break;

View File

@ -6,7 +6,11 @@
# Rewritten to use lists instead of if-statements.
#
ifdef CONFIG_GKI_HACKS_TO_FIX
subdir-ccflags-y += -DANDROID_GKI_VFS_EXPORT_ONLY=VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
else
subdir-ccflags-y += -DANDROID_GKI_VFS_EXPORT_ONLY=""
endif
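# ANDROID_GKI_VFS_EXPORT_ONLY is consumed as the symbol namespace for
# VFS-internal exports (EXPORT_SYMBOL_NS() and friends): with the GKI
# hacks enabled, the deliberately unwieldy namespace name signals that
# only filesystems, not drivers, should import these symbols; otherwise
# the namespace collapses to the empty string.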
obj-y := open.o read_write.o file_table.o super.o \
char_dev.o stat.o exec.o pipe.o namei.o fcntl.o \

View File

@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_EROFS_FS) += erofs.o
erofs-objs := super.o inode.o data.o namei.o dir.o utils.o sysfs.o
erofs-objs := super.o inode.o data.o namei.o dir.o sysfs.o
erofs-$(CONFIG_EROFS_FS_XATTR) += xattr.o
erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o pcpubuf.o
erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o zutil.o
erofs-$(CONFIG_EROFS_FS_ZIP_LZMA) += decompressor_lzma.o
erofs-$(CONFIG_EROFS_FS_ZIP_DEFLATE) += decompressor_deflate.o
erofs-$(CONFIG_EROFS_FS_ONDEMAND) += fscache.o

View File

@ -11,13 +11,12 @@
struct z_erofs_decompress_req {
struct super_block *sb;
struct page **in, **out;
unsigned short pageofs_in, pageofs_out;
unsigned int inputsize, outputsize;
/* indicate the algorithm will be used for decompression */
unsigned int alg;
unsigned int alg; /* the algorithm for decompression */
bool inplace_io, partial_decoding, fillgaps;
gfp_t gfp; /* allocation flags for extra temporary buffers */
};
struct z_erofs_decompressor {

View File

@ -55,7 +55,7 @@ static int z_erofs_load_lz4_config(struct super_block *sb,
sbi->lz4.max_distance_pages = distance ?
DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
LZ4_MAX_DISTANCE_PAGES;
return erofs_pcpubuf_growsize(sbi->lz4.max_pclusterblks);
return z_erofs_gbuf_growsize(sbi->lz4.max_pclusterblks);
}
/*
@ -112,8 +112,9 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
victim = availables[--top];
get_page(victim);
} else {
victim = erofs_allocpage(pagepool,
GFP_KERNEL | __GFP_NOFAIL);
victim = __erofs_allocpage(pagepool, rq->gfp, true);
if (!victim)
return -ENOMEM;
set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
}
rq->out[i] = victim;
@ -159,7 +160,7 @@ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
docopy:
/* Or copy compressed data which can be overlapped to per-CPU buffer */
in = rq->in;
src = erofs_get_pcpubuf(ctx->inpages);
src = z_erofs_get_gbuf(ctx->inpages);
if (!src) {
DBG_BUGON(1);
kunmap_local(inpage);
@ -266,7 +267,7 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
} else if (maptype == 1) {
vm_unmap_ram(src, ctx->inpages);
} else if (maptype == 2) {
erofs_put_pcpubuf(src);
z_erofs_put_gbuf(src);
} else if (maptype != 3) {
DBG_BUGON(1);
return -EFAULT;

View File

@ -99,7 +99,7 @@ failed:
}
int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
struct page **pagepool)
struct page **pgpl)
{
const unsigned int nrpages_out =
PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
@ -162,8 +162,12 @@ again:
strm->z.avail_out = min_t(u32, outsz, PAGE_SIZE - pofs);
outsz -= strm->z.avail_out;
if (!rq->out[no]) {
rq->out[no] = erofs_allocpage(pagepool,
GFP_KERNEL | __GFP_NOFAIL);
rq->out[no] = erofs_allocpage(pgpl, rq->gfp);
if (!rq->out[no]) {
kout = NULL;
err = -ENOMEM;
break;
}
set_page_private(rq->out[no],
Z_EROFS_SHORTLIVED_PAGE);
}
@ -215,8 +219,11 @@ again:
DBG_BUGON(erofs_page_is_managed(EROFS_SB(sb),
rq->in[j]));
tmppage = erofs_allocpage(pagepool,
GFP_KERNEL | __GFP_NOFAIL);
tmppage = erofs_allocpage(pgpl, rq->gfp);
if (!tmppage) {
err = -ENOMEM;
goto failed;
}
set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
copy_highpage(tmppage, rq->in[j]);
rq->in[j] = tmppage;
@ -234,7 +241,7 @@ again:
break;
}
}
failed:
if (zlib_inflateEnd(&strm->z) != Z_OK && !err)
err = -EIO;
if (kout)

View File

@ -151,7 +151,7 @@ again:
}
int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
struct page **pagepool)
struct page **pgpl)
{
const unsigned int nrpages_out =
PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
@ -218,8 +218,11 @@ again:
PAGE_SIZE - pageofs);
outlen -= strm->buf.out_size;
if (!rq->out[no] && rq->fillgaps) { /* deduped */
rq->out[no] = erofs_allocpage(pagepool,
GFP_KERNEL | __GFP_NOFAIL);
rq->out[no] = erofs_allocpage(pgpl, rq->gfp);
if (!rq->out[no]) {
err = -ENOMEM;
break;
}
set_page_private(rq->out[no],
Z_EROFS_SHORTLIVED_PAGE);
}
@ -261,8 +264,11 @@ again:
DBG_BUGON(erofs_page_is_managed(EROFS_SB(rq->sb),
rq->in[j]));
tmppage = erofs_allocpage(pagepool,
GFP_KERNEL | __GFP_NOFAIL);
tmppage = erofs_allocpage(pgpl, rq->gfp);
if (!tmppage) {
err = -ENOMEM;
goto failed;
}
set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
copy_highpage(tmppage, rq->in[j]);
rq->in[j] = tmppage;
@ -280,6 +286,7 @@ again:
break;
}
}
failed:
if (no < nrpages_out && strm->buf.out)
kunmap(rq->out[no]);
if (ni < nrpages_in)

View File

@ -438,7 +438,11 @@ void erofs_unregister_sysfs(struct super_block *sb);
int __init erofs_init_sysfs(void);
void erofs_exit_sysfs(void);
struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp);
struct page *__erofs_allocpage(struct page **pagepool, gfp_t gfp, bool tryrsv);
static inline struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp)
{
return __erofs_allocpage(pagepool, gfp, false);
}
static inline void erofs_pagepool_add(struct page **pagepool, struct page *page)
{
set_page_private(page, (unsigned long)*pagepool);
@ -463,11 +467,11 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
struct erofs_workgroup *egrp);
int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
int flags);
void *erofs_get_pcpubuf(unsigned int requiredpages);
void erofs_put_pcpubuf(void *ptr);
int erofs_pcpubuf_growsize(unsigned int nrpages);
void __init erofs_pcpubuf_init(void);
void erofs_pcpubuf_exit(void);
void *z_erofs_get_gbuf(unsigned int requiredpages);
void z_erofs_put_gbuf(void *ptr);
int z_erofs_gbuf_growsize(unsigned int nrpages);
int __init z_erofs_gbuf_init(void);
void z_erofs_gbuf_exit(void);
int erofs_init_managed_cache(struct super_block *sb);
int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb);
#else
@ -477,8 +481,8 @@ static inline int erofs_init_shrinker(void) { return 0; }
static inline void erofs_exit_shrinker(void) {}
static inline int z_erofs_init_zip_subsystem(void) { return 0; }
static inline void z_erofs_exit_zip_subsystem(void) {}
static inline void erofs_pcpubuf_init(void) {}
static inline void erofs_pcpubuf_exit(void) {}
static inline int z_erofs_gbuf_init(void) { return 0; }
static inline void z_erofs_gbuf_exit(void) {}
static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; }
#endif /* !CONFIG_EROFS_FS_ZIP */

View File

@ -1,148 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) Gao Xiang <xiang@kernel.org>
*
* For low-latency decompression algorithms (e.g. lz4), reserve consecutive
* per-CPU virtual memory (in pages) in advance to store such inplace I/O
* data if inplace decompression fails (due to an unmet inplace margin,
* for example).
*/
#include "internal.h"
struct erofs_pcpubuf {
raw_spinlock_t lock;
void *ptr;
struct page **pages;
unsigned int nrpages;
};
static DEFINE_PER_CPU(struct erofs_pcpubuf, erofs_pcb);
void *erofs_get_pcpubuf(unsigned int requiredpages)
__acquires(pcb->lock)
{
struct erofs_pcpubuf *pcb = &get_cpu_var(erofs_pcb);
raw_spin_lock(&pcb->lock);
/* check if the per-CPU buffer is too small */
if (requiredpages > pcb->nrpages) {
raw_spin_unlock(&pcb->lock);
put_cpu_var(erofs_pcb);
/* (for sparse checker) pretend pcb->lock is still taken */
__acquire(pcb->lock);
return NULL;
}
return pcb->ptr;
}
void erofs_put_pcpubuf(void *ptr) __releases(pcb->lock)
{
struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, smp_processor_id());
DBG_BUGON(pcb->ptr != ptr);
raw_spin_unlock(&pcb->lock);
put_cpu_var(erofs_pcb);
}
/* the next step: support per-CPU page buffers hotplug */
int erofs_pcpubuf_growsize(unsigned int nrpages)
{
static DEFINE_MUTEX(pcb_resize_mutex);
static unsigned int pcb_nrpages;
struct page *pagepool = NULL;
int delta, cpu, ret, i;
mutex_lock(&pcb_resize_mutex);
delta = nrpages - pcb_nrpages;
ret = 0;
/* avoid shrinking pcpubuf, since no idea how many fses rely on */
if (delta <= 0)
goto out;
for_each_possible_cpu(cpu) {
struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu);
struct page **pages, **oldpages;
void *ptr, *old_ptr;
pages = kmalloc_array(nrpages, sizeof(*pages), GFP_KERNEL);
if (!pages) {
ret = -ENOMEM;
break;
}
for (i = 0; i < nrpages; ++i) {
pages[i] = erofs_allocpage(&pagepool, GFP_KERNEL);
if (!pages[i]) {
ret = -ENOMEM;
oldpages = pages;
goto free_pagearray;
}
}
ptr = vmap(pages, nrpages, VM_MAP, PAGE_KERNEL);
if (!ptr) {
ret = -ENOMEM;
oldpages = pages;
goto free_pagearray;
}
raw_spin_lock(&pcb->lock);
old_ptr = pcb->ptr;
pcb->ptr = ptr;
oldpages = pcb->pages;
pcb->pages = pages;
i = pcb->nrpages;
pcb->nrpages = nrpages;
raw_spin_unlock(&pcb->lock);
if (!oldpages) {
DBG_BUGON(old_ptr);
continue;
}
if (old_ptr)
vunmap(old_ptr);
free_pagearray:
while (i)
erofs_pagepool_add(&pagepool, oldpages[--i]);
kfree(oldpages);
if (ret)
break;
}
pcb_nrpages = nrpages;
erofs_release_pages(&pagepool);
out:
mutex_unlock(&pcb_resize_mutex);
return ret;
}
void __init erofs_pcpubuf_init(void)
{
int cpu;
for_each_possible_cpu(cpu) {
struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu);
raw_spin_lock_init(&pcb->lock);
}
}
void erofs_pcpubuf_exit(void)
{
int cpu, i;
for_each_possible_cpu(cpu) {
struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu);
if (pcb->ptr) {
vunmap(pcb->ptr);
pcb->ptr = NULL;
}
if (!pcb->pages)
continue;
for (i = 0; i < pcb->nrpages; ++i)
if (pcb->pages[i])
put_page(pcb->pages[i]);
kfree(pcb->pages);
pcb->pages = NULL;
}
}

View File

@ -856,7 +856,10 @@ static int __init erofs_module_init(void)
if (err)
goto deflate_err;
erofs_pcpubuf_init();
err = z_erofs_gbuf_init();
if (err)
goto gbuf_err;
err = z_erofs_init_zip_subsystem();
if (err)
goto zip_err;
@ -876,6 +879,8 @@ fs_err:
sysfs_err:
z_erofs_exit_zip_subsystem();
zip_err:
z_erofs_gbuf_exit();
gbuf_err:
z_erofs_deflate_exit();
deflate_err:
z_erofs_lzma_exit();
@ -899,7 +904,7 @@ static void __exit erofs_module_exit(void)
z_erofs_lzma_exit();
erofs_exit_shrinker();
kmem_cache_destroy(erofs_inode_cachep);
erofs_pcpubuf_exit();
z_erofs_gbuf_exit();
}
static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)

View File

@ -82,6 +82,9 @@ struct z_erofs_pcluster {
/* L: indicate several pageofs_outs or not */
bool multibases;
/* L: whether extra buffer allocations are best-effort */
bool besteffort;
/* A: compressed bvecs (can be cached or inplaced pages) */
struct z_erofs_bvec compressed_bvecs[];
};
@ -960,7 +963,7 @@ static int z_erofs_read_fragment(struct super_block *sb, struct page *page,
}
static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
struct page *page)
struct page *page, bool ra)
{
struct inode *const inode = fe->inode;
struct erofs_map_blocks *const map = &fe->map;
@ -1010,6 +1013,7 @@ repeat:
err = z_erofs_pcluster_begin(fe);
if (err)
goto out;
fe->pcl->besteffort |= !ra;
}
/*
@ -1285,6 +1289,9 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
.inplace_io = overlapped,
.partial_decoding = pcl->partial,
.fillgaps = pcl->multibases,
.gfp = pcl->besteffort ?
GFP_KERNEL | __GFP_NOFAIL :
GFP_NOWAIT | __GFP_NORETRY
}, be->pagepool);
out:
@ -1328,6 +1335,7 @@ out:
pcl->length = 0;
pcl->partial = true;
pcl->multibases = false;
pcl->besteffort = false;
pcl->bvset.nextpage = NULL;
pcl->vcnt = 0;
@ -1797,7 +1805,7 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
if (PageUptodate(page))
unlock_page(page);
else
(void)z_erofs_do_read_page(f, page);
(void)z_erofs_do_read_page(f, page, !!rac);
put_page(page);
}
@ -1818,7 +1826,7 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio)
f.headoffset = (erofs_off_t)folio->index << PAGE_SHIFT;
z_erofs_pcluster_readmore(&f, NULL, true);
err = z_erofs_do_read_page(&f, &folio->page);
err = z_erofs_do_read_page(&f, &folio->page, false);
z_erofs_pcluster_readmore(&f, NULL, false);
z_erofs_pcluster_end(&f);
@ -1859,7 +1867,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
folio = head;
head = folio_get_private(folio);
err = z_erofs_do_read_page(&f, &folio->page);
err = z_erofs_do_read_page(&f, &folio->page, true);
if (err && err != -EINTR)
erofs_err(inode->i_sb, "readahead error at folio %lu @ nid %llu",
folio->index, EROFS_I(inode)->nid);

View File

@ -5,16 +5,185 @@
*/
#include "internal.h"
struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp)
struct z_erofs_gbuf {
spinlock_t lock;
void *ptr;
struct page **pages;
unsigned int nrpages;
};
static struct z_erofs_gbuf *z_erofs_gbufpool, *z_erofs_rsvbuf;
static unsigned int z_erofs_gbuf_count, z_erofs_gbuf_nrpages,
z_erofs_rsv_nrpages;
module_param_named(global_buffers, z_erofs_gbuf_count, uint, 0444);
module_param_named(reserved_pages, z_erofs_rsv_nrpages, uint, 0444);
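/*
 * Both knobs are 0444, i.e. read-only at runtime: set them at boot via
 * erofs.global_buffers= / erofs.reserved_pages= on the kernel command
 * line (or as module options when EROFS is modular). global_buffers
 * bounds the number of shared decompression buffers, defaulting to the
 * possible-CPU count below; reserved_pages sizes the reserved-page pool
 * used as an allocation fallback in __erofs_allocpage().
 */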
static atomic_long_t erofs_global_shrink_cnt; /* for all mounted instances */
/* protected by 'erofs_sb_list_lock' */
static unsigned int shrinker_run_no;
/* protects the mounted 'erofs_sb_list' */
static DEFINE_SPINLOCK(erofs_sb_list_lock);
static LIST_HEAD(erofs_sb_list);
static unsigned int z_erofs_gbuf_id(void)
{
return raw_smp_processor_id() % z_erofs_gbuf_count;
}
void *z_erofs_get_gbuf(unsigned int requiredpages)
__acquires(gbuf->lock)
{
struct z_erofs_gbuf *gbuf;
gbuf = &z_erofs_gbufpool[z_erofs_gbuf_id()];
spin_lock(&gbuf->lock);
/* check if the buffer is too small */
if (requiredpages > gbuf->nrpages) {
spin_unlock(&gbuf->lock);
/* (for sparse checker) pretend gbuf->lock is still taken */
__acquire(gbuf->lock);
return NULL;
}
return gbuf->ptr;
}
void z_erofs_put_gbuf(void *ptr) __releases(gbuf->lock)
{
struct z_erofs_gbuf *gbuf;
gbuf = &z_erofs_gbufpool[z_erofs_gbuf_id()];
DBG_BUGON(gbuf->ptr != ptr);
spin_unlock(&gbuf->lock);
}
int z_erofs_gbuf_growsize(unsigned int nrpages)
{
static DEFINE_MUTEX(gbuf_resize_mutex);
struct page **tmp_pages = NULL;
struct z_erofs_gbuf *gbuf;
void *ptr, *old_ptr;
int last, i, j;
mutex_lock(&gbuf_resize_mutex);
/* avoid shrinking gbufs, since no idea how many fses rely on */
if (nrpages <= z_erofs_gbuf_nrpages) {
mutex_unlock(&gbuf_resize_mutex);
return 0;
}
for (i = 0; i < z_erofs_gbuf_count; ++i) {
gbuf = &z_erofs_gbufpool[i];
tmp_pages = kcalloc(nrpages, sizeof(*tmp_pages), GFP_KERNEL);
if (!tmp_pages)
goto out;
for (j = 0; j < gbuf->nrpages; ++j)
tmp_pages[j] = gbuf->pages[j];
do {
last = j;
j = alloc_pages_bulk_array(GFP_KERNEL, nrpages,
tmp_pages);
if (last == j)
goto out;
} while (j != nrpages);
ptr = vmap(tmp_pages, nrpages, VM_MAP, PAGE_KERNEL);
if (!ptr)
goto out;
spin_lock(&gbuf->lock);
kfree(gbuf->pages);
gbuf->pages = tmp_pages;
old_ptr = gbuf->ptr;
gbuf->ptr = ptr;
gbuf->nrpages = nrpages;
spin_unlock(&gbuf->lock);
if (old_ptr)
vunmap(old_ptr);
}
z_erofs_gbuf_nrpages = nrpages;
out:
if (i < z_erofs_gbuf_count && tmp_pages) {
for (j = 0; j < nrpages; ++j)
if (tmp_pages[j] && tmp_pages[j] != gbuf->pages[j])
__free_page(tmp_pages[j]);
kfree(tmp_pages);
}
mutex_unlock(&gbuf_resize_mutex);
return i < z_erofs_gbuf_count ? -ENOMEM : 0;
}
int __init z_erofs_gbuf_init(void)
{
unsigned int i, total = num_possible_cpus();
if (z_erofs_gbuf_count)
total = min(z_erofs_gbuf_count, total);
z_erofs_gbuf_count = total;
/* The last (special) global buffer is the reserved buffer */
total += !!z_erofs_rsv_nrpages;
z_erofs_gbufpool = kcalloc(total, sizeof(*z_erofs_gbufpool),
GFP_KERNEL);
if (!z_erofs_gbufpool)
return -ENOMEM;
if (z_erofs_rsv_nrpages) {
z_erofs_rsvbuf = &z_erofs_gbufpool[total - 1];
z_erofs_rsvbuf->pages = kcalloc(z_erofs_rsv_nrpages,
sizeof(*z_erofs_rsvbuf->pages), GFP_KERNEL);
if (!z_erofs_rsvbuf->pages) {
z_erofs_rsvbuf = NULL;
z_erofs_rsv_nrpages = 0;
}
}
for (i = 0; i < total; ++i)
spin_lock_init(&z_erofs_gbufpool[i].lock);
return 0;
}
void z_erofs_gbuf_exit(void)
{
int i, j;
for (i = 0; i < z_erofs_gbuf_count + (!!z_erofs_rsvbuf); ++i) {
struct z_erofs_gbuf *gbuf = &z_erofs_gbufpool[i];
if (gbuf->ptr) {
vunmap(gbuf->ptr);
gbuf->ptr = NULL;
}
if (!gbuf->pages)
continue;
for (j = 0; j < gbuf->nrpages; ++j)
if (gbuf->pages[j])
put_page(gbuf->pages[j]);
kfree(gbuf->pages);
gbuf->pages = NULL;
}
kfree(z_erofs_gbufpool);
}
struct page *__erofs_allocpage(struct page **pagepool, gfp_t gfp, bool tryrsv)
{
struct page *page = *pagepool;
if (page) {
DBG_BUGON(page_ref_count(page) != 1);
*pagepool = (struct page *)page_private(page);
} else {
page = alloc_page(gfp);
} else if (tryrsv && z_erofs_rsvbuf && z_erofs_rsvbuf->nrpages) {
spin_lock(&z_erofs_rsvbuf->lock);
if (z_erofs_rsvbuf->nrpages)
page = z_erofs_rsvbuf->pages[--z_erofs_rsvbuf->nrpages];
spin_unlock(&z_erofs_rsvbuf->lock);
}
if (!page)
page = alloc_page(gfp);
DBG_BUGON(page && page_ref_count(page) != 1);
return page;
}
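
The pagepool used here is a singly linked stack threaded through page_private(); a sketch of the push side that mirrors the pop above (the helper name is hypothetical):

	static void pagepool_push(struct page **pagepool, struct page *page)
	{
		set_page_private(page, (unsigned long)*pagepool);
		*pagepool = page;
	}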
@ -24,14 +193,22 @@ void erofs_release_pages(struct page **pagepool)
struct page *page = *pagepool;
*pagepool = (struct page *)page_private(page);
/* try to fill reserved global pool first */
if (z_erofs_rsvbuf && z_erofs_rsvbuf->nrpages <
z_erofs_rsv_nrpages) {
spin_lock(&z_erofs_rsvbuf->lock);
if (z_erofs_rsvbuf->nrpages < z_erofs_rsv_nrpages) {
z_erofs_rsvbuf->pages[z_erofs_rsvbuf->nrpages++]
= page;
spin_unlock(&z_erofs_rsvbuf->lock);
continue;
}
spin_unlock(&z_erofs_rsvbuf->lock);
}
put_page(page);
}
}
#ifdef CONFIG_EROFS_FS_ZIP
/* global shrink count (for all mounted EROFS instances) */
static atomic_long_t erofs_global_shrink_cnt;
static bool erofs_workgroup_get(struct erofs_workgroup *grp)
{
if (lockref_get_not_zero(&grp->lockref))
@ -171,13 +348,6 @@ static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
return freed;
}
/* protected by 'erofs_sb_list_lock' */
static unsigned int shrinker_run_no;
/* protects the mounted 'erofs_sb_list' */
static DEFINE_SPINLOCK(erofs_sb_list_lock);
static LIST_HEAD(erofs_sb_list);
void erofs_shrinker_register(struct super_block *sb)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
@ -279,4 +449,3 @@ void erofs_exit_shrinker(void)
{
unregister_shrinker(&erofs_shrinker_info);
}
#endif /* !CONFIG_EROFS_FS_ZIP */

View File

@ -1100,7 +1100,7 @@ retry:
struct bio *bio = NULL;
ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
&last_block_in_bio, false, true);
&last_block_in_bio, NULL, true);
f2fs_put_rpages(cc);
f2fs_destroy_compress_ctx(cc, true);
if (ret)

View File

@ -2072,12 +2072,17 @@ static inline loff_t f2fs_readpage_limit(struct inode *inode)
return i_size_read(inode);
}
static inline blk_opf_t f2fs_ra_op_flags(struct readahead_control *rac)
{
return rac ? REQ_RAHEAD : 0;
}
static int f2fs_read_single_page(struct inode *inode, struct folio *folio,
unsigned nr_pages,
struct f2fs_map_blocks *map,
struct bio **bio_ret,
sector_t *last_block_in_bio,
bool is_readahead)
struct readahead_control *rac)
{
struct bio *bio = *bio_ret;
const unsigned blocksize = blks_to_bytes(inode, 1);
@ -2159,7 +2164,7 @@ submit_and_realloc:
}
if (bio == NULL) {
bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
is_readahead ? REQ_RAHEAD : 0, index,
f2fs_ra_op_flags(rac), index,
false);
if (IS_ERR(bio)) {
ret = PTR_ERR(bio);
@ -2196,7 +2201,7 @@ out:
#ifdef CONFIG_F2FS_FS_COMPRESSION
int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
unsigned nr_pages, sector_t *last_block_in_bio,
bool is_readahead, bool for_write)
struct readahead_control *rac, bool for_write)
{
struct dnode_of_data dn;
struct inode *inode = cc->inode;
@ -2319,7 +2324,7 @@ submit_and_realloc:
if (!bio) {
bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
is_readahead ? REQ_RAHEAD : 0,
f2fs_ra_op_flags(rac),
page->index, for_write);
if (IS_ERR(bio)) {
ret = PTR_ERR(bio);
@ -2417,7 +2422,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
ret = f2fs_read_multi_pages(&cc, &bio,
max_nr_pages,
&last_block_in_bio,
rac != NULL, false);
rac, false);
f2fs_destroy_compress_ctx(&cc, false);
if (ret)
goto set_error_page;
@ -2467,7 +2472,7 @@ next_page:
ret = f2fs_read_multi_pages(&cc, &bio,
max_nr_pages,
&last_block_in_bio,
rac != NULL, false);
rac, false);
f2fs_destroy_compress_ctx(&cc, false);
}
}

View File

@ -4304,7 +4304,7 @@ void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
unsigned int llen, unsigned int c_len);
int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
unsigned nr_pages, sector_t *last_block_in_bio,
bool is_readahead, bool for_write);
struct readahead_control *rac, bool for_write);
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
bool in_task);

View File

@ -35,6 +35,7 @@
#include "pnode.h"
#include "internal.h"
#include <trace/hooks/blk.h>
/* Maximum number of mounts in a mount namespace */
static unsigned int sysctl_mount_max __read_mostly = 100000;
@ -3289,6 +3290,8 @@ static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint,
unlock_mount(mp);
if (error < 0)
mntput(mnt);
else
trace_android_vh_do_new_mount_fc(mountpoint, mnt);
return error;
}

View File

@ -5,11 +5,6 @@
#include <asm/kvm_host.h>
#include <kvm/power_domain.h>
#include <linux/io-pgtable.h>
#ifdef __KVM_NVHE_HYPERVISOR__
#include <nvhe/spinlock.h>
#endif
#define HYP_SPINLOCK_SIZE 4
/*
* Domain ID for identity mapped domain that the host can attach
@ -31,10 +26,10 @@
*/
struct kvm_hyp_iommu {
struct kvm_power_domain power_domain;
#ifdef __KVM_NVHE_HYPERVISOR__
hyp_spinlock_t lock;
#ifndef __GENKSYMS__
u32 lock; /* lock size verified in kvm_iommu_get_lock. */
#else
u32 unused; /* HYP_SPINLOCK_SIZE verified at build time. */
u32 unused;
#endif
bool power_is_off;
ANDROID_KABI_RESERVE(1);
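
The size check the comment refers to would plausibly look like this on the hypervisor side (a sketch under that assumption; only the helper name comes from the comment above):

	static inline hyp_spinlock_t *kvm_iommu_get_lock(struct kvm_hyp_iommu *iommu)
	{
		/* the opaque u32 slot must be able to hold a hyp_spinlock_t */
		BUILD_BUG_ON(sizeof(hyp_spinlock_t) != sizeof(iommu->lock));
		return (hyp_spinlock_t *)&iommu->lock;
	}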

View File

@ -10,7 +10,7 @@
#include <linux/buffer_head.h>
#include <linux/tracepoint.h>
#define RWBS_LEN 8
#define RWBS_LEN 10
#ifdef CONFIG_BUFFER_HEAD
DECLARE_EVENT_CLASS(block_buffer,

View File

@ -16,6 +16,16 @@ struct gendisk;
DECLARE_HOOK(android_vh_bd_link_disk_holder,
TP_PROTO(struct block_device *bdev, struct gendisk *disk),
TP_ARGS(bdev, disk));
DECLARE_HOOK(android_vh_blk_fill_rwbs,
TP_PROTO(char *rwbs, unsigned int opf),
TP_ARGS(rwbs, opf));
struct path;
struct vfsmount;
DECLARE_HOOK(android_vh_do_new_mount_fc,
TP_PROTO(struct path *mountpoint, struct vfsmount *mnt),
TP_ARGS(mountpoint, mnt));
#endif /* _TRACE_HOOK_BLK_H */
/* This part must be outside protection */

View File

@ -43,6 +43,10 @@ DECLARE_RESTRICTED_HOOK(android_rvh_cpufreq_transition,
TP_PROTO(struct cpufreq_policy *policy),
TP_ARGS(policy), 1);
DECLARE_HOOK(android_vh_cpufreq_online,
TP_PROTO(struct cpufreq_policy *policy),
TP_ARGS(policy));
#endif /* _TRACE_HOOK_CPUFREQ_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
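
A vendor module would attach to the new hook via the generated registration helper; a minimal sketch (the probe body and names are illustrative, not from this patch):

	static void vh_cpufreq_online(void *unused, struct cpufreq_policy *policy)
	{
		pr_debug("cpufreq: policy for CPU%u came online\n", policy->cpu);
	}

	static int __init vendor_cpufreq_init(void)
	{
		return register_trace_android_vh_cpufreq_online(vh_cpufreq_online,
								NULL);
	}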

View File

@ -6,19 +6,12 @@
#if !defined(_TRACE_HOOK_MMC_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_HOOK_MMC_H
#include <trace/hooks/vendor_hooks.h>
struct mmc_card;
struct mmc_queue;
struct blk_mq_queue_data;
struct mmc_host;
/*
* The following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook into and extend functionality
*/
DECLARE_HOOK(android_vh_mmc_update_mmc_queue,
TP_PROTO(struct mmc_card *card, struct mmc_queue *mq),
TP_ARGS(card, mq));
DECLARE_HOOK(android_vh_mmc_sdio_pm_flag_set,
TP_PROTO(struct mmc_host *host),
TP_ARGS(host));

View File

@ -18,10 +18,6 @@ struct sock;
struct msghdr;
struct sk_buff;
struct net_device;
DECLARE_RESTRICTED_HOOK(android_rvh_sk_alloc,
TP_PROTO(struct sock *sock), TP_ARGS(sock), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_sk_free,
TP_PROTO(struct sock *sock), TP_ARGS(sock), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_tcp_sendmsg,
TP_PROTO(struct sock *sk, struct msghdr *msg, size_t len),
TP_ARGS(sk, msg, len), 1);
@ -64,6 +60,10 @@ DECLARE_HOOK(android_vh_tcp_write_timeout_estab_retrans,
TP_PROTO(struct sock *sk), TP_ARGS(sk));
DECLARE_HOOK(android_vh_tcp_connect,
TP_PROTO(struct sk_buff *skb), TP_ARGS(skb));
DECLARE_HOOK(android_vh_sk_alloc,
TP_PROTO(struct sock *sk), TP_ARGS(sk));
DECLARE_HOOK(android_vh_sk_free,
TP_PROTO(struct sock *sk), TP_ARGS(sk));
/* macro versions of hooks are no longer required */
#endif /* _TRACE_HOOK_NET_VH_H */

View File

@ -1,20 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM pm_domain
#define TRACE_INCLUDE_PATH trace/hooks
#if !defined(_TRACE_HOOK_PM_DOMAIN_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_HOOK_PM_DOMAIN_H
#include <trace/hooks/vendor_hooks.h>
struct generic_pm_domain;
DECLARE_HOOK(android_vh_allow_domain_state,
TP_PROTO(struct generic_pm_domain *genpd, uint32_t idx, bool *allow),
TP_ARGS(genpd, idx, allow))
#endif /* _TRACE_HOOK_PM_DOMAIN_H */
#include <trace/define_trace.h>

View File

@ -454,10 +454,6 @@ DECLARE_RESTRICTED_HOOK(android_rvh_update_blocked_fair,
TP_PROTO(struct rq *rq),
TP_ARGS(rq), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_update_load_sum,
TP_PROTO(struct sched_avg *sa, u64 *delta, unsigned int *sched_pelt_lshift),
TP_ARGS(sa, delta, sched_pelt_lshift), 1);
struct sched_attr;
DECLARE_HOOK(android_vh_set_sugov_sched_attr,

View File

@ -18,6 +18,14 @@ DECLARE_HOOK(android_vh_ufs_fill_prdt,
unsigned int segments, int *err),
TP_ARGS(hba, lrbp, segments, err));
DECLARE_RESTRICTED_HOOK(android_rvh_ufs_reprogram_all_keys,
TP_PROTO(struct ufs_hba *hba, int *err),
TP_ARGS(hba, err), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_ufs_complete_init,
TP_PROTO(struct ufs_hba *hba),
TP_ARGS(hba), 1);
DECLARE_HOOK(android_vh_ufs_prepare_command,
TP_PROTO(struct ufs_hba *hba, struct request *rq,
struct ufshcd_lrb *lrbp, int *err),
@ -88,9 +96,6 @@ DECLARE_HOOK(android_vh_ufs_err_check_ctrl,
bool *err_check),
TP_ARGS(hba, err_check));
DECLARE_HOOK(android_vh_ufs_clock_scaling,
TP_PROTO(struct ufs_hba *hba, bool *force_out, bool *force_scaling, bool *scale_up),
TP_ARGS(hba, force_out, force_scaling, scale_up));
#endif /* _TRACE_HOOK_UFSHCD_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View File

@ -3,7 +3,11 @@
# Makefile for the linux kernel.
#
ifdef CONFIG_GKI_HACKS_TO_FIX
CFLAGS_cred.o = -DANDROID_GKI_VFS_EXPORT_ONLY=VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
else
CFLAGS_cred.o = -DANDROID_GKI_VFS_EXPORT_ONLY=""
endif
obj-y = fork.o exec_domain.o panic.o \
cpu.o exit.o softirq.o resource.o \
sysctl.o capability.o ptrace.o user.o \

View File

@ -115,6 +115,7 @@ static DEFINE_SPINLOCK(cgroup_idr_lock);
static DEFINE_SPINLOCK(cgroup_file_kn_lock);
DEFINE_PERCPU_RWSEM(cgroup_threadgroup_rwsem);
EXPORT_SYMBOL_GPL(cgroup_threadgroup_rwsem);
#define cgroup_assert_mutex_or_rcu_locked() \
RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
@ -4941,6 +4942,7 @@ void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
spin_unlock_irq(&css_set_lock);
}
EXPORT_SYMBOL_GPL(css_task_iter_start);
/**
* css_task_iter_next - return the next task for the iterator
@ -4974,6 +4976,7 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
return it->cur_task;
}
EXPORT_SYMBOL_GPL(css_task_iter_next);
/**
* css_task_iter_end - finish task iteration
@ -4996,6 +4999,7 @@ void css_task_iter_end(struct css_task_iter *it)
if (it->cur_task)
put_task_struct(it->cur_task);
}
EXPORT_SYMBOL_GPL(css_task_iter_end);
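
With these iterators now exported, a GKI vendor module can walk a cgroup's tasks; a minimal sketch (locking and module boilerplate elided):

	static void walk_css_tasks(struct cgroup_subsys_state *css)
	{
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(css, 0, &it);
		while ((task = css_task_iter_next(&it)))
			pr_info("task %d (%s)\n", task->pid, task->comm);
		css_task_iter_end(&it);
	}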
static void cgroup_procs_release(struct kernfs_open_file *of)
{

View File

@ -1377,7 +1377,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
/*
* lock for writing
*/
static inline int __down_write_common(struct rw_semaphore *sem, int state)
static __always_inline int __down_write_common(struct rw_semaphore *sem, int state)
{
int ret = 0;
@ -1390,12 +1390,12 @@ static inline int __down_write_common(struct rw_semaphore *sem, int state)
return ret;
}
static inline void __down_write(struct rw_semaphore *sem)
static __always_inline void __down_write(struct rw_semaphore *sem)
{
__down_write_common(sem, TASK_UNINTERRUPTIBLE);
}
static inline int __down_write_killable(struct rw_semaphore *sem)
static __always_inline int __down_write_killable(struct rw_semaphore *sem)
{
return __down_write_common(sem, TASK_KILLABLE);
}

View File

@ -7161,7 +7161,7 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
void *key)
{
WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~(WF_SYNC|WF_CURRENT_CPU));
WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~(WF_SYNC|WF_CURRENT_CPU|WF_ANDROID_VENDOR));
return try_to_wake_up(curr->private, mode, wake_flags);
}
EXPORT_SYMBOL(default_wake_function);

View File

@ -9138,7 +9138,7 @@ static int detach_tasks(struct lb_env *env)
case migrate_util:
util = task_util_est(p);
if (util > env->imbalance)
if (shr_bound(util, env->sd->nr_balance_failed) > env->imbalance)
goto next;
env->imbalance -= util;
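
A quick worked example of the relaxed bound (shr_bound() is a right shift with a clamped shift count; the numbers are illustrative):

	/* util = 1024:
	 *   nr_balance_failed = 0: shr_bound(1024, 0) = 1024 (strict check)
	 *   nr_balance_failed = 2: shr_bound(1024, 2) =  256 (relaxed check)
	 * tasks that repeatedly fail to balance become easier to migrate.
	 */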

View File

@ -24,8 +24,6 @@
* Author: Vincent Guittot <vincent.guittot@linaro.org>
*/
#include <trace/hooks/sched.h>
/*
* Approximate:
* val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
@ -204,8 +202,6 @@ ___update_load_sum(u64 now, struct sched_avg *sa,
sa->last_update_time += delta << 10;
trace_android_rvh_update_load_sum(sa, &delta, &sched_pelt_lshift);
/*
* running is a subset of runnable (weight) so running can't be set if
* runnable is clear. But there are some corner cases where the current

View File

@ -1294,10 +1294,12 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group, char *buf,
return ERR_PTR(-EOPNOTSUPP);
/*
* Checking the privilege here on file->f_cred implies that a privileged user
* could open the file and delegate the write to an unprivileged one.
* Checking the privilege on file->f_cred, or whether SELinux is enabled,
* here implies that a privileged user could open the file and delegate
* the write to an unprivileged one.
*/
privileged = cap_raised(file->f_cred->cap_effective, CAP_SYS_RESOURCE);
privileged = cap_raised(file->f_cred->cap_effective, CAP_SYS_RESOURCE) ||
IS_ENABLED(CONFIG_DEFAULT_SECURITY_SELINUX);
if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2)
state = PSI_IO_SOME + res * 2;
@ -1656,11 +1658,11 @@ static int __init psi_proc_init(void)
{
if (psi_enable) {
proc_mkdir("pressure", NULL);
proc_create("pressure/io", 0666, NULL, &psi_io_proc_ops);
proc_create("pressure/memory", 0666, NULL, &psi_memory_proc_ops);
proc_create("pressure/cpu", 0666, NULL, &psi_cpu_proc_ops);
proc_create("pressure/io", 0, NULL, &psi_io_proc_ops);
proc_create("pressure/memory", 0, NULL, &psi_memory_proc_ops);
proc_create("pressure/cpu", 0, NULL, &psi_cpu_proc_ops);
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
proc_create("pressure/irq", 0666, NULL, &psi_irq_proc_ops);
proc_create("pressure/irq", 0, NULL, &psi_irq_proc_ops);
#endif
}
return 0;

View File

@ -2208,6 +2208,8 @@ static inline int task_on_rq_migrating(struct task_struct *p)
#define WF_MIGRATED 0x20 /* Internal use, task got migrated */
#define WF_CURRENT_CPU 0x40 /* Prefer to move the wakee to the current CPU. */
#define WF_ANDROID_VENDOR 0x1000 /* Vendor specific for Android */
#ifdef CONFIG_SMP
static_assert(WF_EXEC == SD_BALANCE_EXEC);
static_assert(WF_FORK == SD_BALANCE_FORK);

View File

@ -112,7 +112,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_context_switch);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mmput);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_attach_entity_load_avg);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_detach_entity_load_avg);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_load_sum);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_load_avg);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_remove_entity_load_avg);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_blocked_fair);

View File

@ -23,6 +23,7 @@
#include "../../block/blk.h"
#include <trace/events/block.h>
#include <trace/hooks/blk.h>
#include "trace_output.h"
@ -1911,6 +1912,7 @@ void blk_fill_rwbs(char *rwbs, blk_opf_t opf)
rwbs[i++] = 'M';
rwbs[i] = '\0';
trace_android_vh_blk_fill_rwbs(rwbs, opf);
}
EXPORT_SYMBOL_GPL(blk_fill_rwbs);
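
The RWBS_LEN bump from 8 to 10 seen earlier in this diff leaves room for vendor-added flag characters; a sketch of a probe using that room (the extra character and its condition are illustrative):

	static void vh_blk_fill_rwbs(void *unused, char *rwbs, unsigned int opf)
	{
		size_t i = strlen(rwbs);

		if ((opf & REQ_NOWAIT) && i + 1 < 10) {	/* stay within RWBS_LEN */
			rwbs[i++] = 'W';
			rwbs[i] = '\0';
		}
	}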

View File

@ -68,6 +68,9 @@
#undef CREATE_TRACE_POINTS
#include <trace/hooks/mm.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(mm_page_alloc);
EXPORT_TRACEPOINT_SYMBOL_GPL(mm_page_free);
/* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
typedef int __bitwise fpi_t;

View File

@ -1950,6 +1950,7 @@ struct vmap_block {
struct list_head free_list;
struct rcu_head rcu_head;
struct list_head purge;
unsigned int cpu;
};
/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
@ -2078,7 +2079,15 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
return ERR_PTR(err);
}
vbq = raw_cpu_ptr(&vmap_block_queue);
/*
* list_add_tail_rcu() may run on a CPU other than vb->cpu
* due to task migration, which is safe: list_add_tail_rcu()
* preserves the list's integrity together with
* list_for_each_rcu() on the read side.
*/
vb->cpu = raw_smp_processor_id();
vbq = per_cpu_ptr(&vmap_block_queue, vb->cpu);
spin_lock(&vbq->lock);
list_add_tail_rcu(&vb->free_list, &vbq->free);
spin_unlock(&vbq->lock);
@ -2104,9 +2113,11 @@ static void free_vmap_block(struct vmap_block *vb)
}
static bool purge_fragmented_block(struct vmap_block *vb,
struct vmap_block_queue *vbq, struct list_head *purge_list,
bool force_purge)
struct list_head *purge_list, bool force_purge)
{
struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue,
vb->cpu);
if (vb->free + vb->dirty != VMAP_BBMAP_BITS ||
vb->dirty == VMAP_BBMAP_BITS)
return false;
@ -2154,7 +2165,7 @@ static void purge_fragmented_blocks(int cpu)
continue;
spin_lock(&vb->lock);
purge_fragmented_block(vb, vbq, &purge, true);
purge_fragmented_block(vb, &purge, true);
spin_unlock(&vb->lock);
}
rcu_read_unlock();
@ -2291,7 +2302,7 @@ static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
* not purgeable, check whether there is dirty
* space to be flushed.
*/
if (!purge_fragmented_block(vb, vbq, &purge_list, false) &&
if (!purge_fragmented_block(vb, &purge_list, false) &&
vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) {
unsigned long va_start = vb->va->va_start;
unsigned long s, e;

View File

@ -69,3 +69,5 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(net_dev_queue);
EXPORT_TRACEPOINT_SYMBOL_GPL(net_dev_xmit);
EXPORT_TRACEPOINT_SYMBOL_GPL(netif_receive_skb);
EXPORT_TRACEPOINT_SYMBOL_GPL(netif_rx);
EXPORT_TRACEPOINT_SYMBOL_GPL(tcp_retransmit_skb);

View File

@ -2104,8 +2104,6 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
if (security_sk_alloc(sk, family, priority))
goto out_free;
trace_android_rvh_sk_alloc(sk);
if (!try_module_get(prot->owner))
goto out_free_sec;
}
@ -2114,7 +2112,6 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
out_free_sec:
security_sk_free(sk);
trace_android_rvh_sk_free(sk);
out_free:
if (slab != NULL)
kmem_cache_free(slab, sk);
@ -2133,8 +2130,8 @@ static void sk_prot_free(struct proto *prot, struct sock *sk)
cgroup_sk_free(&sk->sk_cgrp_data);
mem_cgroup_sk_free(sk);
trace_android_vh_sk_free(sk);
security_sk_free(sk);
trace_android_rvh_sk_free(sk);
if (slab != NULL)
kmem_cache_free(slab, sk);
else
@ -2178,6 +2175,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
refcount_set(&sk->sk_wmem_alloc, 1);
mem_cgroup_sk_alloc(sk);
trace_android_vh_sk_alloc(sk);
cgroup_sk_alloc(&sk->sk_cgrp_data);
sock_update_classid(&sk->sk_cgrp_data);
sock_update_netprioidx(&sk->sk_cgrp_data);

View File

@ -436,7 +436,7 @@ $(obj)/core.o: private rustc_objcopy = $(foreach sym,$(redirect-intrinsics),--re
$(obj)/core.o: private rustc_target_flags = $(core-cfgs)
$(obj)/core.o: $(RUST_LIB_SRC)/core/src/lib.rs FORCE
$(call if_changed_dep,rustc_library)
ifdef CONFIG_X86_64
ifneq ($(or $(CONFIG_X86_64),$(CONFIG_ARM64)),)
$(obj)/core.o: scripts/target.json
endif

View File

@ -12,7 +12,7 @@ hostprogs-always-$(CONFIG_SYSTEM_EXTRA_CERTIFICATE) += insert-sys-cert
hostprogs-always-$(CONFIG_RUST_KERNEL_DOCTESTS) += rustdoc_test_builder
hostprogs-always-$(CONFIG_RUST_KERNEL_DOCTESTS) += rustdoc_test_gen
ifdef CONFIG_X86_64
ifneq ($(or $(CONFIG_X86_64),$(CONFIG_ARM64)),)
always-$(CONFIG_RUST) += target.json
filechk_rust_target = $< < include/config/auto.conf

View File

@ -26,7 +26,11 @@ enum Value {
type Object = Vec<(String, Value)>;
fn comma_sep<T>(seq: &[T], formatter: &mut Formatter<'_>, f: impl Fn(&mut Formatter<'_>, &T) -> Result) -> Result {
fn comma_sep<T>(
seq: &[T],
formatter: &mut Formatter<'_>,
f: impl Fn(&mut Formatter<'_>, &T) -> Result,
) -> Result {
if let [ref rest @ .., ref last] = seq[..] {
for v in rest {
f(formatter, v)?;
@ -52,8 +56,9 @@ impl Display for Value {
}
Value::Object(object) => {
formatter.write_str("{")?;
comma_sep(&object[..], formatter, |formatter, v|
write!(formatter, "\"{}\": {}", v.0, v.1))?;
comma_sep(&object[..], formatter, |formatter, v| {
write!(formatter, "\"{}\": {}", v.0, v.1)
})?;
formatter.write_str("}")
}
}
@ -90,7 +95,7 @@ impl From<Object> for Value {
}
}
impl <T: Into<Value>, const N: usize> From<[T; N]> for Value {
impl<T: Into<Value>, const N: usize> From<[T; N]> for Value {
fn from(i: [T; N]) -> Self {
Self::Array(i.into_iter().map(|v| v.into()).collect())
}
@ -168,7 +173,20 @@ fn main() {
// `llvm-target`s are taken from `scripts/Makefile.clang`.
if cfg.has("ARM64") {
panic!("arm64 uses the builtin rustc aarch64-unknown-none target");
ts.push("arch", "aarch64");
ts.push(
"data-layout",
"e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128",
);
ts.push("disable-redzone", true);
let mut features = "+v8a,+strict-align,+neon,+fp-armv8".to_string();
if cfg.has("SHADOW_CALL_STACK") {
features += ",+reserve-x18";
}
ts.push("features", features);
ts.push("llvm-target", "aarch64-linux-gnu");
ts.push("supported-sanitizers", ["kcfi"]);
ts.push("target-pointer-width", "64");
} else if cfg.has("X86_64") {
ts.push("arch", "x86_64");
ts.push(

View File

@ -165,14 +165,6 @@ if [ "$rust_bindings_generator_cversion" -lt "$rust_bindings_generator_min_cvers
echo >&2 "***"
exit 1
fi
if [ "$rust_bindings_generator_cversion" -gt "$rust_bindings_generator_min_cversion" ]; then
echo >&2 "***"
echo >&2 "*** Rust bindings generator '$BINDGEN' is too new. This may or may not work."
echo >&2 "*** Your version: $rust_bindings_generator_version"
echo >&2 "*** Expected version: $rust_bindings_generator_min_version"
echo >&2 "***"
warning=1
fi
# Check that the `libclang` used by the Rust bindings generator is suitable.
#

View File

@ -102,8 +102,12 @@ long outofrange_freq[NUM_FREQ_OUTOFRANGE] = {
1000 * SHIFTED_PPM,
};
#ifndef LONG_MAX
#define LONG_MAX (~0UL>>1)
#endif
#ifndef LONG_MIN
#define LONG_MIN (-LONG_MAX - 1)
#endif
long invalid_freq[NUM_FREQ_INVALID] = {
LONG_MAX,

View File

@ -55,14 +55,20 @@ static struct vdso_info
ELF(Verdef) *verdef;
} vdso_info;
/* Straight from the ELF specification. */
static unsigned long elf_hash(const unsigned char *name)
/*
* Straight from the ELF specification...and then tweaked slightly, in order to
* avoid a few clang warnings.
*/
static unsigned long elf_hash(const char *name)
{
unsigned long h = 0, g;
while (*name)
const unsigned char *uch_name = (const unsigned char *)name;
while (*uch_name)
{
h = (h << 4) + *name++;
if (g = h & 0xf0000000)
h = (h << 4) + *uch_name++;
g = h & 0xf0000000;
if (g)
h ^= g >> 24;
h &= ~g;
}

View File

@ -18,7 +18,7 @@
#include "parse_vdso.h"
/* We need a libc functions... */
/* We need some libc functions... */
int strcmp(const char *a, const char *b)
{
/* This implementation is buggy: it never returns -1. */
@ -34,6 +34,20 @@ int strcmp(const char *a, const char *b)
return 0;
}
/*
* The clang build needs this, although gcc does not.
* Stolen from lib/string.c.
*/
void *memcpy(void *dest, const void *src, size_t count)
{
char *tmp = dest;
const char *s = src;
while (count--)
*tmp++ = *s++;
return dest;
}
/* ...and two syscalls. This is x86-specific. */
static inline long x86_syscall3(long nr, long a0, long a1, long a2)
{
@ -70,7 +84,7 @@ void to_base10(char *lastdig, time_t n)
}
}
__attribute__((externally_visible)) void c_main(void **stack)
void c_main(void **stack)
{
/* Parse the stack */
long argc = (long)*stack;