Merge remote-tracking branch 'aosp/android16-6.12' into nxp-linux-sdk/lf-6.12.y_android

Change-Id: I130651205cb7ff1ddc645348052a5974aa20550c
Author: Zhipeng Wang 2025-08-01 11:13:32 +09:00
commit ad1f2a7f75
112 changed files with 7832 additions and 715 deletions

View File

@ -195,10 +195,12 @@ filegroup(
"gki/aarch64/symbols/galaxy",
"gki/aarch64/symbols/honor",
"gki/aarch64/symbols/imx",
"gki/aarch64/symbols/lenovo",
"gki/aarch64/symbols/mtk",
"gki/aarch64/symbols/mtktv",
"gki/aarch64/symbols/oplus",
"gki/aarch64/symbols/pixel",
"gki/aarch64/symbols/pixel_watch",
"gki/aarch64/symbols/qcom",
"gki/aarch64/symbols/rtkstb",
"gki/aarch64/symbols/rtktv",

View File

@ -1 +0,0 @@
per-file sysfs-fs-f2fs=file:/fs/f2fs/OWNERS

View File

@ -4759,6 +4759,11 @@
pcmv= [HW,PCMCIA] BadgePAD 4
pcp_thp_order= [MM]
Specify the order of the pcp used by THP.
The specified value must be no greater than HPAGE_PMD_ORDER
(default) and greater than 3 (PAGE_ALLOC_COSTLY_ORDER).
pd_ignore_unused
[PM]
Keep all power-domains already enabled by bootloader on,
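
A hedged illustration of the bounds documented for pcp_thp_order= above: a minimal sketch of an early_param handler enforcing them. The handler and variable names are assumptions for illustration only, not mm's actual implementation.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mmzone.h>	/* PAGE_ALLOC_COSTLY_ORDER */
#include <linux/huge_mm.h>	/* HPAGE_PMD_ORDER */

static unsigned int pcp_thp_order __ro_after_init = HPAGE_PMD_ORDER;

/* Hypothetical parser for "pcp_thp_order=N" on the kernel command line. */
static int __init setup_pcp_thp_order(char *str)
{
	unsigned int order;

	if (!str || kstrtouint(str, 0, &order))
		return -EINVAL;
	/* Documented range: greater than 3, no greater than HPAGE_PMD_ORDER. */
	if (order <= PAGE_ALLOC_COSTLY_ORDER || order > HPAGE_PMD_ORDER)
		return -EINVAL;
	pcp_thp_order = order;
	return 0;
}
early_param("pcp_thp_order", setup_pcp_thp_order);

With such a handler, booting with "pcp_thp_order=4" would take effect, while "pcp_thp_order=2" would be rejected as not exceeding PAGE_ALLOC_COSTLY_ORDER.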

View File

@ -1 +0,0 @@
per-file f2fs**=file:/fs/f2fs/OWNERS

OWNERS (29 changed lines)
View File

@ -1,21 +1,12 @@
# The full list of approvers is defined in
# https://android.googlesource.com/kernel/common/+/refs/meta/config/OWNERS
set noparent
# The following OWNERS are defined at the top level to improve the OWNERS
# suggestions through any user interface. Consider those people the ones that
# can help with finding the best person to review.
adelva@google.com
gregkh@google.com
joneslee@google.com
maennich@google.com
saravanak@google.com
surenb@google.com
tkjos@google.com
vmartensson@google.com
willdeacon@google.com
# GKI Dr. No Enforcement is active on this branch. Approval of one of the Dr.
# No reviewers is required following a regular CodeReview+2 vote of a code
# reviewer.
#
# See the GKI release documentation (go/gki-dr-no) for further details.
#
# The expanded list of reviewers can be found at:
# https://android.googlesource.com/kernel/common/+/android-mainline/OWNERS_DrNo
# Test mapping changes can be made by anyone
per-file */TEST_MAPPING = *
# Test config xml can be made by anyone
per-file */*.xml = *
include kernel/common:android-mainline:/OWNERS_DrNo

View File

@ -1 +0,0 @@
include ../arm64/OWNERS

View File

@ -1,4 +0,0 @@
per-file crypto/**=file:/crypto/OWNERS
per-file {include,kernel,kvm,lib}/**=mzyngier@google.com,willdeacon@google.com
per-file mm/**=file:/mm/OWNERS
per-file net/**=file:/net/OWNERS

View File

@ -9,12 +9,13 @@ CONFIG_RCU_BOOST=y
CONFIG_RCU_NOCB_CPU=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
CONFIG_MEMCG=y
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_TIME_NS is not set
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_CGROUPS=y
CONFIG_MEMCG=y
# CONFIG_RD_GZIP is not set
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
@ -22,21 +23,22 @@ CONFIG_MEMCG=y
# CONFIG_RD_LZO is not set
# CONFIG_RD_ZSTD is not set
CONFIG_BOOT_CONFIG=y
CONFIG_EXPERT=y
CONFIG_PROFILING=y
CONFIG_KEXEC_FILE=y
CONFIG_ARM64_VA_BITS_39=y
CONFIG_SCHED_MC=y
CONFIG_NR_CPUS=32
CONFIG_PARAVIRT_TIME_ACCOUNTING=y
CONFIG_KEXEC_FILE=y
CONFIG_ARM64_SW_TTBR0_PAN=y
# CONFIG_ARM64_BTI_KERNEL is not set
CONFIG_RANDOMIZE_BASE=y
# CONFIG_RANDOMIZE_MODULE_REGION_FULL is not set
CONFIG_UNWIND_PATCH_PAC_INTO_SCS=y
CONFIG_CMDLINE="stack_depot_disable=on kasan.stacktrace=off cgroup_disable=pressure ioremap_guard panic=-1 bootconfig"
CONFIG_CMDLINE_EXTEND=y
# CONFIG_EFI is not set
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
# CONFIG_PM_WAKELOCKS_GC is not set
# CONFIG_SUSPEND is not set
CONFIG_VIRTUALIZATION=y
CONFIG_JUMP_LABEL=y
CONFIG_SHADOW_CALL_STACK=y
@ -55,43 +57,49 @@ CONFIG_SHUFFLE_PAGE_ALLOCATOR=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
# CONFIG_ZONE_DMA is not set
# CONFIG_ZONE_DMA32 is not set
CONFIG_ANON_VMA_NAME=y
CONFIG_USERFAULTFD=y
CONFIG_LRU_GEN=y
CONFIG_NET=y
CONFIG_UNIX=y
CONFIG_INET=y
# CONFIG_INET_DIAG is not set
CONFIG_TCP_CONG_ADVANCED=y
# CONFIG_TCP_CONG_BIC is not set
# CONFIG_TCP_CONG_CUBIC is not set
# CONFIG_TCP_CONG_WESTWOOD is not set
# CONFIG_TCP_CONG_HTCP is not set
# CONFIG_IPV6 is not set
CONFIG_VSOCKETS=y
CONFIG_VIRTIO_VSOCKETS=y
# CONFIG_WIRELESS is not set
# CONFIG_ETHTOOL_NETLINK is not set
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
CONFIG_PCIEAER=y
CONFIG_PCI_IOV=y
CONFIG_PCI_HOST_GENERIC=y
CONFIG_PCIE_DW_PLAT_EP=y
CONFIG_PCIE_KIRIN=y
CONFIG_PCIE_DW_PLAT_EP=y
CONFIG_PCI_ENDPOINT=y
CONFIG_FW_LOADER_USER_HELPER=y
# CONFIG_FW_CACHE is not set
# CONFIG_FW_LOADER is not set
CONFIG_ARM_SCMI_PROTOCOL=y
# CONFIG_ARM_SCMI_POWER_DOMAIN is not set
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_VIRTIO_BLK=y
CONFIG_OPEN_DICE=y
CONFIG_VCPU_STALL_DETECTOR=y
CONFIG_MD=y
# CONFIG_MD_BITMAP_FILE is not set
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_FEC=y
# CONFIG_DM_USER is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_LEGACY_PTYS is not set
@ -103,7 +111,6 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=0
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_NULL_TTY=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_CCTRNG=y
# CONFIG_DEVMEM is not set
# CONFIG_DEVPORT is not set
@ -111,23 +118,25 @@ CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_CORE=y
# CONFIG_HID is not set
# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EDAC=y
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
CONFIG_RTC_DRV_PL030=y
CONFIG_RTC_DRV_PL031=y
CONFIG_DMABUF_HEAPS=y
CONFIG_DMABUF_SYSFS_STATS=y
CONFIG_UIO=y
CONFIG_VIRT_DRIVERS=y
CONFIG_ARM_PKVM_GUEST=y
CONFIG_GUNYAH_GUEST=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BALLOON=y
CONFIG_STAGING=y
# CONFIG_SURFACE_PLATFORMS is not set
CONFIG_HWSPINLOCK=y
# CONFIG_IOMMU_SUPPORT is not set
# CONFIG_ARM_SCMI_POWER_DOMAIN is not set
# CONFIG_ANDROID_KABI_RESERVE is not set
# CONFIG_ANDROID_VENDOR_OEM_DATA is not set
CONFIG_EXT4_FS=y
# CONFIG_EXT4_USE_FOR_EXT2 is not set
CONFIG_EXT4_FS_POSIX_ACL=y
@ -146,8 +155,10 @@ CONFIG_STATIC_USERMODEHELPER=y
CONFIG_STATIC_USERMODEHELPER_PATH=""
CONFIG_SECURITY_SELINUX=y
CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_CRYPTO_HCTR2=y
CONFIG_CRYPTO_LZO=y
CONFIG_CRYPTO_LZ4=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
CONFIG_CRYPTO_POLYVAL_ARM64_CE=y
@ -157,7 +168,6 @@ CONFIG_DMA_RESTRICTED_POOL=y
CONFIG_PRINTK_TIME=y
CONFIG_PRINTK_CALLER=y
CONFIG_DYNAMIC_DEBUG_CORE=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO_DWARF5=y
CONFIG_DEBUG_INFO_REDUCED=y
CONFIG_HEADERS_INSTALL=y
@ -165,12 +175,11 @@ CONFIG_HEADERS_INSTALL=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_UBSAN=y
CONFIG_UBSAN_TRAP=y
CONFIG_UBSAN_LOCAL_BOUNDS=y
# CONFIG_UBSAN_SHIFT is not set
# CONFIG_UBSAN_BOOL is not set
# CONFIG_UBSAN_ENUM is not set
CONFIG_PAGE_OWNER=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_KASAN=y
CONFIG_KASAN_HW_TAGS=y
CONFIG_PANIC_ON_OOPS=y
@ -178,8 +187,6 @@ CONFIG_PANIC_TIMEOUT=-1
CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_WQ_WATCHDOG=y
CONFIG_SCHEDSTATS=y
# CONFIG_DEBUG_PREEMPT is not set
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_HIST_TRIGGERS=y
CONFIG_PID_IN_CONTEXTIDR=y
# CONFIG_RUNTIME_TESTING_MENU is not set

View File

@ -7,3 +7,4 @@
# CONFIG_PAGE_PINNER is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_DEBUG_INFO_BTF is not set
# CONFIG_SHADOW_CALL_STACK is not set

View File

@ -51,6 +51,7 @@ void mte_sync_tags(pte_t pte, unsigned int nr_pages)
/* ensure the tags are visible before the PTE is set */
smp_wmb();
}
EXPORT_SYMBOL_GPL(mte_sync_tags);
int memcmp_pages(struct page *page1, struct page *page2)
{

View File

@ -549,7 +549,8 @@ void *hyp_alloc(size_t size)
struct hyp_allocator *allocator = &hyp_allocator;
struct chunk_hdr *chunk, *last_chunk;
unsigned long chunk_addr;
int missing_map, ret = 0;
size_t missing_map;
int ret = 0;
size = ALIGN(size ?: MIN_ALLOC, MIN_ALLOC);

View File

@ -431,15 +431,19 @@ int __pkvm_guest_relinquish_to_host(struct pkvm_hyp_vcpu *vcpu,
goto end;
}
hyp_poison_page(phys, PAGE_SIZE);
psci_mem_protect_dec(1);
/* Zap the guest stage2 pte and return ownership to the host */
ret = kvm_pgtable_stage2_unmap(&vm->pgt, ipa, PAGE_SIZE);
if (ret)
goto end;
hyp_poison_page(phys, PAGE_SIZE);
psci_mem_protect_dec(1);
WARN_ON(host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HOST));
if (pkvm_ipa_range_has_pvmfw(vm, ipa, ipa + PAGE_SIZE))
vm->kvm.arch.pkvm.pvmfw_load_addr = PVMFW_INVALID_LOAD_ADDR;
end:
guest_unlock_component(vm);
host_unlock_component();

View File

@ -758,6 +758,7 @@ hyp_trace_raw_read(struct file *file, char __user *ubuf,
struct ht_iterator *iter = (struct ht_iterator *)file->private_data;
size_t size;
int ret;
void *page_data;
if (iter->copy_leftover)
goto read;
@ -786,7 +787,9 @@ read:
if (size > cnt)
size = cnt;
ret = copy_to_user(ubuf, iter->spare + PAGE_SIZE - size, size);
page_data = ring_buffer_read_page_data(
(struct buffer_data_read_page *)iter->spare);
ret = copy_to_user(ubuf, page_data + PAGE_SIZE - size, size);
if (ret == size)
return -EFAULT;
@ -855,13 +858,10 @@ static int hyp_trace_open(struct inode *inode, struct file *file)
{
int cpu = (s64)inode->i_private;
if (file->f_mode & FMODE_WRITE) {
if (file->f_mode & FMODE_WRITE)
hyp_trace_reset(cpu);
return 0;
}
return -EPERM;
return 0;
}
static ssize_t hyp_trace_write(struct file *filp, const char __user *ubuf,
@ -1039,9 +1039,9 @@ int hyp_trace_init_tracefs(void)
(void *)cpu, &hyp_trace_pipe_fops);
tracefs_create_file("trace_pipe_raw", TRACEFS_MODE_READ, per_cpu_dir,
(void *)cpu, &hyp_trace_pipe_fops);
(void *)cpu, &hyp_trace_raw_fops);
tracefs_create_file("trace", TRACEFS_MODE_READ, per_cpu_dir,
tracefs_create_file("trace", TRACEFS_MODE_WRITE, per_cpu_dir,
(void *)cpu, &hyp_trace_fops);
}

View File

@ -1,3 +0,0 @@
per-file crypto/**=file:/crypto/OWNERS
per-file mm/**=file:/mm/OWNERS
per-file net/**=file:/net/OWNERS

View File

@ -15,6 +15,7 @@ CONFIG_UCLAMP_TASK=y
CONFIG_UCLAMP_BUCKETS_COUNT=20
CONFIG_CGROUPS=y
CONFIG_MEMCG=y
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_TIME_NS is not set
# CONFIG_PID_NS is not set
@ -24,7 +25,9 @@ CONFIG_MEMCG=y
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
CONFIG_BOOT_CONFIG=y
CONFIG_EXPERT=y
CONFIG_PROFILING=y
CONFIG_KEXEC_FILE=y
CONFIG_SMP=y
CONFIG_X86_X2APIC=y
CONFIG_HYPERVISOR_GUEST=y
@ -33,12 +36,9 @@ CONFIG_PARAVIRT_TIME_ACCOUNTING=y
CONFIG_NR_CPUS=32
# CONFIG_X86_MCE is not set
CONFIG_EFI=y
CONFIG_KEXEC_FILE=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="stack_depot_disable=on cgroup_disable=pressure ioremap_guard panic=-1 bootconfig acpi=noirq"
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
# CONFIG_PM_WAKELOCKS_GC is not set
# CONFIG_SUSPEND is not set
CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_FREQ_TIMES=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
@ -58,8 +58,7 @@ CONFIG_SHUFFLE_PAGE_ALLOCATOR=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
# CONFIG_ZONE_DMA is not set
CONFIG_ANON_VMA_NAME=y
CONFIG_USERFAULTFD=y
CONFIG_LRU_GEN=y
@ -69,9 +68,17 @@ CONFIG_DAMON_RECLAIM=y
CONFIG_NET=y
CONFIG_UNIX=y
CONFIG_INET=y
# CONFIG_INET_DIAG is not set
CONFIG_TCP_CONG_ADVANCED=y
# CONFIG_TCP_CONG_BIC is not set
# CONFIG_TCP_CONG_CUBIC is not set
# CONFIG_TCP_CONG_WESTWOOD is not set
# CONFIG_TCP_CONG_HTCP is not set
# CONFIG_IPV6 is not set
CONFIG_VSOCKETS=y
CONFIG_VIRTIO_VSOCKETS=y
# CONFIG_WIRELESS is not set
# CONFIG_ETHTOOL_NETLINK is not set
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
CONFIG_PCIEAER=y
@ -79,21 +86,20 @@ CONFIG_PCI_MSI=y
CONFIG_PCI_IOV=y
CONFIG_PCIE_DW_PLAT_EP=y
CONFIG_PCI_ENDPOINT=y
CONFIG_FW_LOADER_USER_HELPER=y
# CONFIG_FW_CACHE is not set
# CONFIG_FW_LOADER is not set
CONFIG_OF=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_VIRTIO_BLK=y
CONFIG_MD=y
# CONFIG_MD_BITMAP_FILE is not set
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_FEC=y
# CONFIG_DM_USER is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_LEGACY_PTYS is not set
@ -105,7 +111,6 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=0
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_NULL_TTY=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
# CONFIG_DEVMEM is not set
# CONFIG_DEVPORT is not set
@ -127,12 +132,13 @@ CONFIG_MFD_SYSCON=y
# CONFIG_USB_SUPPORT is not set
CONFIG_EDAC=y
CONFIG_RTC_CLASS=y
CONFIG_DMABUF_HEAPS=y
CONFIG_DMABUF_SYSFS_STATS=y
CONFIG_UIO=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BALLOON=y
CONFIG_STAGING=y
# CONFIG_IOMMU_SUPPORT is not set
# CONFIG_ANDROID_KABI_RESERVE is not set
# CONFIG_ANDROID_VENDOR_OEM_DATA is not set
CONFIG_LIBNVDIMM=y
CONFIG_EXT4_FS=y
# CONFIG_EXT4_USE_FOR_EXT2 is not set
@ -203,8 +209,10 @@ CONFIG_STATIC_USERMODEHELPER=y
CONFIG_STATIC_USERMODEHELPER_PATH=""
CONFIG_SECURITY_SELINUX=y
CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_CRYPTO_HCTR2=y
CONFIG_CRYPTO_LZO=y
CONFIG_CRYPTO_LZ4=y
CONFIG_CRYPTO_AES_NI_INTEL=y
CONFIG_CRYPTO_POLYVAL_CLMUL_NI=y
CONFIG_CRYPTO_SHA1_SSSE3=y
@ -212,7 +220,6 @@ CONFIG_CRYPTO_SHA256_SSSE3=y
CONFIG_CRYPTO_SHA512_SSSE3=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG_CORE=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO_DWARF5=y
CONFIG_DEBUG_INFO_REDUCED=y
CONFIG_HEADERS_INSTALL=y
@ -220,12 +227,11 @@ CONFIG_HEADERS_INSTALL=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_UBSAN=y
CONFIG_UBSAN_TRAP=y
CONFIG_UBSAN_LOCAL_BOUNDS=y
# CONFIG_UBSAN_SHIFT is not set
# CONFIG_UBSAN_BOOL is not set
# CONFIG_UBSAN_ENUM is not set
CONFIG_PAGE_OWNER=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_KFENCE=y
CONFIG_KFENCE_SAMPLE_INTERVAL=500
CONFIG_KFENCE_NUM_OBJECTS=63
@ -234,6 +240,5 @@ CONFIG_PANIC_TIMEOUT=-1
CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_WQ_WATCHDOG=y
CONFIG_SCHEDSTATS=y
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_HIST_TRIGGERS=y
CONFIG_UNWINDER_FRAME_POINTER=y

View File

@ -1,2 +0,0 @@
bvanassche@google.com
jaegeuk@google.com

View File

@ -1 +0,0 @@
ardb@google.com

View File

@ -1,6 +0,0 @@
per-file base/**=gregkh@google.com,saravanak@google.com
per-file block/**=akailash@google.com
per-file md/**=akailash@google.com,paullawrence@google.com
per-file net/**=file:/net/OWNERS
per-file scsi/**=bvanassche@google.com,jaegeuk@google.com
per-file {tty,usb}/**=gregkh@google.com

View File

@ -931,7 +931,7 @@ impl Process {
refs.by_node.remove(&id);
}
} else {
pr_warn!("{}: no such ref {handle}\n", kernel::current!().pid());
pr_warn!("{}: no such ref {handle}\n", self.pid_in_current_ns());
}
Ok(())
}
@ -1310,37 +1310,9 @@ impl Process {
work.into_arc().cancel();
}
// Take all threads and release them.
let threads = take(&mut self.inner.lock().threads);
for thread in threads.values() {
thread.release();
}
drop(threads);
// Free any resources kept alive by allocated buffers.
let omapping = self.inner.lock().mapping.take();
if let Some(mut mapping) = omapping {
let address = mapping.address;
mapping
.alloc
.take_for_each(|offset, size, debug_id, odata| {
let ptr = offset + address;
let mut alloc =
Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
if let Some(data) = odata {
alloc.set_info(data);
}
drop(alloc)
});
}
// Drop all references. We do this dance with `swap` to avoid destroying the references
// while holding the lock.
let mut refs = self.node_refs.lock();
let mut node_refs = take(&mut refs.by_handle);
let freeze_listeners = take(&mut refs.freeze_listeners);
drop(refs);
for info in node_refs.values_mut() {
for info in self.node_refs.lock().by_handle.values_mut() {
// SAFETY: We are removing the `NodeRefInfo` from the right node.
unsafe { info.node_ref2().node.remove_node_info(&info) };
@ -1352,14 +1324,24 @@ impl Process {
};
death.set_cleared(false);
}
drop(node_refs);
let freeze_listeners = take(&mut self.node_refs.lock().freeze_listeners);
for listener in freeze_listeners.values() {
listener.on_process_exit(&self);
}
drop(freeze_listeners);
// Do similar dance for the state lock.
let mut inner = self.inner.lock();
let threads = take(&mut inner.threads);
let nodes = take(&mut inner.nodes);
drop(inner);
// Release all threads.
for thread in threads.values() {
thread.release();
}
// Deliver death notifications.
let nodes = take(&mut self.inner.lock().nodes);
for node in nodes.values() {
loop {
let death = {
@ -1373,6 +1355,27 @@ impl Process {
death.set_dead();
}
}
// Free any resources kept alive by allocated buffers.
let omapping = self.inner.lock().mapping.take();
if let Some(mut mapping) = omapping {
let address = mapping.address;
mapping
.alloc
.take_for_each(|offset, size, debug_id, odata| {
let ptr = offset + address;
pr_warn!(
"{}: removing orphan mapping {offset}:{size}\n",
self.pid_in_current_ns()
);
let mut alloc =
Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
if let Some(data) = odata {
alloc.set_info(data);
}
drop(alloc)
});
}
}
pub(crate) fn drop_outstanding_txn(&self) {

View File

@ -106,6 +106,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_write_wait_finish);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_opt_spin_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_opt_spin_finish);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_can_spin_on_owner);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_direct_rsteal);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_optimistic_rspin);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpu_idle_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpu_idle_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mpam_set);
@ -452,6 +454,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_madvise_pageout_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_madvise_cold_or_pageout_abort);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_pages_prepare_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_pages_ok_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_page_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_folio_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_split_large_folio_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_should_be_protected);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_referenced_check_bypass);
@ -504,6 +508,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_show_stack_hash);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_exit_check);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rmqueue);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_filemap_get_folio);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_filemap_get_folio_gfp);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_f2fs_file_open);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_compaction_begin);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_compaction_end);
@ -578,9 +583,30 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_shrink_node);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpuset_fork);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_uid);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_user);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gcma_cc_allow_nonworkingset);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gcma_cc_store_page_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gzvm_vcpu_exit_reason);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gzvm_handle_demand_page_pre);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gzvm_handle_demand_page_post);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gzvm_destroy_vm_post_process);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_usb_dev_suspend);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_usb_dev_resume);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_swap_bio_charge);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_customize_alloc_anon_thp);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_customize_ac);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_customize_rmqueue);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_customize_suitable_zone);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_customize_wmark_ok);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_customize_zone_max_order);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_customize_zone_pageset);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_isolate_priv_lru);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_customize_file_is_tiny);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_customize_pgdat_balanced);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_customize_reclaim_idx);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_customize_zone_can_compact);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_customize_longterm_pinnable);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_do_madvise_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_migrate_one_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_remove_migration_pte_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_split_huge_page_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_try_split_folio_bypass);

View File

@ -1625,7 +1625,6 @@ static void gic_syscore_init(void)
#else
static inline void gic_syscore_init(void) { }
static int gic_v3_suspend(void) { return 0; }
#endif /* CONFIG_PM */
static struct irq_chip gic_chip = {

View File

@ -40,16 +40,6 @@
#define DM_BUFIO_WRITEBACK_RATIO 3
#define DM_BUFIO_LOW_WATERMARK_RATIO 16
/*
* Check buffer ages in this interval (seconds)
*/
#define DM_BUFIO_WORK_TIMER_SECS 30
/*
* Free buffers when they are older than this (seconds)
*/
#define DM_BUFIO_DEFAULT_AGE_SECS 300
/*
* The nr of bytes of cached data to keep around.
*/
@ -1056,10 +1046,8 @@ static unsigned long dm_bufio_cache_size_latch;
static DEFINE_SPINLOCK(global_spinlock);
/*
* Buffers are freed after this timeout
*/
static unsigned int dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
static unsigned int dm_bufio_max_age; /* No longer does anything */
static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
static unsigned long dm_bufio_peak_allocated;
@ -1086,7 +1074,6 @@ static LIST_HEAD(dm_bufio_all_clients);
static DEFINE_MUTEX(dm_bufio_clients_lock);
static struct workqueue_struct *dm_bufio_wq;
static struct delayed_work dm_bufio_cleanup_old_work;
static struct work_struct dm_bufio_replacement_work;
@ -2671,130 +2658,6 @@ EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
/*--------------------------------------------------------------*/
static unsigned int get_max_age_hz(void)
{
unsigned int max_age = READ_ONCE(dm_bufio_max_age);
if (max_age > UINT_MAX / HZ)
max_age = UINT_MAX / HZ;
return max_age * HZ;
}
static bool older_than(struct dm_buffer *b, unsigned long age_hz)
{
return time_after_eq(jiffies, READ_ONCE(b->last_accessed) + age_hz);
}
struct evict_params {
gfp_t gfp;
unsigned long age_hz;
/*
* This gets updated with the largest last_accessed (ie. most
* recently used) of the evicted buffers. It will not be reinitialised
* by __evict_many(), so you can use it across multiple invocations.
*/
unsigned long last_accessed;
};
/*
* We may not be able to evict this buffer if IO pending or the client
* is still using it.
*
* And if GFP_NOFS is used, we must not do any I/O because we hold
* dm_bufio_clients_lock and we would risk deadlock if the I/O gets
* rerouted to different bufio client.
*/
static enum evict_result select_for_evict(struct dm_buffer *b, void *context)
{
struct evict_params *params = context;
if (!(params->gfp & __GFP_FS) ||
(static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
if (test_bit_acquire(B_READING, &b->state) ||
test_bit(B_WRITING, &b->state) ||
test_bit(B_DIRTY, &b->state))
return ER_DONT_EVICT;
}
return older_than(b, params->age_hz) ? ER_EVICT : ER_STOP;
}
static unsigned long __evict_many(struct dm_bufio_client *c,
struct evict_params *params,
int list_mode, unsigned long max_count)
{
unsigned long count;
unsigned long last_accessed;
struct dm_buffer *b;
for (count = 0; count < max_count; count++) {
b = cache_evict(&c->cache, list_mode, select_for_evict, params);
if (!b)
break;
last_accessed = READ_ONCE(b->last_accessed);
if (time_after_eq(params->last_accessed, last_accessed))
params->last_accessed = last_accessed;
__make_buffer_clean(b);
__free_buffer_wake(b);
cond_resched();
}
return count;
}
static void evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
{
struct evict_params params = {.gfp = 0, .age_hz = age_hz, .last_accessed = 0};
unsigned long retain = get_retain_buffers(c);
unsigned long count;
LIST_HEAD(write_list);
dm_bufio_lock(c);
__check_watermark(c, &write_list);
if (unlikely(!list_empty(&write_list))) {
dm_bufio_unlock(c);
__flush_write_list(&write_list);
dm_bufio_lock(c);
}
count = cache_total(&c->cache);
if (count > retain)
__evict_many(c, &params, LIST_CLEAN, count - retain);
dm_bufio_unlock(c);
}
static void cleanup_old_buffers(void)
{
unsigned long max_age_hz = get_max_age_hz();
struct dm_bufio_client *c;
mutex_lock(&dm_bufio_clients_lock);
__cache_size_refresh();
list_for_each_entry(c, &dm_bufio_all_clients, client_list)
evict_old_buffers(c, max_age_hz);
mutex_unlock(&dm_bufio_clients_lock);
}
static void work_fn(struct work_struct *w)
{
cleanup_old_buffers();
queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
DM_BUFIO_WORK_TIMER_SECS * HZ);
}
/*--------------------------------------------------------------*/
/*
* Global cleanup tries to evict the oldest buffers from across _all_
* the clients. It does this by repeatedly evicting a few buffers from
@ -2832,27 +2695,55 @@ static void __insert_client(struct dm_bufio_client *new_client)
list_add_tail(&new_client->client_list, h);
}
static enum evict_result select_for_evict(struct dm_buffer *b, void *context)
{
/* In no-sleep mode, we cannot wait on IO. */
if (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep) {
if (test_bit_acquire(B_READING, &b->state) ||
test_bit(B_WRITING, &b->state) ||
test_bit(B_DIRTY, &b->state))
return ER_DONT_EVICT;
}
return ER_EVICT;
}
static unsigned long __evict_a_few(unsigned long nr_buffers)
{
unsigned long count;
struct dm_bufio_client *c;
struct evict_params params = {
.gfp = GFP_KERNEL,
.age_hz = 0,
/* set to jiffies in case there are no buffers in this client */
.last_accessed = jiffies
};
unsigned long oldest_buffer = jiffies;
unsigned long last_accessed;
unsigned long count;
struct dm_buffer *b;
c = __pop_client();
if (!c)
return 0;
dm_bufio_lock(c);
count = __evict_many(c, &params, LIST_CLEAN, nr_buffers);
for (count = 0; count < nr_buffers; count++) {
b = cache_evict(&c->cache, LIST_CLEAN, select_for_evict, NULL);
if (!b)
break;
last_accessed = READ_ONCE(b->last_accessed);
if (time_after_eq(oldest_buffer, last_accessed))
oldest_buffer = last_accessed;
__make_buffer_clean(b);
__free_buffer_wake(b);
if (need_resched()) {
dm_bufio_unlock(c);
cond_resched();
dm_bufio_lock(c);
}
}
dm_bufio_unlock(c);
if (count)
c->oldest_buffer = params.last_accessed;
c->oldest_buffer = oldest_buffer;
__insert_client(c);
return count;
@ -2934,10 +2825,7 @@ static int __init dm_bufio_init(void)
if (!dm_bufio_wq)
return -ENOMEM;
INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn);
INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup);
queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
DM_BUFIO_WORK_TIMER_SECS * HZ);
return 0;
}
@ -2949,7 +2837,6 @@ static void __exit dm_bufio_exit(void)
{
int bug = 0;
cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
destroy_workqueue(dm_bufio_wq);
if (dm_bufio_client_count) {
@ -2986,7 +2873,7 @@ module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, 0644);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
module_param_named(max_age_seconds, dm_bufio_max_age, uint, 0644);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
MODULE_PARM_DESC(max_age_seconds, "No longer does anything");
module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, 0644);
MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");

View File

@ -33,8 +33,9 @@ static void event_seq_changed(struct venus_core *core, struct venus_inst *inst,
struct hfi_buffer_requirements *bufreq;
struct hfi_extradata_input_crop *crop;
struct hfi_dpb_counts *dpb_count;
u32 ptype, rem_bytes;
u32 size_read = 0;
u8 *data_ptr;
u32 ptype;
inst->error = HFI_ERR_NONE;
@ -44,86 +45,118 @@ static void event_seq_changed(struct venus_core *core, struct venus_inst *inst,
break;
default:
inst->error = HFI_ERR_SESSION_INVALID_PARAMETER;
goto done;
inst->ops->event_notify(inst, EVT_SYS_EVENT_CHANGE, &event);
return;
}
event.event_type = pkt->event_data1;
num_properties_changed = pkt->event_data2;
if (!num_properties_changed) {
inst->error = HFI_ERR_SESSION_INSUFFICIENT_RESOURCES;
goto done;
}
if (!num_properties_changed)
goto error;
data_ptr = (u8 *)&pkt->ext_event_data[0];
rem_bytes = pkt->shdr.hdr.size - sizeof(*pkt);
do {
if (rem_bytes < sizeof(u32))
goto error;
ptype = *((u32 *)data_ptr);
data_ptr += sizeof(u32);
rem_bytes -= sizeof(u32);
switch (ptype) {
case HFI_PROPERTY_PARAM_FRAME_SIZE:
data_ptr += sizeof(u32);
if (rem_bytes < sizeof(struct hfi_framesize))
goto error;
frame_sz = (struct hfi_framesize *)data_ptr;
event.width = frame_sz->width;
event.height = frame_sz->height;
data_ptr += sizeof(*frame_sz);
size_read = sizeof(struct hfi_framesize);
break;
case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT:
data_ptr += sizeof(u32);
if (rem_bytes < sizeof(struct hfi_profile_level))
goto error;
profile_level = (struct hfi_profile_level *)data_ptr;
event.profile = profile_level->profile;
event.level = profile_level->level;
data_ptr += sizeof(*profile_level);
size_read = sizeof(struct hfi_profile_level);
break;
case HFI_PROPERTY_PARAM_VDEC_PIXEL_BITDEPTH:
data_ptr += sizeof(u32);
if (rem_bytes < sizeof(struct hfi_bit_depth))
goto error;
pixel_depth = (struct hfi_bit_depth *)data_ptr;
event.bit_depth = pixel_depth->bit_depth;
data_ptr += sizeof(*pixel_depth);
size_read = sizeof(struct hfi_bit_depth);
break;
case HFI_PROPERTY_PARAM_VDEC_PIC_STRUCT:
data_ptr += sizeof(u32);
if (rem_bytes < sizeof(struct hfi_pic_struct))
goto error;
pic_struct = (struct hfi_pic_struct *)data_ptr;
event.pic_struct = pic_struct->progressive_only;
data_ptr += sizeof(*pic_struct);
size_read = sizeof(struct hfi_pic_struct);
break;
case HFI_PROPERTY_PARAM_VDEC_COLOUR_SPACE:
data_ptr += sizeof(u32);
if (rem_bytes < sizeof(struct hfi_colour_space))
goto error;
colour_info = (struct hfi_colour_space *)data_ptr;
event.colour_space = colour_info->colour_space;
data_ptr += sizeof(*colour_info);
size_read = sizeof(struct hfi_colour_space);
break;
case HFI_PROPERTY_CONFIG_VDEC_ENTROPY:
data_ptr += sizeof(u32);
if (rem_bytes < sizeof(u32))
goto error;
event.entropy_mode = *(u32 *)data_ptr;
data_ptr += sizeof(u32);
size_read = sizeof(u32);
break;
case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
data_ptr += sizeof(u32);
if (rem_bytes < sizeof(struct hfi_buffer_requirements))
goto error;
bufreq = (struct hfi_buffer_requirements *)data_ptr;
event.buf_count = hfi_bufreq_get_count_min(bufreq, ver);
data_ptr += sizeof(*bufreq);
size_read = sizeof(struct hfi_buffer_requirements);
break;
case HFI_INDEX_EXTRADATA_INPUT_CROP:
data_ptr += sizeof(u32);
if (rem_bytes < sizeof(struct hfi_extradata_input_crop))
goto error;
crop = (struct hfi_extradata_input_crop *)data_ptr;
event.input_crop.left = crop->left;
event.input_crop.top = crop->top;
event.input_crop.width = crop->width;
event.input_crop.height = crop->height;
data_ptr += sizeof(*crop);
size_read = sizeof(struct hfi_extradata_input_crop);
break;
case HFI_PROPERTY_PARAM_VDEC_DPB_COUNTS:
data_ptr += sizeof(u32);
if (rem_bytes < sizeof(struct hfi_dpb_counts))
goto error;
dpb_count = (struct hfi_dpb_counts *)data_ptr;
event.buf_count = dpb_count->fw_min_cnt;
data_ptr += sizeof(*dpb_count);
size_read = sizeof(struct hfi_dpb_counts);
break;
default:
size_read = 0;
break;
}
data_ptr += size_read;
rem_bytes -= size_read;
num_properties_changed--;
} while (num_properties_changed > 0);
done:
inst->ops->event_notify(inst, EVT_SYS_EVENT_CHANGE, &event);
return;
error:
inst->error = HFI_ERR_SESSION_INSUFFICIENT_RESOURCES;
inst->ops->event_notify(inst, EVT_SYS_EVENT_CHANGE, &event);
}
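
The reworked event_seq_changed() above validates rem_bytes before reading each type tag and again before each payload. A standalone sketch of that bounds-checked walk over a packed property list; the record layout and tag value here are hypothetical, not the actual HFI format:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Hypothetical fixed-size payload for one property type. */
struct prop_framesize { uint32_t width, height; };

static int parse_props(const uint8_t *data, size_t rem_bytes, uint32_t nprops)
{
	while (nprops--) {
		uint32_t type;
		size_t size_read = 0;

		/* Is there room for the type tag itself? */
		if (rem_bytes < sizeof(type))
			return -1;
		memcpy(&type, data, sizeof(type));
		data += sizeof(type);
		rem_bytes -= sizeof(type);

		switch (type) {
		case 0x1: {	/* hypothetical "frame size" tag */
			struct prop_framesize fs;

			/* The payload must fit in what remains of the packet. */
			if (rem_bytes < sizeof(fs))
				return -1;
			memcpy(&fs, data, sizeof(fs));
			size_read = sizeof(fs);
			break;
		}
		default:	/* unknown tag: consume nothing, like the driver */
			break;
		}
		data += size_read;
		rem_bytes -= size_read;
	}
	return 0;
}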

View File

@ -239,6 +239,7 @@ static int venus_write_queue(struct venus_hfi_device *hdev,
static int venus_read_queue(struct venus_hfi_device *hdev,
struct iface_queue *queue, void *pkt, u32 *tx_req)
{
struct hfi_pkt_hdr *pkt_hdr = NULL;
struct hfi_queue_header *qhdr;
u32 dwords, new_rd_idx;
u32 rd_idx, wr_idx, type, qsize;
@ -304,6 +305,9 @@ static int venus_read_queue(struct venus_hfi_device *hdev,
memcpy(pkt, rd_ptr, len);
memcpy(pkt + len, queue->qmem.kva, new_rd_idx << 2);
}
pkt_hdr = (struct hfi_pkt_hdr *)(pkt);
if ((pkt_hdr->size >> 2) != dwords)
return -EINVAL;
} else {
/* bad packet received, dropping */
new_rd_idx = qhdr->write_idx;

View File

@ -81,24 +81,44 @@ void pci_configure_aspm_l1ss(struct pci_dev *pdev)
void pci_save_aspm_l1ss_state(struct pci_dev *pdev)
{
struct pci_dev *parent = pdev->bus->self;
struct pci_cap_saved_state *save_state;
u16 l1ss = pdev->l1ss;
u32 *cap;
/*
* If this is a Downstream Port, we never restore the L1SS state
* directly; we only restore it when we restore the state of the
* Upstream Port below it.
*/
if (pcie_downstream_port(pdev) || !parent)
return;
if (!pdev->l1ss || !parent->l1ss)
return;
/*
* Save L1 substate configuration. The ASPM L0s/L1 configuration
* in PCI_EXP_LNKCTL_ASPMC is saved by pci_save_pcie_state().
*/
if (!l1ss)
return;
save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
if (!save_state)
return;
cap = &save_state->cap.data[0];
pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL2, cap++);
pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, cap++);
pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL2, cap++);
pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, cap++);
/*
* Save parent's L1 substate configuration so we have it for
* pci_restore_aspm_l1ss_state(pdev) to restore.
*/
save_state = pci_find_saved_ext_cap(parent, PCI_EXT_CAP_ID_L1SS);
if (!save_state)
return;
cap = &save_state->cap.data[0];
pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, cap++);
pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, cap++);
}
void pci_restore_aspm_l1ss_state(struct pci_dev *pdev)
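
The restore path that begins above (truncated by the hunk) is the counterpart to this save logic. A rough sketch of what a matching restore could look like under the same parent/child assumptions; ordering and error handling are simplified, and this is not the kernel's exact implementation:

/* Simplified sketch: restore the parent's L1SS control registers first,
 * then the child's, writing CTL2 before CTL1 to mirror the save order.
 * ASPM re-enable sequencing is omitted.
 */
static void sketch_restore_aspm_l1ss(struct pci_dev *pdev)
{
	struct pci_dev *parent = pdev->bus->self;
	struct pci_cap_saved_state *save;
	u32 *cap;

	if (pcie_downstream_port(pdev) || !parent)
		return;
	if (!pdev->l1ss || !parent->l1ss)
		return;

	save = pci_find_saved_ext_cap(parent, PCI_EXT_CAP_ID_L1SS);
	if (!save)
		return;
	cap = &save->cap.data[0];
	pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, *cap++);
	pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, *cap++);

	save = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
	if (!save)
		return;
	cap = &save->cap.data[0];
	pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL2, *cap++);
	pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, *cap++);
}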

View File

@ -1225,7 +1225,7 @@ static int cros_typec_probe(struct platform_device *pdev)
typec->dev = dev;
typec->ec = dev_get_drvdata(pdev->dev.parent);
if (!typec->ec) {
if (!typec->ec || !typec->ec->ec) {
dev_warn(dev, "couldn't find parent EC device\n");
return -EPROBE_DEFER;
}

View File

@ -298,18 +298,11 @@ impl MiscDevice for Ashmem {
impl Ashmem {
fn set_name(&self, reader: UserSliceReader) -> Result<isize> {
let mut local_name = [0u8; ASHMEM_NAME_LEN];
let mut len = reader.strncpy_from_user(&mut local_name)?;
let mut buf = [0u8; ASHMEM_NAME_LEN];
let name = reader.strcpy_into_buf(&mut buf)?.as_bytes();
// If the zero terminator is missing, the string is truncated to `ASHMEM_NAME_LEN-1` so
// that `get_name` can return it and has enough space to add a zero terminator.
if len == ASHMEM_NAME_LEN {
len -= 1;
local_name[len] = 0;
}
let mut v = KVec::with_capacity(len, GFP_KERNEL)?;
v.extend_from_slice(&local_name[..len], GFP_KERNEL)?;
let mut v = KVec::with_capacity(name.len(), GFP_KERNEL)?;
v.extend_from_slice(name, GFP_KERNEL)?;
let mut asma = self.inner.lock();
if asma.file.is_some() {

View File

@ -45,8 +45,6 @@
#undef CREATE_TRACE_POINTS
#include <trace/hooks/ufshcd.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(ufshcd_command);
#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
UTP_TASK_REQ_COMPL |\
UFSHCD_ERROR_MASK)

View File

@ -5719,6 +5719,7 @@ static void port_event(struct usb_hub *hub, int port1)
struct usb_device *hdev = hub->hdev;
u16 portstatus, portchange;
int i = 0;
int err;
connect_change = test_bit(port1, hub->change_bits);
clear_bit(port1, hub->event_bits);
@ -5815,8 +5816,11 @@ static void port_event(struct usb_hub *hub, int port1)
} else if (!udev || !(portstatus & USB_PORT_STAT_CONNECTION)
|| udev->state == USB_STATE_NOTATTACHED) {
dev_dbg(&port_dev->dev, "do warm reset, port only\n");
if (hub_port_reset(hub, port1, NULL,
HUB_BH_RESET_TIME, true) < 0)
err = hub_port_reset(hub, port1, NULL,
HUB_BH_RESET_TIME, true);
if (!udev && err == -ENOTCONN)
connect_change = 0;
else if (err < 0)
hub_port_disable(hub, port1, 1);
} else {
dev_dbg(&port_dev->dev, "do warm reset, full device\n");

View File

@ -373,6 +373,10 @@ vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
len = iov_length(vq->iov, out);
if (len < VIRTIO_VSOCK_SKB_HEADROOM ||
len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM)
return NULL;
/* len contains both payload and hdr */
skb = virtio_vsock_alloc_skb(len, GFP_KERNEL);
if (!skb)
@ -400,18 +404,15 @@ vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
return skb;
/* The pkt is too big or the length in the header is invalid */
if (payload_len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE ||
payload_len + sizeof(*hdr) > len) {
if (payload_len + sizeof(*hdr) > len) {
kfree_skb(skb);
return NULL;
}
virtio_vsock_skb_rx_put(skb);
virtio_vsock_skb_put(skb, payload_len);
nbytes = copy_from_iter(skb->data, payload_len, &iov_iter);
if (nbytes != payload_len) {
vq_err(vq, "Expected %zu byte payload, got %zu bytes\n",
payload_len, nbytes);
if (skb_copy_datagram_from_iter(skb, 0, &iov_iter, payload_len)) {
vq_err(vq, "Failed to copy %zu byte payload\n", payload_len);
kfree_skb(skb);
return NULL;
}

View File

@ -257,6 +257,9 @@ static long gunyah_cma_create_mem_fd(struct gunyah_cma *cma)
struct file *file;
int fd, err;
if (cma->page)
return -EBUSY;
flags |= O_CLOEXEC;
fd = get_unused_fd_flags(flags);
if (fd < 0)

View File

@ -1 +0,0 @@
per-file {crypto,verity}/**=ebiggers@google.com

View File

@ -854,7 +854,7 @@ static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force)
kfree_rcu(epi, rcu);
percpu_counter_dec(&ep->user->epoll_watches);
return ep_refcount_dec_and_test(ep);
return true;
}
/*
@ -862,14 +862,14 @@ static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force)
*/
static void ep_remove_safe(struct eventpoll *ep, struct epitem *epi)
{
WARN_ON_ONCE(__ep_remove(ep, epi, false));
if (__ep_remove(ep, epi, false))
WARN_ON_ONCE(ep_refcount_dec_and_test(ep));
}
static void ep_clear_and_put(struct eventpoll *ep)
{
struct rb_node *rbp, *next;
struct epitem *epi;
bool dispose;
/* We need to release all tasks waiting for these file */
if (waitqueue_active(&ep->poll_wait))
@ -902,10 +902,8 @@ static void ep_clear_and_put(struct eventpoll *ep)
cond_resched();
}
dispose = ep_refcount_dec_and_test(ep);
mutex_unlock(&ep->mtx);
if (dispose)
if (ep_refcount_dec_and_test(ep))
ep_free(ep);
}
@ -1108,7 +1106,7 @@ again:
dispose = __ep_remove(ep, epi, true);
mutex_unlock(&ep->mtx);
if (dispose)
if (dispose && ep_refcount_dec_and_test(ep))
ep_free(ep);
goto again;
}
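
Taken together, these eventpoll hunks move reference dropping out of __ep_remove() and into its callers: removal and the final ep_refcount_dec_and_test()/ep_free() are now separate caller-side steps. A minimal single-threaded sketch of that ownership contract with generic names (the kernel version uses an atomic counter and locking):

#include <stdbool.h>
#include <stdlib.h>

struct obj {
	int refcount;	/* sketch only; eventpoll uses an atomic refcount */
	int nitems;
};

/* Remove one item. Reports success but never touches the refcount,
 * matching the patched __ep_remove() contract.
 */
static bool obj_remove_item(struct obj *o)
{
	if (o->nitems == 0)
		return false;
	o->nitems--;
	return true;
}

static bool obj_ref_dec_and_test(struct obj *o)
{
	return --o->refcount == 0;
}

static void obj_remove_and_put(struct obj *o)
{
	/* Caller-side pattern: drop the reference only when removal
	 * happened, and free only on the final reference.
	 */
	if (obj_remove_item(o) && obj_ref_dec_and_test(o))
		free(o);
}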

View File

@ -1 +0,0 @@
jaegeuk@google.com

View File

@ -22,8 +22,6 @@
#include <trace/events/f2fs.h>
#include <trace/hooks/fs.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(f2fs_write_checkpoint);
#define DEFAULT_CHECKPOINT_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 3))
static struct kmem_cache *ino_entry_slab;

View File

@ -864,6 +864,7 @@ struct f2fs_inode_info {
/* linked in global inode list for cache donation */
struct list_head gdonate_list;
pgoff_t donate_start, donate_end; /* inclusive */
atomic_t open_count; /* # of open files */
struct task_struct *atomic_write_task; /* store atomic write task */
struct extent_tree *extent_tree[NR_EXTENT_CACHES];
@ -3646,6 +3647,7 @@ int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink);
void f2fs_update_inode(struct inode *inode, struct page *node_page);
void f2fs_update_inode_page(struct inode *inode);
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
void f2fs_remove_donate_inode(struct inode *inode);
void f2fs_evict_inode(struct inode *inode);
void f2fs_handle_failed_inode(struct inode *inode);

View File

@ -38,9 +38,6 @@
#undef CREATE_TRACE_POINTS
#include <trace/hooks/fs.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(f2fs_sync_file_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(f2fs_sync_file_exit);
static void f2fs_zero_post_eof_page(struct inode *inode, loff_t new_size)
{
loff_t old_size = i_size_read(inode);
@ -636,7 +633,10 @@ static int f2fs_file_open(struct inode *inode, struct file *filp)
if (err)
return err;
return finish_preallocate_blocks(inode);
err = finish_preallocate_blocks(inode);
if (!err)
atomic_inc(&F2FS_I(inode)->open_count);
return err;
}
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
@ -2034,6 +2034,9 @@ out:
static int f2fs_release_file(struct inode *inode, struct file *filp)
{
if (atomic_dec_and_test(&F2FS_I(inode)->open_count))
f2fs_remove_donate_inode(inode);
/*
* f2fs_release_file is called at every close calls. So we should
* not drop any inmemory pages by close called by other process.

View File

@ -23,9 +23,6 @@
#include "iostat.h"
#include <trace/events/f2fs.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(f2fs_gc_begin);
EXPORT_TRACEPOINT_SYMBOL_GPL(f2fs_gc_end);
static struct kmem_cache *victim_entry_slab;
static unsigned int count_bits(const unsigned long *addr,

View File

@ -821,7 +821,7 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
return 0;
}
static void f2fs_remove_donate_inode(struct inode *inode)
void f2fs_remove_donate_inode(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

View File

@ -1443,6 +1443,7 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
/* Initialize f2fs-specific inode info */
atomic_set(&fi->dirty_pages, 0);
atomic_set(&fi->i_compr_blocks, 0);
atomic_set(&fi->open_count, 0);
init_f2fs_rwsem(&fi->i_sem);
spin_lock_init(&fi->i_size_lock);
INIT_LIST_HEAD(&fi->dirty_list);

View File

@ -420,23 +420,26 @@ int fuse_lseek_backing(struct fuse_bpf_args *fa, struct file *file, loff_t offse
struct file *backing_file = fuse_file->backing_file;
loff_t ret;
/* TODO: Handle changing of the file handle */
if (offset == 0) {
if (whence == SEEK_CUR) {
flo->offset = file->f_pos;
return flo->offset;
return 0;
}
if (whence == SEEK_SET) {
flo->offset = vfs_setpos(file, 0, 0);
return flo->offset;
return 0;
}
}
inode_lock(file->f_inode);
backing_file->f_pos = file->f_pos;
ret = vfs_llseek(backing_file, fli->offset, fli->whence);
flo->offset = ret;
if (!IS_ERR(ERR_PTR(ret))) {
flo->offset = ret;
ret = 0;
}
inode_unlock(file->f_inode);
return ret;
}
@ -2374,8 +2377,11 @@ static bool filldir(struct dir_context *ctx, const char *name, int namelen,
return true;
}
static int parse_dirfile(char *buf, size_t nbytes, struct dir_context *ctx)
static int parse_dirfile(char *buf, size_t nbytes, struct dir_context *ctx,
loff_t next_offset)
{
char *buffstart = buf;
while (nbytes >= FUSE_NAME_OFFSET) {
struct fuse_dirent *dirent = (struct fuse_dirent *) buf;
size_t reclen = FUSE_DIRENT_SIZE(dirent);
@ -2389,12 +2395,18 @@ static int parse_dirfile(char *buf, size_t nbytes, struct dir_context *ctx)
ctx->pos = dirent->off;
if (!dir_emit(ctx, dirent->name, dirent->namelen, dirent->ino,
dirent->type))
break;
dirent->type)) {
// If we can't make any progress, user buffer is too small
if (buf == buffstart)
return -EINVAL;
else
return 0;
}
buf += reclen;
nbytes -= reclen;
}
ctx->pos = next_offset;
return 0;
}
@ -2441,13 +2453,12 @@ void *fuse_readdir_finalize(struct fuse_bpf_args *fa,
struct file *backing_dir = ff->backing_file;
int err = 0;
err = parse_dirfile(fa->out_args[1].value, fa->out_args[1].size, ctx);
err = parse_dirfile(fa->out_args[1].value, fa->out_args[1].size, ctx, fro->offset);
*force_again = !!fro->again;
if (*force_again && !*allow_force)
err = -EINVAL;
ctx->pos = fro->offset;
backing_dir->f_pos = fro->offset;
backing_dir->f_pos = ctx->pos;
free_page((unsigned long) fa->out_args[1].value);
return ERR_PTR(err);

View File

@ -1,2 +0,0 @@
akailash@google.com
paullawrence@google.com

File diff suppressed because it is too large.

View File

@ -13,3 +13,19 @@ type 'struct f2fs_sb_info' changed
type 'enum gunyah_rm_vm_status' changed
enumerator 'GUNYAH_RM_VM_STATUS_RESET_FAILED' (12) was added
6 function symbol(s) removed
'int __traceiter_f2fs_gc_begin(void*, struct super_block*, int, bool, unsigned int, long long, long long, long long, unsigned int, unsigned int, int, unsigned int)'
'int __traceiter_f2fs_gc_end(void*, struct super_block*, int, int, int, long long, long long, long long, unsigned int, unsigned int, int, unsigned int)'
'int __traceiter_f2fs_sync_file_enter(void*, struct inode*)'
'int __traceiter_f2fs_sync_file_exit(void*, struct inode*, int, int, int)'
'int __traceiter_f2fs_write_checkpoint(void*, struct super_block*, int, const char*)'
'int __traceiter_ufshcd_command(void*, struct scsi_device*, struct ufs_hba*, enum ufs_trace_str_t, unsigned int, u32, u32, int, u32, u64, u8, u8)'
6 variable symbol(s) removed
'struct tracepoint __tracepoint_f2fs_gc_begin'
'struct tracepoint __tracepoint_f2fs_gc_end'
'struct tracepoint __tracepoint_f2fs_sync_file_enter'
'struct tracepoint __tracepoint_f2fs_sync_file_exit'
'struct tracepoint __tracepoint_f2fs_write_checkpoint'
'struct tracepoint __tracepoint_ufshcd_command'

View File

@ -6,22 +6,22 @@ optimize kernel builds for specific architectures and kernel versions.
## kernel.afdo
kernel.afdo is an AArch64 kernel profile collected on kernel version 6.12.18 (
SHA fbd1fef4d7283d210845f8609599efd53c0905a4, build server ID 13287522) using Pixel 6.
kernel.afdo is an AArch64 kernel profile collected on kernel version 6.12.30 (
SHA 21ed84930c160ed721c131a67e5ae6181ac40e1e, build server ID 13771236) using Pixel 6.
### Performance improvements
| Benchmark | Improvement |
| --------------------- | ----------- |
| Boot time | 2.6% |
| Cold App launch time | 3.7% |
| Binder-rpc | 9.4% |
| Binder-addints | 31.9% |
| Hwbinder | 16.5% |
| Bionic (syscall_mmap) | 7.1% |
| Bionic (pthread) | 3.3% |
| Bionic (stdio) | 3.7% |
| Bionic (all) | 3.4% |
| Benchmark | Improvement |
| --------------------- | ---------------------------------------------------------------------- |
| Boot time | 2.2% |
| Cold App launch time | 3.3% (Only for two apps, most app launch tests are broken b/432087996) |
| Binder-rpc | 8.3% |
| Binder-addints | 15.2% |
| Hwbinder | 17.8% |
| Bionic (syscall_mmap) | 6.6% |
| Bionic (pthread) | 2.8% |
| Bionic (stdio) | 1.2% |
| Bionic (all) | 1.4% |
Benchmark results were tested on Pixel 6.

Binary file not shown.

View File

@ -573,6 +573,7 @@
down
downgrade_write
down_interruptible
down_killable
down_read
down_read_trylock
down_timeout
@ -786,6 +787,7 @@
drop_reasons_register_subsys
drop_reasons_unregister_subsys
d_splice_alias
dump_backtrace
dump_stack
dw_pcie_find_capability
dw_pcie_host_deinit
@ -1005,6 +1007,7 @@
handle_fasteoi_irq
handle_simple_irq
handle_sysrq
hci_cmd_sync
hdmi_audio_infoframe_init
hdmi_audio_infoframe_pack
hdmi_avi_infoframe_init
@ -1317,8 +1320,10 @@
log_write_mmio
lookup_bdev
loops_per_jiffy
LZ4_compress_default
LZ4_compress_fast
LZ4_compress_fast_continue
LZ4_compress_HC
LZ4_decompress_safe
LZ4_decompress_safe_continue
LZ4_decompress_safe_partial
@ -2118,6 +2123,7 @@
skb_queue_tail
skb_realloc_headroom
skb_scrub_packet
skb_split
skb_trim
skb_tstamp_tx
skb_unlink
@ -2822,4 +2828,6 @@
zs_malloc
zs_map_object
zs_pool_stats
zstd_max_clevel
zstd_min_clevel
zs_unmap_object

View File

@ -2147,6 +2147,7 @@
irq_get_irqchip_state
kthread_park
kthread_unpark
of_irq_to_resource
of_property_read_variable_u16_array
pcim_enable_device
pcim_iomap_regions

View File

@ -11,6 +11,9 @@
file_write_and_wait_range
fixed_size_llseek
free_hpage
gcma_alloc_range
gcma_free_range
gcma_stat_get
generic_file_read_iter
generic_perform_write
get_pfnblock_flags_mask
@ -28,6 +31,7 @@
proc_set_size
pstore_register
pstore_unregister
register_gcma_area
scsi_device_quiesce
scsi_device_resume
swp_swapcount
@ -49,7 +53,11 @@
__traceiter_android_vh_filemap_map_pages
__traceiter_android_vh_filemap_read
__traceiter_android_vh_free_pages_ok_bypass
__traceiter_android_vh_free_page_bypass
__traceiter_android_vh_free_folio_bypass
__traceiter_android_vh_free_pages_prepare_bypass
__traceiter_android_vh_gcma_cc_allow_nonworkingset
__traceiter_android_vh_gcma_cc_store_page_bypass
__traceiter_android_vh_is_fpsimd_save
__traceiter_android_vh_logbuf
__traceiter_android_vh_logbuf_pr_cont
@ -95,7 +103,11 @@
__tracepoint_android_vh_filemap_map_pages
__tracepoint_android_vh_filemap_read
__tracepoint_android_vh_free_pages_ok_bypass
__tracepoint_android_vh_free_page_bypass
__tracepoint_android_vh_free_folio_bypass
__tracepoint_android_vh_free_pages_prepare_bypass
__tracepoint_android_vh_gcma_cc_allow_nonworkingset
__tracepoint_android_vh_gcma_cc_store_page_bypass
__tracepoint_android_vh_is_fpsimd_save
__tracepoint_android_vh_logbuf
__tracepoint_android_vh_logbuf_pr_cont

View File

@ -45,6 +45,8 @@
__pmd_trans_huge_lock
__pte_offset_map_lock
__show_mem
__traceiter_android_vh_page_cache_ra_order_bypass
__tracepoint_android_vh_page_cache_ra_order_bypass
__traceiter_android_vh_slab_alloc_node
__tracepoint_android_vh_slab_alloc_node
__traceiter_android_vh_slab_free

gki/aarch64/symbols/lenovo (new file, 2183 lines)

File diff suppressed because it is too large.

View File

@ -550,6 +550,7 @@
devm_memremap
devm_memunmap
devm_mfd_add_devices
devm_mmc_alloc_host
devm_nvmem_cell_get
devm_nvmem_device_get
devm_nvmem_register

File diff suppressed because it is too large.

View File

@ -552,6 +552,7 @@
device_match_name
device_match_of_node
device_move
device_node_to_regmap
device_property_match_string
device_property_present
device_property_read_string

View File

@ -875,6 +875,8 @@
dev_pm_opp_of_get_sharing_cpus
dev_pm_opp_of_register_em
dev_pm_opp_set_sharing_cpus
dev_pm_opp_register_notifier
dev_pm_opp_unregister_notifier
policy_has_boost_freq
# required by cqhci.ko
@ -1829,6 +1831,13 @@
unpin_user_pages
uuid_null
# required by regulator_supply_alias.ko
regulator_register_supply_alias
regulator_unregister_supply_alias
# required by selector.ko
of_i2c_get_board_info
# preserved by --additions-only
cancel_work
phy_start_aneg

View File

@ -27,4 +27,6 @@
remove_memory_subsection
send_sig_mceerr
smpboot_unregister_percpu_thread
__traceiter_android_vh_filemap_get_folio_gfp
__tracepoint_android_vh_filemap_get_folio_gfp
vmap_pfn

View File

@ -16,6 +16,7 @@
file_check_and_advance_wb_err
filemap_add_folio
filemap_check_errors
filemap_get_folios_contig
filemap_release_folio
__folio_cancel_dirty
folio_end_read
@ -26,10 +27,12 @@
iget_locked
ilookup5
inode_add_bytes
inode_bit_waitqueue
inode_maybe_inc_iversion
inode_query_iversion
__insert_inode_hash
iov_iter_single_seg_count
LZ4_compress_default
make_vfsgid
make_vfsuid
mark_buffer_write_io_error
@ -43,6 +46,7 @@
__remove_inode_hash
sb_min_blocksize
security_inode_init_security
shrink_dcache_sb
__sync_dirty_buffer
sysctl_hung_task_timeout_secs
tag_pages_for_writeback
@ -51,3 +55,4 @@
utf32_to_utf8
wake_bit_function
wrap_directory_iterator
zlib_inflateEnd

View File

@ -10,7 +10,9 @@
blk_stat_disable_accounting
blk_stat_enable_accounting
__brelse
__contpte_try_fold
class_find_device
contpte_ptep_get
elevator_alloc
elv_register
elv_unregister
@ -23,9 +25,16 @@
kblockd_mod_delayed_work_on
kern_path
__lock_buffer
__mmu_notifier_arch_invalidate_secondary_tlbs
__mmu_notifier_invalidate_range_end
__mmu_notifier_invalidate_range_start
memcg1_charge_batch
mempool_kfree
mempool_kmalloc
mipi_dsi_dcs_write
mm_trace_rss_stat
mte_sync_tags
mthp_stats
__neigh_create
netdev_is_rx_handler_busy
noop_qdisc
@ -63,13 +72,17 @@
seq_read_iter
set_blocksize
__set_task_comm
__sync_icache_dcache
set_task_ioprio
skb_orphan_partial
submit_bh
swap_migration_ad_supported
swapper_spaces
tcf_block_get
tcf_block_put
tcf_classify
timer_reduce
zone_pageset_high_and_batch_update
cpufreq_unregister_notifier
jiffies64_to_nsecs
profile_event_unregister
@ -79,6 +92,8 @@
__traceiter_android_rvh_alloc_pages_reclaim_cycle_end
__traceiter_android_rvh_alloc_pages_reclaim_start
__traceiter_android_rvh_bpf_skb_load_bytes
__traceiter_android_rvh_balance_fair
__traceiter_android_rvh_before_pick_task_fair
__traceiter_android_vh_throttle_direct_reclaim_bypass
__traceiter_android_vh_count_workingset_refault
__traceiter_android_rvh_cpufreq_transition
@ -136,7 +151,25 @@
__traceiter_android_vh_lruvec_add_folio
__traceiter_android_vh_lruvec_del_folio
__traceiter_android_vh_mglru_aging_bypass
__traceiter_android_vh_mm_customize_ac
__traceiter_android_vh_mm_customize_alloc_anon_thp
__traceiter_android_vh_mm_customize_file_is_tiny
__traceiter_android_vh_mm_customize_longterm_pinnable
__traceiter_android_vh_mm_customize_pgdat_balanced
__traceiter_android_vh_mm_customize_reclaim_idx
__traceiter_android_vh_mm_customize_rmqueue
__traceiter_android_vh_mm_customize_suitable_zone
__traceiter_android_vh_mm_customize_wmark_ok
__traceiter_android_vh_mm_customize_zone_can_compact
__traceiter_android_vh_mm_customize_zone_max_order
__traceiter_android_vh_mm_customize_zone_pageset
__traceiter_android_vh_mm_do_madvise_bypass
__traceiter_android_vh_mm_free_page
__traceiter_android_vh_mm_isolate_priv_lru
__traceiter_android_vh_mm_migrate_one_page
__traceiter_android_vh_mm_remove_migration_pte_bypass
__traceiter_android_vh_mm_split_huge_page_bypass
__traceiter_android_vh_mm_try_split_folio_bypass
__traceiter_android_vh_mmap_region
__traceiter_android_vh_mutex_unlock_slowpath
__traceiter_android_vh_mutex_unlock_slowpath_before_wakeq
@ -192,6 +225,8 @@
__tracepoint_android_rvh_alloc_pages_reclaim_cycle_end
__tracepoint_android_rvh_alloc_pages_reclaim_start
__tracepoint_android_rvh_bpf_skb_load_bytes
__tracepoint_android_rvh_balance_fair
__tracepoint_android_rvh_before_pick_task_fair
__tracepoint_android_vh_throttle_direct_reclaim_bypass
__tracepoint_android_vh_count_workingset_refault
__tracepoint_android_rvh_cpufreq_transition
@ -249,7 +284,25 @@
__tracepoint_android_vh_lruvec_add_folio
__tracepoint_android_vh_lruvec_del_folio
__tracepoint_android_vh_mglru_aging_bypass
__tracepoint_android_vh_mm_customize_ac
__tracepoint_android_vh_mm_customize_alloc_anon_thp
__tracepoint_android_vh_mm_customize_file_is_tiny
__tracepoint_android_vh_mm_customize_longterm_pinnable
__tracepoint_android_vh_mm_customize_pgdat_balanced
__tracepoint_android_vh_mm_customize_reclaim_idx
__tracepoint_android_vh_mm_customize_rmqueue
__tracepoint_android_vh_mm_customize_suitable_zone
__tracepoint_android_vh_mm_customize_wmark_ok
__tracepoint_android_vh_mm_customize_zone_can_compact
__tracepoint_android_vh_mm_customize_zone_max_order
__tracepoint_android_vh_mm_customize_zone_pageset
__tracepoint_android_vh_mm_do_madvise_bypass
__tracepoint_android_vh_mm_free_page
__tracepoint_android_vh_mm_isolate_priv_lru
__tracepoint_android_vh_mm_migrate_one_page
__tracepoint_android_vh_mm_remove_migration_pte_bypass
__tracepoint_android_vh_mm_split_huge_page_bypass
__tracepoint_android_vh_mm_try_split_folio_bypass
__tracepoint_android_vh_mmap_region
__tracepoint_android_vh_mutex_unlock_slowpath
__tracepoint_android_vh_mutex_unlock_slowpath_before_wakeq

View File

@ -121,6 +121,9 @@
__traceiter_android_vh_mutex_wait_start
__traceiter_android_vh_alter_mutex_list_add
__traceiter_android_rvh_cpuset_fork
__traceiter_android_vh_mutex_init
__traceiter_android_vh_rwsem_direct_rsteal
__traceiter_android_vh_rwsem_optimistic_rspin
__traceiter_android_vh_sched_setaffinity_early
__traceiter_android_rvh_set_cpus_allowed_comm
__traceiter_android_rvh_dequeue_task
@ -129,12 +132,17 @@
__tracepoint_android_vh_mutex_wait_start
__tracepoint_android_vh_alter_mutex_list_add
__tracepoint_android_rvh_cpuset_fork
__tracepoint_android_vh_mutex_init
__tracepoint_android_vh_rwsem_direct_rsteal
__tracepoint_android_vh_rwsem_optimistic_rspin
__tracepoint_android_vh_sched_setaffinity_early
__tracepoint_android_rvh_set_cpus_allowed_comm
__tracepoint_android_rvh_dequeue_task
cpuset_cpus_allowed
cpufreq_update_policy
cgroup_threadgroup_rwsem
osq_lock
osq_unlock
#required by millet.ko
__traceiter_android_rvh_refrigerator
@ -397,20 +405,6 @@
__tracepoint_android_vh_psi_event
__tracepoint_android_vh_psi_group
#required by io_monitor.ko
__tracepoint_f2fs_gc_begin
__tracepoint_f2fs_gc_end
__tracepoint_f2fs_write_checkpoint
__tracepoint_f2fs_sync_file_enter
__tracepoint_f2fs_sync_file_exit
__tracepoint_ufshcd_command
__traceiter_f2fs_gc_begin
__traceiter_f2fs_gc_end
__traceiter_f2fs_write_checkpoint
__traceiter_f2fs_sync_file_enter
__traceiter_f2fs_sync_file_exit
__traceiter_ufshcd_command
#required by zram.ko module
__blk_alloc_disk
bio_end_io_acct_remapped
@ -443,6 +437,8 @@
__tracepoint_android_vh_mem_cgroup_charge
__tracepoint_android_vh_filemap_add_folio
__tracepoint_android_vh_shrink_node
__traceiter_android_vh_swap_bio_charge
__tracepoint_android_vh_swap_bio_charge
#required by mem_reclaim_ctl.ko
__traceiter_android_vh_page_should_be_protected

View File

@ -1 +0,0 @@
per-file net/**=file:/net/OWNERS

View File

@ -1,4 +0,0 @@
per-file bio.h=file:/block/OWNERS
per-file blk*.h=file:/block/OWNERS
per-file f2fs**=file:/fs/f2fs/OWNERS
per-file net**=file:/net/OWNERS

View File

@ -5,6 +5,23 @@
#include <linux/types.h>
#ifdef CONFIG_GCMA
enum gcma_stat_type {
ALLOCATED_PAGE,
STORED_PAGE,
LOADED_PAGE,
EVICTED_PAGE,
CACHED_PAGE,
DISCARDED_PAGE,
TOTAL_PAGE,
NUM_OF_GCMA_STAT,
};
#ifdef CONFIG_GCMA_SYSFS
u64 gcma_stat_get(enum gcma_stat_type type);
#else
static inline u64 gcma_stat_get(enum gcma_stat_type type) { return 0; }
#endif
extern void gcma_alloc_range(unsigned long start_pfn, unsigned long end_pfn);
extern void gcma_free_range(unsigned long start_pfn, unsigned long end_pfn);
extern int register_gcma_area(const char *name, phys_addr_t base,


@ -2099,8 +2099,17 @@ static inline bool is_zero_folio(const struct folio *folio)
/* MIGRATE_CMA and ZONE_MOVABLE do not allow pin folios */
#ifdef CONFIG_MIGRATION
extern void _trace_android_vh_mm_customize_longterm_pinnable(struct folio *folio,
bool *is_longterm_pinnable);
static inline bool folio_is_longterm_pinnable(struct folio *folio)
{
bool is_longterm_pinnable = false;
_trace_android_vh_mm_customize_longterm_pinnable(folio, &is_longterm_pinnable);
if (is_longterm_pinnable)
return true;
#ifdef CONFIG_CMA
int mt = folio_migratetype(folio);
@ -4291,6 +4300,9 @@ void vma_pgtable_walk_end(struct vm_area_struct *vma);
int reserve_mem_find_by_name(const char *name, phys_addr_t *start, phys_addr_t *size);
void zone_pageset_high_and_batch_update(struct zone *zone, int new_high_min,
int new_high_max, int new_batch);
#ifdef CONFIG_64BIT
int do_mseal(unsigned long start, size_t len_in, unsigned long flags);
#else


@ -2249,7 +2249,6 @@ static inline struct mutex *__get_task_blocked_on(struct task_struct *p)
static inline void set_blocked_on_waking_nested(struct task_struct *p, struct mutex *m)
{
raw_spin_lock_nested(&p->blocked_lock, SINGLE_DEPTH_NESTING);
WARN_ON_ONCE(__get_task_blocked_on(p) != m);
__set_blocked_on_waking(p);
raw_spin_unlock(&p->blocked_lock);
}


@ -47,31 +47,50 @@ static inline void virtio_vsock_skb_clear_tap_delivered(struct sk_buff *skb)
VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = false;
}
static inline void virtio_vsock_skb_rx_put(struct sk_buff *skb)
static inline void virtio_vsock_skb_put(struct sk_buff *skb, u32 len)
{
u32 len;
DEBUG_NET_WARN_ON_ONCE(skb->len);
len = le32_to_cpu(virtio_vsock_hdr(skb)->len);
if (len > 0)
if (skb_is_nonlinear(skb))
skb->len = len;
else
skb_put(skb, len);
}
static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t mask)
static inline struct sk_buff *
__virtio_vsock_alloc_skb_with_frags(unsigned int header_len,
unsigned int data_len,
gfp_t mask)
{
struct sk_buff *skb;
int err;
if (size < VIRTIO_VSOCK_SKB_HEADROOM)
return NULL;
skb = alloc_skb(size, mask);
skb = alloc_skb_with_frags(header_len, data_len,
PAGE_ALLOC_COSTLY_ORDER, &err, mask);
if (!skb)
return NULL;
skb_reserve(skb, VIRTIO_VSOCK_SKB_HEADROOM);
skb->data_len = data_len;
return skb;
}
static inline struct sk_buff *
virtio_vsock_alloc_linear_skb(unsigned int size, gfp_t mask)
{
return __virtio_vsock_alloc_skb_with_frags(size, 0, mask);
}
static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t mask)
{
if (size <= SKB_WITH_OVERHEAD(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
return virtio_vsock_alloc_linear_skb(size, mask);
size -= VIRTIO_VSOCK_SKB_HEADROOM;
return __virtio_vsock_alloc_skb_with_frags(VIRTIO_VSOCK_SKB_HEADROOM,
size, mask);
}
static inline void
virtio_vsock_skb_queue_head(struct sk_buff_head *list, struct sk_buff *skb)
{
@ -111,10 +130,14 @@ static inline size_t virtio_vsock_skb_len(struct sk_buff *skb)
return (size_t)(skb_end_pointer(skb) - skb->head);
}
#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE (1024 * 4)
/* Dimension the RX SKB so that the entire thing fits exactly into
* a single 4KiB page. This avoids wasting memory due to alloc_skb()
* rounding up to the next page order and also means that we
* don't leave higher-order pages sitting around in the RX queue.
*/
#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE SKB_WITH_OVERHEAD(1024 * 4)
#define VIRTIO_VSOCK_MAX_BUF_SIZE 0xFFFFFFFFUL
#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE virtio_transport_max_vsock_pkt_buf_size
extern uint virtio_transport_max_vsock_pkt_buf_size;
#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE (1024 * 64)
enum {
VSOCK_VQ_RX = 0, /* for host to guest data */


@ -1 +0,0 @@
per-file f2fs**=file:/fs/f2fs/OWNERS


@ -19,6 +19,9 @@ enum compact_result;
DECLARE_HOOK(android_vh_compaction_try_to_compact_exit,
TP_PROTO(enum compact_result *compact_result),
TP_ARGS(compact_result));
DECLARE_HOOK(android_vh_mm_customize_zone_can_compact,
TP_PROTO(struct zone *zone, bool *can_compact),
TP_ARGS(zone, can_compact));
#endif /* _TRACE_HOOK_COMPACTION_H */
/* This part must be outside protection */
#include <trace/define_trace.h>


@ -20,6 +20,10 @@ DECLARE_HOOK(android_vh_process_madvise_return_error,
DECLARE_HOOK(android_vh_madvise_pageout_bypass,
TP_PROTO(struct mm_struct *mm, bool pageout, int *ret),
TP_ARGS(mm, pageout, ret));
DECLARE_HOOK(android_vh_mm_do_madvise_bypass,
TP_PROTO(struct mm_struct *mm, unsigned long start, size_t len,
int behavior, int *error, bool *bypass),
TP_ARGS(mm, start, len, behavior, error, bypass));
#endif


@ -149,6 +149,9 @@ DECLARE_HOOK(android_vh_filemap_get_folio,
TP_PROTO(struct address_space *mapping, pgoff_t index,
int fgp_flags, gfp_t gfp_mask, struct folio *folio),
TP_ARGS(mapping, index, fgp_flags, gfp_mask, folio));
DECLARE_HOOK(android_vh_filemap_get_folio_gfp,
TP_PROTO(struct address_space *mapping, int fgp_flags, gfp_t *gfp_mask),
TP_ARGS(mapping, fgp_flags, gfp_mask));
DECLARE_RESTRICTED_HOOK(android_rvh_madvise_pageout_begin,
TP_PROTO(void **private),
TP_ARGS(private), 1);
@ -343,6 +346,12 @@ DECLARE_HOOK(android_vh_free_pages_ok_bypass,
TP_PROTO(struct page *page, unsigned int order,
int __bitwise flags, bool *skip_free_pages_ok),
TP_ARGS(page, order, flags, skip_free_pages_ok));
DECLARE_HOOK(android_vh_free_page_bypass,
TP_PROTO(struct page *page, unsigned int order, bool *skip_free_page),
TP_ARGS(page, order, skip_free_page));
DECLARE_HOOK(android_vh_free_folio_bypass,
TP_PROTO(struct folio *folio, unsigned int order, bool *skip_free_folio),
TP_ARGS(folio, order, skip_free_folio));
DECLARE_HOOK(android_vh_free_pages_prepare_init,
TP_PROTO(struct page *page, int nr_pages, bool *init),
TP_ARGS(page, nr_pages, init));
@ -537,6 +546,57 @@ DECLARE_HOOK(android_vh_filemap_add_folio,
TP_PROTO(struct address_space *mapping, struct folio *folio,
pgoff_t index),
TP_ARGS(mapping, folio, index));
DECLARE_HOOK(android_vh_gcma_cc_allow_nonworkingset,
TP_PROTO(bool *allow_nonworkingset),
TP_ARGS(allow_nonworkingset));
DECLARE_HOOK(android_vh_gcma_cc_store_page_bypass,
TP_PROTO(bool *bypass),
TP_ARGS(bypass));
DECLARE_HOOK(android_vh_swap_bio_charge,
TP_PROTO(struct bio *bio),
TP_ARGS(bio));
DECLARE_HOOK(android_vh_mm_customize_alloc_anon_thp,
TP_PROTO(gfp_t *gfp_mask, unsigned long *orders, int *order, struct folio **folio),
TP_ARGS(gfp_mask, orders, order, folio));
DECLARE_HOOK(android_vh_mm_customize_ac,
TP_PROTO(gfp_t gfp, unsigned int order, struct zonelist **zonelist,
struct zoneref **preferred_zoneref, enum zone_type *highest_zoneidx,
unsigned int *alloc_flags),
TP_ARGS(gfp, order, zonelist, preferred_zoneref, highest_zoneidx, alloc_flags));
DECLARE_HOOK(android_vh_mm_customize_rmqueue,
TP_PROTO(struct zone *zone, unsigned int order, unsigned int *alloc_flags,
int *migratetype),
TP_ARGS(zone, order, alloc_flags, migratetype));
DECLARE_HOOK(android_vh_mm_customize_suitable_zone,
TP_PROTO(struct zone *zone, gfp_t gfp, int order, enum zone_type highest_zoneidx,
bool *use_this_zone, bool *suitable),
TP_ARGS(zone, gfp, order, highest_zoneidx, use_this_zone, suitable));
DECLARE_HOOK(android_vh_mm_customize_wmark_ok,
TP_PROTO(struct zone *zone, unsigned int order, enum zone_type highest_zoneidx,
bool *wmark_ok, bool *customized),
TP_ARGS(zone, order, highest_zoneidx, wmark_ok, customized));
DECLARE_HOOK(android_vh_mm_customize_zone_max_order,
TP_PROTO(struct zone *zone, int *max_order),
TP_ARGS(zone, max_order));
DECLARE_HOOK(android_vh_mm_customize_zone_pageset,
TP_PROTO(struct zone *zone, int *new_high_min, int *new_high_max, int *new_batch),
TP_ARGS(zone, new_high_min, new_high_max, new_batch));
DECLARE_HOOK(android_vh_mm_customize_longterm_pinnable,
TP_PROTO(struct folio *folio, bool *is_longterm_pinnable),
TP_ARGS(folio, is_longterm_pinnable));
DECLARE_HOOK(android_vh_mm_migrate_one_page,
TP_PROTO(struct page *page, const vm_flags_t vm_flags),
TP_ARGS(page, vm_flags));
DECLARE_HOOK(android_vh_mm_remove_migration_pte_bypass,
TP_PROTO(struct folio *dst, struct vm_area_struct *vma, unsigned long addr,
struct folio *src, bool *bypass),
TP_ARGS(dst, vma, addr, src, bypass));
DECLARE_HOOK(android_vh_mm_split_huge_page_bypass,
TP_PROTO(struct page *page, struct list_head *list, int *ret, bool *bypass),
TP_ARGS(page, list, ret, bypass));
DECLARE_HOOK(android_vh_mm_try_split_folio_bypass,
TP_PROTO(struct folio *folio, bool *bypass),
TP_ARGS(folio, bypass));
#endif /* _TRACE_HOOK_MM_H */
/* This part must be outside protection */
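These DECLARE_HOOK()s expand to ordinary tracepoints, so vendor modules attach through the generated register_trace_android_vh_*() helpers. A minimal sketch for the madvise bypass hook follows; the module and handler names are hypothetical, and the stub deliberately keeps the generic path:

#include <linux/module.h>
#include <trace/hooks/mm.h>

static void vh_do_madvise_bypass(void *unused, struct mm_struct *mm,
                                 unsigned long start, size_t len,
                                 int behavior, int *error, bool *bypass)
{
        /* Vendor policy would go here: to short-circuit do_madvise(),
         * set *error to the value it should return and *bypass = true.
         * Leaving *bypass untouched keeps the stock behaviour. */
}

static int __init vh_example_init(void)
{
        return register_trace_android_vh_mm_do_madvise_bypass(vh_do_madvise_bypass,
                                                              NULL);
}
module_init(vh_example_init);
MODULE_LICENSE("GPL");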


@ -47,6 +47,12 @@ DECLARE_HOOK(android_vh_clear_rwsem_writer_owned,
DECLARE_HOOK(android_vh_rwsem_read_trylock_failed,
TP_PROTO(struct rw_semaphore *sem, long *cntp, int *ret),
TP_ARGS(sem, cntp, ret));
DECLARE_HOOK(android_vh_rwsem_direct_rsteal,
TP_PROTO(struct rw_semaphore *sem, bool *steal),
TP_ARGS(sem, steal));
DECLARE_HOOK(android_vh_rwsem_optimistic_rspin,
TP_PROTO(struct rw_semaphore *sem, long *adjustment, bool *rspin),
TP_ARGS(sem, adjustment, rspin));
#endif /* _TRACE_HOOK_RWSEM_H */
/* This part must be outside protection */
#include <trace/define_trace.h>


@ -387,6 +387,15 @@ DECLARE_RESTRICTED_HOOK(android_rvh_remove_entity_load_avg,
TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *se),
TP_ARGS(cfs_rq, se), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_before_pick_task_fair,
TP_PROTO(struct rq *rq, struct task_struct **p,
struct task_struct *prev, struct rq_flags *rf),
TP_ARGS(rq, p, prev, rf), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_balance_fair,
TP_PROTO(struct rq *rq, struct task_struct *prev, struct rq_flags *rf),
TP_ARGS(rq, prev, rf), 1);
struct affinity_context;
DECLARE_RESTRICTED_HOOK(android_rvh_set_cpus_allowed_ptr,
TP_PROTO(struct task_struct *p, struct affinity_context *ctx, bool *skip_user_ptr),


@ -111,6 +111,20 @@ DECLARE_HOOK(android_vh_shrink_node,
DECLARE_HOOK(android_vh_shrink_node_memcgs,
TP_PROTO(struct mem_cgroup *memcg, bool *skip),
TP_ARGS(memcg, skip));
DECLARE_HOOK(android_vh_mm_isolate_priv_lru,
TP_PROTO(unsigned long nr_to_scan, struct lruvec *lruvec, enum lru_list lru,
struct list_head *dst, int reclaim_idx, bool may_unmap,
unsigned long *nr_scanned, unsigned long *nr_taken),
TP_ARGS(nr_to_scan, lruvec, lru, dst, reclaim_idx, may_unmap, nr_scanned, nr_taken));
DECLARE_HOOK(android_vh_mm_customize_file_is_tiny,
TP_PROTO(unsigned int may_swap, int order, int highest_zoneidx, bool *file_is_tiny),
TP_ARGS(may_swap, order, highest_zoneidx, file_is_tiny));
DECLARE_HOOK(android_vh_mm_customize_pgdat_balanced,
TP_PROTO(int order, int highest_zoneidx, bool *balanced, bool *customized),
TP_ARGS(order, highest_zoneidx, balanced, customized));
DECLARE_HOOK(android_vh_mm_customize_reclaim_idx,
TP_PROTO(int order, gfp_t gfp, s8 *reclaim_idx, enum zone_type *highest_zoneidx),
TP_ARGS(order, gfp, reclaim_idx, highest_zoneidx));
#endif /* _TRACE_HOOK_VMSCAN_H */
/* This part must be outside protection */
#include <trace/define_trace.h>


@ -1,3 +0,0 @@
per-file f2fs**=file:/fs/f2fs/OWNERS
per-file fuse**=file:/fs/fuse/OWNERS
per-file net**=file:/net/OWNERS


@ -56,15 +56,29 @@ struct fuse_in_postfilter_header {
/** One input argument of a request */
struct fuse_bpf_in_arg {
uint32_t size;
const void *value;
const void *end_offset;
uint32_t padding;
union {
const void *value;
uint64_t padding2;
};
union {
const void *end_offset;
uint64_t padding3;
};
};
/** One output argument of a request */
struct fuse_bpf_arg {
uint32_t size;
void *value;
void *end_offset;
uint32_t padding;
union {
void *value;
uint64_t padding2;
};
union {
void *end_offset;
uint64_t padding3;
};
};
#define FUSE_MAX_IN_ARGS 5
@ -80,6 +94,7 @@ struct fuse_bpf_args {
uint32_t in_numargs;
uint32_t out_numargs;
uint32_t flags;
uint32_t padding;
struct fuse_bpf_in_arg in_args[FUSE_MAX_IN_ARGS];
struct fuse_bpf_arg out_args[FUSE_MAX_OUT_ARGS];
};
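The unions make the layout pointer-width independent: a 4-byte size, 4 bytes of explicit padding, then two 8-byte slots per argument, while the extra padding word in fuse_bpf_args keeps the arg arrays 8-byte aligned on 32-bit builds as well. A hedged build-time check of that invariant (not part of this patch) would be:

_Static_assert(sizeof(struct fuse_bpf_in_arg) == 24, "in_arg ABI size");
_Static_assert(sizeof(struct fuse_bpf_arg) == 24, "out_arg ABI size");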


@ -206,6 +206,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
return false;
}
EXPORT_SYMBOL_GPL(osq_lock);
void osq_unlock(struct optimistic_spin_queue *lock)
{
@ -233,3 +234,4 @@ void osq_unlock(struct optimistic_spin_queue *lock)
if (next)
WRITE_ONCE(next->locked, 1);
}
EXPORT_SYMBOL_GPL(osq_unlock);


@ -343,6 +343,8 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
osq_lock_init(&sem->osq);
#endif
android_init_vendor_data(sem, 1);
android_init_oem_data(sem, 1);
trace_android_vh_rwsem_init(sem);
}
EXPORT_SYMBOL(__init_rwsem);
@ -1030,6 +1032,8 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int stat
struct rwsem_waiter waiter;
DEFINE_WAKE_Q(wake_q);
bool already_on_list = false;
bool steal = true;
bool rspin = false;
/*
* To prevent a constant stream of readers from starving a sleeping
@ -1043,7 +1047,8 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int stat
/*
* Reader optimistic lock stealing.
*/
if (!(count & (RWSEM_WRITER_LOCKED | RWSEM_FLAG_HANDOFF))) {
trace_android_vh_rwsem_direct_rsteal(sem, &steal);
if (steal && !(count & (RWSEM_WRITER_LOCKED | RWSEM_FLAG_HANDOFF))) {
rwsem_set_reader_owned(sem);
lockevent_inc(rwsem_rlock_steal);
@ -1051,7 +1056,8 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int stat
* Wake up other readers in the wait queue if it is
* the first reader.
*/
if ((rcnt == 1) && (count & RWSEM_FLAG_WAITERS)) {
wake_readers:
if ((rcnt == 1 || rspin) && (count & RWSEM_FLAG_WAITERS)) {
raw_spin_lock_irq(&sem->wait_lock);
if (!list_empty(&sem->wait_list))
rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED,
@ -1062,6 +1068,12 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int stat
trace_android_vh_record_rwsem_lock_starttime(sem, jiffies);
return sem;
}
/*
* Reader optimistic spinning and stealing.
*/
trace_android_vh_rwsem_optimistic_rspin(sem, &adjustment, &rspin);
if (rspin)
goto wake_readers;
queue:
waiter.task = current;


@ -1163,6 +1163,17 @@ static unsigned int __init add_to_rb(struct printk_ringbuffer *rb,
static char setup_text_buf[PRINTKRB_RECORD_MAX] __initdata;
static void print_log_buf_usage_stats(void)
{
unsigned int descs_count = log_buf_len >> PRB_AVGBITS;
size_t meta_data_size;
meta_data_size = descs_count * (sizeof(struct prb_desc) + sizeof(struct printk_info));
pr_info("log buffer data + meta data: %u + %zu = %zu bytes\n",
log_buf_len, meta_data_size, log_buf_len + meta_data_size);
}
void __init setup_log_buf(int early)
{
struct printk_info *new_infos;
@ -1192,20 +1203,25 @@ void __init setup_log_buf(int early)
if (!early && !new_log_buf_len)
log_buf_add_cpu();
if (!new_log_buf_len)
if (!new_log_buf_len) {
/* Show the memory stats only once. */
if (!early)
goto out;
return;
}
new_descs_count = new_log_buf_len >> PRB_AVGBITS;
if (new_descs_count == 0) {
pr_err("new_log_buf_len: %lu too small\n", new_log_buf_len);
return;
goto out;
}
new_log_buf = memblock_alloc(new_log_buf_len, LOG_ALIGN);
if (unlikely(!new_log_buf)) {
pr_err("log_buf_len: %lu text bytes not available\n",
new_log_buf_len);
return;
goto out;
}
new_descs_size = new_descs_count * sizeof(struct prb_desc);
@ -1268,7 +1284,7 @@ void __init setup_log_buf(int early)
prb_next_seq(&printk_rb_static) - seq);
}
pr_info("log_buf_len: %u bytes\n", log_buf_len);
print_log_buf_usage_stats();
pr_info("early log buf free: %u(%u%%)\n",
free, (free * 100) / __LOG_BUF_LEN);
return;
@ -1277,6 +1293,8 @@ err_free_descs:
memblock_free(new_descs, new_descs_size);
err_free_log_buf:
memblock_free(new_log_buf, new_log_buf_len);
out:
print_log_buf_usage_stats();
}
static bool __read_mostly ignore_loglevel;
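As a worked example of what print_log_buf_usage_stats() reports (assuming the in-tree PRB_AVGBITS of 5, i.e. one descriptor per 32 bytes of text, and illustrative 64-bit sizes of 24 bytes for struct prb_desc and 88 bytes for struct printk_info):

        log_buf_len    = 262144 (256 KiB)
        descs_count    = 262144 >> 5 = 8192
        meta_data_size = 8192 * (24 + 88) = 917504

so the boot log would read "log buffer data + meta data: 262144 + 917504 = 1179648 bytes".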


@ -276,7 +276,7 @@ static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
* callback storms, no need to wake up too early.
*/
if (waketype == RCU_NOCB_WAKE_LAZY &&
rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT) {
rdp_gp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT) {
mod_timer(&rdp_gp->nocb_timer, jiffies + rcu_get_jiffies_lazy_flush());
WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
} else if (waketype == RCU_NOCB_WAKE_BYPASS) {


@ -1,3 +0,0 @@
elavila@google.com
qperret@google.com
tkjos@google.com


@ -8961,6 +8961,7 @@ static void set_cpus_allowed_fair(struct task_struct *p, struct affinity_context
static int
balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
trace_android_rvh_balance_fair(rq, prev, rf);
if (sched_fair_runnable(rq))
return 1;
@ -9120,12 +9121,15 @@ struct task_struct *
pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
struct sched_entity *se;
struct task_struct *p;
struct task_struct *p = NULL;
int new_tasks;
again:
p = pick_task_fair(rq);
trace_android_rvh_replace_next_task_fair(rq, &p, prev);
trace_android_rvh_before_pick_task_fair(rq, &p, prev, rf);
if (!p) {
p = pick_task_fair(rq);
trace_android_rvh_replace_next_task_fair(rq, &p, prev);
}
if (!p)
goto idle;
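The new restricted hook runs before the stock picker, so a vendor handler can either install its own choice in *p or leave it NULL to fall through to pick_task_fair(). A hedged stub (hypothetical handler, attached once via the generated register_trace_android_rvh_before_pick_task_fair()):

static void rvh_before_pick_task_fair(void *unused, struct rq *rq,
                                      struct task_struct **p,
                                      struct task_struct *prev,
                                      struct rq_flags *rf)
{
        /* Leave *p NULL to defer to pick_task_fair(); setting it here
         * would make the vendor's task the next one scheduled. */
}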


@ -134,5 +134,5 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_sugov_sched_attr);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_iowait);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_uclamp_validate);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_util_fits_cpu);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_before_pick_task_fair);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_balance_fair);


@ -1,3 +0,0 @@
kaleshsingh@google.com
surenb@google.com
minchan@google.com


@ -2854,6 +2854,11 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
ac->highest_zoneidx, ac->nodemask) {
enum compact_result status;
bool can_compact = true;
trace_android_vh_mm_customize_zone_can_compact(zone, &can_compact);
if (!can_compact)
continue;
if (cpusets_enabled() &&
(alloc_flags & ALLOC_CPUSET) &&
@ -2921,10 +2926,16 @@ void compact_node_async(int nid)
};
for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
bool can_compact = true;
zone = &pgdat->node_zones[zoneid];
if (!populated_zone(zone))
continue;
trace_android_vh_mm_customize_zone_can_compact(zone, &can_compact);
if (!can_compact)
continue;
if (fatal_signal_pending(current))
break;
@ -2960,10 +2971,16 @@ static int compact_node(pg_data_t *pgdat, bool proactive)
};
for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
bool can_compact = true;
zone = &pgdat->node_zones[zoneid];
if (!populated_zone(zone))
continue;
trace_android_vh_mm_customize_zone_can_compact(zone, &can_compact);
if (!can_compact)
continue;
if (fatal_signal_pending(current))
return -EINTR;
@ -3090,11 +3107,16 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat)
enum compact_result ret;
for (zoneid = 0; zoneid <= highest_zoneidx; zoneid++) {
bool can_compact = true;
zone = &pgdat->node_zones[zoneid];
if (!populated_zone(zone))
continue;
trace_android_vh_mm_customize_zone_can_compact(zone, &can_compact);
if (!can_compact)
continue;
ret = compaction_suit_allocation_order(zone,
pgdat->kcompactd_max_order,
highest_zoneidx, ALLOC_WMARK_MIN);
@ -3129,11 +3151,16 @@ static void kcompactd_do_work(pg_data_t *pgdat)
for (zoneid = 0; zoneid <= cc.highest_zoneidx; zoneid++) {
int status;
bool can_compact = true;
zone = &pgdat->node_zones[zoneid];
if (!populated_zone(zone))
continue;
trace_android_vh_mm_customize_zone_can_compact(zone, &can_compact);
if (!can_compact)
continue;
if (compaction_deferred(zone, cc.order))
continue;


@ -1963,6 +1963,9 @@ no_page:
gfp &= ~GFP_KERNEL;
gfp |= GFP_NOWAIT | __GFP_NOWARN;
}
trace_android_vh_filemap_get_folio_gfp(mapping, fgp_flags, &gfp);
if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
fgp_flags |= FGP_LOCK;


@ -13,6 +13,7 @@
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/xarray.h>
#include <trace/hooks/mm.h>
#include "gcma_sysfs.h"
/*
@ -306,6 +307,7 @@ int register_gcma_area(const char *name, phys_addr_t base, phys_addr_t size)
INIT_LIST_HEAD(&area->free_pages);
spin_lock_init(&area->free_pages_lock);
gcma_stat_add(TOTAL_PAGE, page_count);
for (i = 0; i < page_count; i++) {
page = pfn_to_page(pfn + i);
set_area_id(page, area_id);
@ -620,6 +622,7 @@ void gcma_alloc_range(unsigned long start_pfn, unsigned long end_pfn)
__gcma_discard_range(area, s_pfn, e_pfn);
}
gcma_stat_add(ALLOCATED_PAGE, end_pfn - start_pfn + 1);
}
EXPORT_SYMBOL_GPL(gcma_alloc_range);
@ -658,6 +661,7 @@ void gcma_free_range(unsigned long start_pfn, unsigned long end_pfn)
}
local_irq_enable();
gcma_stat_sub(ALLOCATED_PAGE, end_pfn - start_pfn + 1);
}
EXPORT_SYMBOL_GPL(gcma_free_range);
@ -747,7 +751,12 @@ static void gcma_cc_store_page(int hash_id, struct cleancache_filekey key,
void *src, *dst;
bool is_new = false;
bool workingset = PageWorkingset(page);
bool bypass = false;
bool allow_nonworkingset = false;
trace_android_vh_gcma_cc_store_page_bypass(&bypass);
if (bypass)
return;
/*
* This cleancache function is called with irq disabled, so all
* locks in this function should take care of the irq if they are
@ -759,10 +768,11 @@ static void gcma_cc_store_page(int hash_id, struct cleancache_filekey key,
if (!gcma_fs)
return;
trace_android_vh_gcma_cc_allow_nonworkingset(&allow_nonworkingset);
find_inode:
inode = find_and_get_gcma_inode(gcma_fs, &key);
if (!inode) {
if (!workingset)
if (!workingset && !allow_nonworkingset)
return;
inode = add_gcma_inode(gcma_fs, &key);
if (!IS_ERR(inode))
@ -781,14 +791,14 @@ load_page:
xa_lock(&inode->pages);
g_page = xa_load(&inode->pages, offset);
if (g_page) {
if (!workingset) {
if (!workingset && !allow_nonworkingset) {
gcma_erase_page(inode, offset, g_page, true);
goto out_unlock;
}
goto copy;
}
if (!workingset)
if (!workingset && !allow_nonworkingset)
goto out_unlock;
g_page = gcma_alloc_page();


@ -1,5 +1,6 @@
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/gcma.h>
#include "gcma_sysfs.h"
extern struct kobject *vendor_mm_kobj;
@ -22,6 +23,17 @@ void gcma_stat_add(enum gcma_stat_type type, unsigned long delta)
atomic64_add(delta, &gcma_stats[type]);
}
void gcma_stat_sub(enum gcma_stat_type type, unsigned long delta)
{
atomic64_sub(delta, &gcma_stats[type]);
}
u64 gcma_stat_get(enum gcma_stat_type type)
{
return (u64)atomic64_read(&gcma_stats[type]);
}
EXPORT_SYMBOL_GPL(gcma_stat_get);
/*
* This all compiles without CONFIG_SYSFS, but is a waste of space.
*/
@ -29,6 +41,13 @@ void gcma_stat_add(enum gcma_stat_type type, unsigned long delta)
#define GCMA_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
static ssize_t allocated_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%llu\n", (u64)atomic64_read(&gcma_stats[ALLOCATED_PAGE]));
}
GCMA_ATTR_RO(allocated);
static ssize_t stored_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@ -64,12 +83,21 @@ static ssize_t discarded_show(struct kobject *kobj,
}
GCMA_ATTR_RO(discarded);
static ssize_t total_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%llu\n", (u64)atomic64_read(&gcma_stats[TOTAL_PAGE]));
}
GCMA_ATTR_RO(total);
static struct attribute *gcma_attrs[] = {
&allocated_attr.attr,
&stored_attr.attr,
&loaded_attr.attr,
&evicted_attr.attr,
&cached_attr.attr,
&discarded_attr.attr,
&total_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(gcma);
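With gcma_stat_get() now exported, other kernel code can sample the same counters the sysfs files show. A minimal sketch (hypothetical caller, not part of this patch):

#include <linux/gcma.h>
#include <linux/printk.h>

static void gcma_log_usage(void)
{
        /* Falls back to 0 when CONFIG_GCMA_SYSFS is off, per the stub
         * added to linux/gcma.h above. */
        pr_info("gcma: %llu of %llu pages allocated\n",
                gcma_stat_get(ALLOCATED_PAGE),
                gcma_stat_get(TOTAL_PAGE));
}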


@ -2,24 +2,18 @@
#ifndef __GCMA_SYSFS_H__
#define __GCMA_SYSFS_H__
enum gcma_stat_type {
STORED_PAGE,
LOADED_PAGE,
EVICTED_PAGE,
CACHED_PAGE,
DISCARDED_PAGE,
NUM_OF_GCMA_STAT,
};
#ifdef CONFIG_GCMA_SYSFS
void gcma_stat_inc(enum gcma_stat_type type);
void gcma_stat_dec(enum gcma_stat_type type);
void gcma_stat_add(enum gcma_stat_type type, unsigned long delta);
void gcma_stat_sub(enum gcma_stat_type type, unsigned long delta);
#else /* CONFIG_GCMA_SYSFS */
static inline void gcma_stat_inc(enum gcma_stat_type type) {}
static inline void gcma_stat_dec(enum gcma_stat_type type) {}
static inline void gcma_stat_add(enum gcma_stat_type type,
unsigned long delta) {}
static inline void gcma_stat_sub(enum gcma_stat_type type,
unsigned long delta) {}
#endif /* CONFIG_GCMA_SYSFS */
#endif


@ -471,6 +471,17 @@ void unpin_folios(struct folio **folios, unsigned long nfolios)
}
EXPORT_SYMBOL_GPL(unpin_folios);
/*
* trace_android_vh_mm_customize_longterm_pinnable() is called from
* include/linux/mm.h, but including include/trace/hooks/mm.h there would
* result in a build error, so we provide the wrapper
* _trace_android_vh_mm_customize_longterm_pinnable() instead.
*/
void _trace_android_vh_mm_customize_longterm_pinnable(struct folio *folio,
bool *is_longterm_pinnable)
{
trace_android_vh_mm_customize_longterm_pinnable(folio, is_longterm_pinnable);
}
/*
* Set the MMF_HAS_PINNED if not set yet; after set it'll be there for the mm's
* lifecycle. Avoid setting the bit unless necessary, or it might cause write


@ -576,6 +576,7 @@ static const struct kobj_type thpsize_ktype = {
};
DEFINE_PER_CPU(struct mthp_stat, mthp_stats) = {{{0}}};
EXPORT_SYMBOL_GPL(mthp_stats);
static unsigned long sum_mthp_stat(int order, enum mthp_stat_item item)
{
@ -3380,6 +3381,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
int extra_pins, ret;
pgoff_t end;
bool is_hzp;
bool bypass = false;
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
@ -3493,6 +3495,10 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
end = shmem_fallocend(mapping->host, end);
}
trace_android_vh_mm_split_huge_page_bypass(page, list, &ret, &bypass);
if (bypass)
goto out_unlock;
/*
* Racy check if we can split the page, before unmap_folio() will
* split PMDs


@ -1687,6 +1687,7 @@ int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int beh
int write;
size_t len;
struct blk_plug plug;
bool bypass = false;
if (!madvise_behavior_valid(behavior))
return -EINVAL;
@ -1711,6 +1712,11 @@ int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int beh
return madvise_inject_error(behavior, start, start + len_in);
#endif
trace_android_vh_mm_do_madvise_bypass(mm, start, len, behavior,
&error, &bypass);
if (bypass)
return error;
write = madvise_need_mmap_write(behavior);
if (write) {
if (mmap_write_lock_killable(mm))


@ -1562,6 +1562,17 @@ void memcg1_swapout(struct folio *folio, struct mem_cgroup *memcg)
memcg1_check_events(memcg, folio_nid(folio));
}
void memcg1_charge_batch(struct mem_cgroup *memcg, unsigned long nr_memory, int nid)
{
unsigned long flags;
local_irq_save(flags);
memcg1_charge_statistics(memcg, nr_memory);
memcg1_check_events(memcg, nid);
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(memcg1_charge_batch);
void memcg1_uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
unsigned long nr_memory, int nid)
{


@ -102,6 +102,7 @@ void memcg1_oom_recover(struct mem_cgroup *memcg);
void memcg1_commit_charge(struct folio *folio, struct mem_cgroup *memcg);
void memcg1_swapout(struct folio *folio, struct mem_cgroup *memcg);
void memcg1_charge_batch(struct mem_cgroup *memcg, unsigned long nr_memory, int nid);
void memcg1_uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
unsigned long nr_memory, int nid);
@ -142,6 +143,9 @@ static inline void memcg1_commit_charge(struct folio *folio,
static inline void memcg1_swapout(struct folio *folio, struct mem_cgroup *memcg) {}
static inline void memcg1_charge_batch(struct mem_cgroup *memcg, unsigned long nr_memory,
int nid) {}
static inline void memcg1_uncharge_batch(struct mem_cgroup *memcg,
unsigned long pgpgout,
unsigned long nr_memory, int nid) {}


@ -4690,7 +4690,7 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
struct vm_area_struct *vma = vmf->vma;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
unsigned long orders;
struct folio *folio;
struct folio *folio = NULL;
unsigned long addr;
pte_t *pte;
gfp_t gfp;
@ -4739,10 +4739,16 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
/* Try allocating the highest of the remaining orders. */
gfp = vma_thp_gfp_mask(vma);
trace_android_vh_mm_customize_alloc_anon_thp(&gfp, &orders, &order, &folio);
if (folio)
goto allocated;
while (orders) {
addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
folio = vma_alloc_folio(gfp, order, vma, addr, true);
if (folio) {
allocated:
if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
folio_put(folio);


@ -254,6 +254,12 @@ static bool remove_migration_pte(struct folio *folio,
{
struct rmap_walk_arg *rmap_walk_arg = arg;
DEFINE_FOLIO_VMA_WALK(pvmw, rmap_walk_arg->folio, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
bool bypass = false;
trace_android_vh_mm_remove_migration_pte_bypass(folio, vma, addr,
rmap_walk_arg->folio, &bypass);
if (bypass)
return true;
while (page_vma_mapped_walk(&pvmw)) {
rmap_t rmap_flags = RMAP_NONE;
@ -1568,6 +1574,11 @@ static inline int try_split_folio(struct folio *folio, struct list_head *split_f
enum migrate_mode mode)
{
int rc;
bool bypass = false;
trace_android_vh_mm_try_split_folio_bypass(folio, &bypass);
if (bypass)
return -EBUSY;
if (mode == MIGRATE_ASYNC) {
if (!folio_trylock(folio))


@ -533,6 +533,7 @@ int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
return mn_hlist_invalidate_range_start(subscriptions, range);
return 0;
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_start);
static void
mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
@ -569,6 +570,7 @@ void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
mn_hlist_invalidate_end(subscriptions, range);
lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);
void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
unsigned long start, unsigned long end)
@ -587,6 +589,7 @@ void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
}
srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_arch_invalidate_secondary_tlbs);
/*
* Same as mmu_notifier_register but here the caller must hold the mmap_lock in


@ -599,6 +599,27 @@ out:
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static unsigned int pcp_thp_order __read_mostly = HPAGE_PMD_ORDER;
static int __init parse_pcp_thp_order(char *s)
{
int err;
unsigned int order;
err = kstrtouint(s, 0, &order);
if (err)
return err;
if (order <= PAGE_ALLOC_COSTLY_ORDER || order > HPAGE_PMD_ORDER)
return -EINVAL;
pcp_thp_order = order;
return 0;
}
early_param("pcp_thp_order", parse_pcp_thp_order);
#endif
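Usage note: with this parser, the order is set from the kernel command line within the documented bounds (greater than PAGE_ALLOC_COSTLY_ORDER, at most HPAGE_PMD_ORDER). For example, on an arm64 build with 4 KiB pages (HPAGE_PMD_ORDER == 9) the bootarg

        pcp_thp_order=4

keeps 64 KiB (order-4) mTHP allocations on the per-CPU lists while PMD-sized allocations take the buddy path.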
static inline unsigned int order_to_pindex(int migratetype, int order)
{
bool __maybe_unused movable;
@ -614,7 +635,7 @@ static inline unsigned int order_to_pindex(int migratetype, int order)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
if (order > PAGE_ALLOC_COSTLY_ORDER) {
VM_BUG_ON(order != HPAGE_PMD_ORDER);
VM_BUG_ON(order != pcp_thp_order);
movable = migratetype == MIGRATE_MOVABLE;
#ifdef CONFIG_CMA
@ -636,7 +657,7 @@ static inline int pindex_to_order(unsigned int pindex)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
if (pindex >= NR_LOWORDER_PCP_LISTS)
order = HPAGE_PMD_ORDER;
order = pcp_thp_order;
#else
VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif
@ -649,7 +670,7 @@ static inline bool pcp_allowed_order(unsigned int order)
if (order <= PAGE_ALLOC_COSTLY_ORDER)
return true;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
if (order == HPAGE_PMD_ORDER)
if (order == pcp_thp_order)
return true;
#endif
return false;
@ -850,6 +871,14 @@ buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
NULL) != NULL;
}
static int zone_max_order(struct zone *zone)
{
int max_order = MAX_PAGE_ORDER;
trace_android_vh_mm_customize_zone_max_order(zone, &max_order);
return max_order;
}
/*
* Freeing function for a buddy system allocator.
*
@ -885,6 +914,7 @@ static inline void __free_one_page(struct page *page,
struct page *buddy;
bool to_tail;
bool bypass = false;
int max_order = zone_max_order(zone);
trace_android_vh_free_one_page_bypass(page, zone, order,
migratetype, (int)fpi_flags, &bypass);
@ -901,7 +931,7 @@ static inline void __free_one_page(struct page *page,
account_freepages(zone, 1 << order, migratetype);
while (order < MAX_PAGE_ORDER) {
while (order < max_order) {
int buddy_mt = migratetype;
if (compaction_capture(capc, page, order, migratetype)) {
@ -959,6 +989,8 @@ done_merging:
to_tail = true;
else if (is_shuffle_order(order))
to_tail = shuffle_pick_tail();
else if (max_order != MAX_PAGE_ORDER)
to_tail = false;
else
to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);
@ -2797,6 +2829,7 @@ void free_unref_page(struct page *page, unsigned int order)
unsigned long pfn = page_to_pfn(page);
int migratetype;
bool skip_free_unref_page = false;
bool skip_free_page = false;
if (!pcp_allowed_order(order)) {
__free_pages_ok(page, order, FPI_NONE);
@ -2806,6 +2839,9 @@ void free_unref_page(struct page *page, unsigned int order)
if (!free_pages_prepare(page, order))
return;
trace_android_vh_free_page_bypass(page, order, &skip_free_page);
if (skip_free_page)
return;
/*
* We only track unmovable, reclaimable, movable and if restrict cma
* fallback flag is set, CMA on pcp lists.
@ -2857,9 +2893,15 @@ void free_unref_folios(struct folio_batch *folios)
struct folio *folio = folios->folios[i];
unsigned long pfn = folio_pfn(folio);
unsigned int order = folio_order(folio);
bool skip_free_folio = false;
if (!free_pages_prepare(&folio->page, order))
continue;
trace_android_vh_free_folio_bypass(folio, order,
&skip_free_folio);
if (skip_free_folio)
continue;
/*
* Free orders not handled on the PCP directly to the
* allocator.
@ -3274,6 +3316,8 @@ struct page *rmqueue(struct zone *preferred_zone,
{
struct page *page;
trace_android_vh_mm_customize_rmqueue(zone, order, &alloc_flags, &migratetype);
if (likely(pcp_allowed_order(order))) {
page = rmqueue_pcplist(preferred_zone, zone, order,
migratetype, alloc_flags);
@ -3476,6 +3520,13 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
{
long min = mark;
int o;
bool customized = false;
bool wmark_ok = false;
trace_android_vh_mm_customize_wmark_ok(z, order, highest_zoneidx,
&wmark_ok, &customized);
if (customized)
return wmark_ok;
/* free_pages may go negative - that's OK */
free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);
@ -3725,9 +3776,19 @@ retry:
z = ac->preferred_zoneref;
for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
ac->nodemask) {
bool use_this_zone = false;
bool suitable = true;
struct page *page;
unsigned long mark;
trace_android_vh_mm_customize_suitable_zone(zone, gfp_mask, order, ac->highest_zoneidx,
&use_this_zone, &suitable);
if (!suitable)
continue;
if (use_this_zone)
goto try_this_zone;
if (cpusets_enabled() &&
(alloc_flags & ALLOC_CPUSET) &&
!__cpuset_zone_allowed(zone, gfp_mask))
@ -5178,6 +5239,9 @@ struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
&alloc_gfp, &alloc_flags))
return NULL;
trace_android_vh_mm_customize_ac(gfp, order, &ac.zonelist, &ac.preferred_zoneref,
&ac.highest_zoneidx, &alloc_flags);
trace_android_rvh_try_alloc_pages_gfp(&page, order, gfp, gfp_zone(gfp));
if (page)
goto out;
@ -6146,6 +6210,9 @@ static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online)
zone->pageset_batch == new_batch)
return;
trace_android_vh_mm_customize_zone_pageset(zone, &new_high_min,
&new_high_max, &new_batch);
zone->pageset_high_min = new_high_min;
zone->pageset_high_max = new_high_max;
zone->pageset_batch = new_batch;
@ -6186,6 +6253,22 @@ static void zone_pcp_update(struct zone *zone, int cpu_online)
mutex_unlock(&pcp_batch_high_lock);
}
void zone_pageset_high_and_batch_update(struct zone *zone, int new_high_min,
int new_high_max, int new_batch)
{
mutex_lock(&pcp_batch_high_lock);
zone->pageset_high_min = new_high_min;
zone->pageset_high_max = new_high_max;
zone->pageset_batch = new_batch;
__zone_set_pageset_high_and_batch(zone, new_high_min, new_high_max,
new_batch);
mutex_unlock(&pcp_batch_high_lock);
}
EXPORT_SYMBOL_GPL(zone_pageset_high_and_batch_update);
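A hedged sketch of a vendor caller for the newly exported helper; the values are arbitrary examples, and the helper serializes on pcp_batch_high_lock internally, so the caller holds no locks:

static void quiesce_zone_pcp(struct zone *zone)
{
        /* Shrink the pcp lists to near-zero, e.g. ahead of a
         * latency-sensitive window; illustrative values only. */
        zone_pageset_high_and_batch_update(zone, /* new_high_min */ 0,
                                           /* new_high_max */ 0,
                                           /* new_batch    */ 1);
}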
static void zone_pcp_update_cacheinfo(struct zone *zone, unsigned int cpu)
{
struct per_cpu_pages *pcp;


@ -594,6 +594,7 @@ static void swap_read_folio_bdev_sync(struct folio *folio,
get_task_struct(current);
count_vm_events(PSWPIN, folio_nr_pages(folio));
submit_bio_wait(&bio);
trace_android_vh_swap_bio_charge(&bio);
__end_swap_bio_read(&bio);
put_task_struct(current);
}


@ -2306,6 +2306,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
set_pte_at(mm, address, pvmw.pte, swp_pte);
trace_set_migration_pte(address, pte_val(swp_pte),
folio_order(folio));
trace_android_vh_mm_migrate_one_page(subpage, vma->vm_flags);
/*
* No need to invalidate here it will synchronize on
* against the special swap migration pte.


@ -940,7 +940,9 @@ unsigned long get_each_kmemcache_object(struct kmem_cache *s,
spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(slab, &n->partial, slab_list) {
for_each_object(p, s, slab_address(slab), slab->objects) {
metadata_access_enable();
ret = fn(s, p, private);
metadata_access_disable();
if (ret) {
spin_unlock_irqrestore(&n->list_lock, flags);
return ret;
@ -950,7 +952,9 @@ unsigned long get_each_kmemcache_object(struct kmem_cache *s,
#ifdef CONFIG_SLUB_DEBUG
list_for_each_entry(slab, &n->full, slab_list) {
for_each_object(p, s, slab_address(slab), slab->objects) {
metadata_access_enable();
ret = fn(s, p, private);
metadata_access_disable();
if (ret) {
spin_unlock_irqrestore(&n->list_lock, flags);
return ret;


@ -39,6 +39,7 @@ static const struct address_space_operations swap_aops = {
};
struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
EXPORT_SYMBOL_GPL(swapper_spaces);
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

Some files were not shown because too many files have changed in this diff.