Mirror of https://github.com/nxp-imx/linux-imx.git (synced 2025-07-07 18:05:21 +02:00)
This is the 6.6.51 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmbisF0ACgkQONu9yGCS
aT5Y8xAAqS/rmrC+/qlFvbtAqK+KXLq9BIGvDHW2QHfCyMpSZ6isehVhh64apHE/
/XvJ6a+2iPVp5o52iDTUKzbcDr3Jx/QwhS8Xa/HyQQy1rXIPpJNJb8Vuvkn/B2Cq
cPCfTtfPZUUQTd09uAdBhy5NT8hsT2kSVpmSXDnahn9ih8k0tR40udw5Qf7xpWcf
HqljbfonLP86mF/SB9m+VhDGF9fekujyb+0iS0OPE+TdvSjKB9ySoeL4PIeTSxrz
goZdp9ygAYy8Bks825ztbfQszqIwceHU/xZRaUrGfOOk4A5kwTmbdUQu7ooMc+5F
kbpifbewmY1UGn2KTxgj59xCjQ7HLQe+sqacy0/gALzRSajUNyjLn0n4w3UqaJWb
pf+gwqHBLgDRfvWctggEdY2ApKgOlM9D7TTpWWB9uv1oR/g3PGfgehZgrMMPgPUw
EZ8JiwnITfRaRFiH/vSR3aJKRj6qjb4mX3/U8HgGcACtyFfHgtuI7jzhnX36fRNO
FG38bxSUMrJnlohghfBl6zyaruZBMHVaoQzs6MYZ7qrVvCbt3CHivJdaQ85nw0h7
YHa2zYFfT0ztyaSMzWq6JatgI7BZfd8PjobhbRZADBBD39KC8aL8XLoDPnpzWMUY
UDlK8n96gOKo0t8ILDWcIisCVGNogcHJlGppC8Fu7ZyKzYsMhN4=
=OEL/
-----END PGP SIGNATURE-----

Merge 6.6.51 into android15-6.6-lts

Changes in 6.6.51
	sch/netem: fix use after free in netem_dequeue
	net: microchip: vcap: Fix use-after-free error in kunit test
	ASoC: dapm: Fix UAF for snd_soc_pcm_runtime object
	KVM: x86: Acquire kvm->srcu when handling KVM_SET_VCPU_EVENTS
	KVM: SVM: fix emulation of msr reads/writes of MSR_FS_BASE and MSR_GS_BASE
	KVM: SVM: Don't advertise Bus Lock Detect to guest if SVM support is missing
	ALSA: hda/conexant: Add pincfg quirk to enable top speakers on Sirius devices
	ALSA: hda/realtek: add patch for internal mic in Lenovo V145
	ALSA: hda/realtek: Support mute LED on HP Laptop 14-dq2xxx
	powerpc/qspinlock: Fix deadlock in MCS queue
	smb: client: fix double put of @cfile in smb2_set_path_size()
	ksmbd: unset the binding mark of a reused connection
	ksmbd: Unlock on in ksmbd_tcp_set_interfaces()
	ata: libata: Fix memory leak for error path in ata_host_alloc()
	x86/tdx: Fix data leak in mmio_read()
	perf/x86/intel: Limit the period on Haswell
	irqchip/gic-v2m: Fix refcount leak in gicv2m_of_init()
	x86/kaslr: Expose and use the end of the physical memory address space
	rtmutex: Drop rt_mutex::wait_lock before scheduling
	nvme-pci: Add sleep quirk for Samsung 990 Evo
	rust: types: Make Opaque::get const
	rust: macros: provide correct provenance when constructing THIS_MODULE
	Revert "Bluetooth: MGMT/SMP: Fix address type when using SMP over BREDR/LE"
	Bluetooth: MGMT: Ignore keys being loaded with invalid type
	mmc: core: apply SD quirks earlier during probe
	mmc: dw_mmc: Fix IDMAC operation with pages bigger than 4K
	mmc: sdhci-of-aspeed: fix module autoloading
	mmc: cqhci: Fix checking of CQHCI_HALT state
	fuse: update stats for pages in dropped aux writeback list
	fuse: use unsigned type for getxattr/listxattr size truncation
	fuse: fix memory leak in fuse_create_open
	clk: starfive: jh7110-sys: Add notifier for PLL0 clock
	clk: qcom: clk-alpha-pll: Fix the pll post div mask
	clk: qcom: clk-alpha-pll: Fix the trion pll postdiv set rate API
	can: mcp251x: fix deadlock if an interrupt occurs during mcp251x_open
	kexec_file: fix elfcorehdr digest exclusion when CONFIG_CRASH_HOTPLUG=y
	mm: vmalloc: ensure vmap_block is initialised before adding to queue
	spi: rockchip: Resolve unbalanced runtime PM / system PM handling
	tracing/osnoise: Use a cpumask to know what threads are kthreads
	tracing/timerlat: Only clear timer if a kthread exists
	tracing: Avoid possible softlockup in tracing_iter_reset()
	tracing/timerlat: Add interface_lock around clearing of kthread in stop_kthread()
	userfaultfd: don't BUG_ON() if khugepaged yanks our page table
	userfaultfd: fix checks for huge PMDs
	fscache: delete fscache_cookie_lru_timer when fscache exits to avoid UAF
	eventfs: Use list_del_rcu() for SRCU protected list variable
	net: mana: Fix error handling in mana_create_txq/rxq's NAPI cleanup
	net: mctp-serial: Fix missing escapes on transmit
	x86/fpu: Avoid writing LBR bit to IA32_XSS unless supported
	x86/apic: Make x2apic_disable() work correctly
	Revert "drm/amdgpu: align pp_power_profile_mode with kernel docs"
	tcp_bpf: fix return value of tcp_bpf_sendmsg()
	ila: call nf_unregister_net_hooks() sooner
	sched: sch_cake: fix bulk flow accounting logic for host fairness
	nilfs2: fix missing cleanup on rollforward recovery error
	nilfs2: protect references to superblock parameters exposed in sysfs
	nilfs2: fix state management in error path of log writing function
	drm/i915: Do not attempt to load the GSC multiple times
	ALSA: control: Apply sanity check of input values for user elements
	ALSA: hda: Add input value sanity checks to HDMI channel map controls
	wifi: ath12k: fix uninitialize symbol error on ath12k_peer_assoc_h_he()
	wifi: ath12k: fix firmware crash due to invalid peer nss
	smack: unix sockets: fix accept()ed socket label
	bpf, verifier: Correct tail_call_reachable for bpf prog
	ELF: fix kernel.randomize_va_space double read
	accel/habanalabs/gaudi2: unsecure edma max outstanding register
	irqchip/armada-370-xp: Do not allow mapping IRQ 0 and 1
	af_unix: Remove put_pid()/put_cred() in copy_peercred().
	x86/kmsan: Fix hook for unaligned accesses
	iommu: sun50i: clear bypass register
	netfilter: nf_conncount: fix wrong variable type
	wifi: iwlwifi: mvm: use IWL_FW_CHECK for link ID check
	udf: Avoid excessive partition lengths
	fs/ntfs3: One more reason to mark inode bad
	riscv: kprobes: Use patch_text_nosync() for insn slots
	media: vivid: fix wrong sizeimage value for mplane
	leds: spi-byte: Call of_node_put() on error path
	wifi: brcmsmac: advertise MFP_CAPABLE to enable WPA3
	usb: uas: set host status byte on data completion error
	usb: gadget: aspeed_udc: validate endpoint index for ast udc
	drm/amd/display: Run DC_LOG_DC after checking link->link_enc
	drm/amd/display: Check HDCP returned status
	drm/amdgpu: Fix smatch static checker warning
	drm/amdgpu: clear RB_OVERFLOW bit when enabling interrupts
	media: vivid: don't set HDMI TX controls if there are no HDMI outputs
	vfio/spapr: Always clear TCEs before unsetting the window
	ice: Check all ice_vsi_rebuild() errors in function
	PCI: keystone: Add workaround for Errata #i2037 (AM65x SR 1.0)
	Input: ili210x - use kvmalloc() to allocate buffer for firmware update
	media: qcom: camss: Add check for v4l2_fwnode_endpoint_parse
	pcmcia: Use resource_size function on resource object
	drm/amd/display: Check denominator pbn_div before used
	drm/amdgpu: check for LINEAR_ALIGNED correctly in check_tiling_flags_gfx6
	can: bcm: Remove proc entry when dev is unregistered.
	can: m_can: Release irq on error in m_can_open
	can: mcp251xfd: fix ring configuration when switching from CAN-CC to CAN-FD mode
	rust: Use awk instead of recent xargs
	rust: kbuild: fix export of bss symbols
	cifs: Fix FALLOC_FL_ZERO_RANGE to preflush buffered part of target region
	igb: Fix not clearing TimeSync interrupts for 82580
	ice: Add netif_device_attach/detach into PF reset flow
	platform/x86: dell-smbios: Fix error path in dell_smbios_init()
	regulator: core: Stub devm_regulator_bulk_get_const() if !CONFIG_REGULATOR
	can: kvaser_pciefd: Skip redundant NULL pointer check in ISR
	can: kvaser_pciefd: Remove unnecessary comment
	can: kvaser_pciefd: Rename board_irq to pci_irq
	can: kvaser_pciefd: Move reset of DMA RX buffers to the end of the ISR
	can: kvaser_pciefd: Use a single write when releasing RX buffers
	Bluetooth: qca: If memdump doesn't work, re-enable IBS
	Bluetooth: hci_event: Use HCI error defines instead of magic values
	Bluetooth: hci_conn: Only do ACL connections sequentially
	Bluetooth: Remove pending ACL connection attempts
	Bluetooth: hci_conn: Fix UAF Write in __hci_acl_create_connection_sync
	Bluetooth: hci_sync: Add helper functions to manipulate cmd_sync queue
	Bluetooth: hci_sync: Attempt to dequeue connection attempt
	Bluetooth: hci_sync: Introduce hci_cmd_sync_run/hci_cmd_sync_run_once
	Bluetooth: MGMT: Fix not generating command complete for MGMT_OP_DISCONNECT
	igc: Unlock on error in igc_io_resume()
	hwmon: (hp-wmi-sensors) Check if WMI event data exists
	net: phy: Fix missing of_node_put() for leds
	ice: protect XDP configuration with a mutex
	ice: do not bring the VSI up, if it was down before the XDP setup
	usbnet: modern method to get random MAC
	bpf: Add sockptr support for getsockopt
	bpf: Add sockptr support for setsockopt
	net/socket: Break down __sys_setsockopt
	net/socket: Break down __sys_getsockopt
	bpf, net: Fix a potential race in do_sock_getsockopt()
	bareudp: Fix device stats updates.
	fou: Fix null-ptr-deref in GRO.
	r8152: fix the firmware doesn't work
	net: bridge: br_fdb_external_learn_add(): always set EXT_LEARN
	net: dsa: vsc73xx: fix possible subblocks range of CAPT block
	selftests: net: enable bind tests
	xen: privcmd: Fix possible access to a freed kirqfd instance
	firmware: cs_dsp: Don't allow writes to read-only controls
	phy: zynqmp: Take the phy mutex in xlate
	ASoC: topology: Properly initialize soc_enum values
	dm init: Handle minors larger than 255
	iommu/vt-d: Handle volatile descriptor status read
	cgroup: Protect css->cgroup write under css_set_lock
	um: line: always fill *error_out in setup_one_line()
	devres: Initialize an uninitialized struct member
	pci/hotplug/pnv_php: Fix hotplug driver crash on Powernv
	virtio_ring: fix KMSAN error for premapped mode
	wifi: rtw88: usb: schedule rx work after everything is set up
	scsi: ufs: core: Remove SCSI host only if added
	scsi: pm80xx: Set phy->enable_completion only when we wait for it
	crypto: qat - fix unintentional re-enabling of error interrupts
	hwmon: (adc128d818) Fix underflows seen when writing limit attributes
	hwmon: (lm95234) Fix underflows seen when writing limit attributes
	hwmon: (nct6775-core) Fix underflows seen when writing limit attributes
	hwmon: (w83627ehf) Fix underflows seen when writing limit attributes
	ASoc: TAS2781: replace beXX_to_cpup with get_unaligned_beXX for potentially broken alignment
	libbpf: Add NULL checks to bpf_object__{prev_map,next_map}
	drm/amdgpu: Set no_hw_access when VF request full GPU fails
	ext4: fix possible tid_t sequence overflows
	jbd2: avoid mount failed when commit block is partial submitted
	dma-mapping: benchmark: Don't starve others when doing the test
	wifi: mwifiex: Do not return unused priv in mwifiex_get_priv_by_id()
	drm/amdgpu: reject gang submit on reserved VMIDs
	smp: Add missing destroy_work_on_stack() call in smp_call_on_cpu()
	fs/ntfs3: Check more cases when directory is corrupted
	btrfs: replace BUG_ON with ASSERT in walk_down_proc()
	btrfs: clean up our handling of refs == 0 in snapshot delete
	btrfs: replace BUG_ON() with error handling at update_ref_for_cow()
	cxl/region: Verify target positions using the ordered target list
	riscv: set trap vector earlier
	PCI: Add missing bridge lock to pci_bus_lock()
	tcp: Don't drop SYN+ACK for simultaneous connect().
	Bluetooth: btnxpuart: Fix Null pointer dereference in btnxpuart_flush()
	net: dpaa: avoid on-stack arrays of NR_CPUS elements
	LoongArch: Use correct API to map cmdline in relocate_kernel()
	regmap: maple: work around gcc-14.1 false-positive warning
	vfs: Fix potential circular locking through setxattr() and removexattr()
	i3c: master: svc: resend target address when get NACK
	i3c: mipi-i3c-hci: Error out instead on BUG_ON() in IBI DMA setup
	kselftests: dmabuf-heaps: Ensure the driver name is null-terminated
	spi: hisi-kunpeng: Add verification for the max_frequency provided by the firmware
	btrfs: initialize location to fix -Wmaybe-uninitialized in btrfs_lookup_dentry()
	s390/vmlinux.lds.S: Move ro_after_init section behind rodata section
	HID: cougar: fix slab-out-of-bounds Read in cougar_report_fixup
	HID: amd_sfh: free driver_data after destroying hid device
	Input: uinput - reject requests with unreasonable number of slots
	usbnet: ipheth: race between ipheth_close and error handling
	Squashfs: sanity check symbolic link size
	of/irq: Prevent device address out-of-bounds read in interrupt map walk
	lib/generic-radix-tree.c: Fix rare race in __genradix_ptr_alloc()
	MIPS: cevt-r4k: Don't call get_c0_compare_int if timer irq is installed
	spi: spi-fsl-lpspi: limit PRESCALE bit in TCR register
	ata: pata_macio: Use WARN instead of BUG
	smb/server: fix potential null-ptr-deref of lease_ctx_info in smb2_open()
	NFSv4: Add missing rescheduling points in nfs_client_return_marked_delegations
	riscv: Use WRITE_ONCE() when setting page table entries
	mm: Introduce pudp/p4dp/pgdp_get() functions
	riscv: mm: Only compile pgtable.c if MMU
	riscv: Use accessors to page table entries instead of direct dereference
	ACPI: CPPC: Add helper to get the highest performance value
	cpufreq: amd-pstate: Enable amd-pstate preferred core support
	cpufreq: amd-pstate: fix the highest frequency issue which limits performance
	tcp: process the 3rd ACK with sk_socket for TFO/MPTCP
	intel: legacy: Partial revert of field get conversion
	staging: iio: frequency: ad9834: Validate frequency parameter value
	iio: buffer-dmaengine: fix releasing dma channel on error
	iio: fix scale application in iio_convert_raw_to_processed_unlocked
	iio: adc: ad7124: fix config comparison
	iio: adc: ad7606: remove frstdata check for serial mode
	iio: adc: ad7124: fix chip ID mismatch
	usb: dwc3: core: update LC timer as per USB Spec V3.2
	usb: cdns2: Fix controller reset issue
	usb: dwc3: Avoid waking up gadget during startxfer
	misc: fastrpc: Fix double free of 'buf' in error path
	binder: fix UAF caused by offsets overwrite
	nvmem: Fix return type of devm_nvmem_device_get() in kerneldoc
	uio_hv_generic: Fix kernel NULL pointer dereference in hv_uio_rescind
	Drivers: hv: vmbus: Fix rescind handling in uio_hv_generic
	VMCI: Fix use-after-free when removing resource in vmci_resource_remove()
	clocksource/drivers/imx-tpm: Fix return -ETIME when delta exceeds INT_MAX
	clocksource/drivers/imx-tpm: Fix next event not taking effect sometime
	clocksource/drivers/timer-of: Remove percpu irq related code
	uprobes: Use kzalloc to allocate xol area
	perf/aux: Fix AUX buffer serialization
	mm/vmscan: use folio_migratetype() instead of get_pageblock_migratetype()
	Revert "mm: skip CMA pages when they are not available"
	workqueue: wq_watchdog_touch is always called with valid CPU
	workqueue: Improve scalability of workqueue watchdog touch
	ACPI: processor: Return an error if acpi_processor_get_info() fails in processor_add()
	ACPI: processor: Fix memory leaks in error paths of processor_add()
	arm64: acpi: Move get_cpu_for_acpi_id() to a header
	arm64: acpi: Harden get_cpu_for_acpi_id() against missing CPU entry
	can: mcp251xfd: mcp251xfd_handle_rxif_ring_uinc(): factor out in separate function
	can: mcp251xfd: rx: prepare to workaround broken RX FIFO head index erratum
	can: mcp251xfd: clarify the meaning of timestamp
	can: mcp251xfd: rx: add workaround for erratum DS80000789E 6 of mcp2518fd
	drm/amd: Add gfx12 swizzle mode defs
	drm/amdgpu: handle gfx12 in amdgpu_display_verify_sizes
	ata: libata-scsi: Remove redundant sense_buffer memsets
	ata: libata-scsi: Check ATA_QCFLAG_RTF_FILLED before using result_tf
	crypto: starfive - Align rsa input data to 32-bit
	crypto: starfive - Fix nent assignment in rsa dec
	clk: qcom: ipq9574: Update the alpha PLL type for GPLLs
	powerpc/64e: remove unused IBM HTW code
	powerpc/64e: split out nohash Book3E 64-bit code
	powerpc/64e: Define mmu_pte_psize static
	powerpc/vdso: Don't discard rela sections
	ASoC: tegra: Fix CBB error during probe()
	nvmet-tcp: fix kernel crash if commands allocation fails
	nvme-pci: allocate tagset on reset if necessary
	ASoc: SOF: topology: Clear SOF link platform name upon unload
	ASoC: sunxi: sun4i-i2s: fix LRCLK polarity in i2s mode
	clk: qcom: gcc-sm8550: Don't use parking clk_ops for QUPs
	clk: qcom: gcc-sm8550: Don't park the USB RCG at registration time
	drm/i915/fence: Mark debug_fence_init_onstack() with __maybe_unused
	drm/i915/fence: Mark debug_fence_free() with __maybe_unused
	gpio: rockchip: fix OF node leak in probe()
	gpio: modepin: Enable module autoloading
	smb: client: fix double put of @cfile in smb2_rename_path()
	riscv: Fix toolchain vector detection
	riscv: Do not restrict memory size because of linear mapping on nommu
	ublk_drv: fix NULL pointer dereference in ublk_ctrl_start_recovery()
	membarrier: riscv: Add full memory barrier in switch_mm()
	x86/mm: Fix PTI for i386 some more
	btrfs: fix race between direct IO write and fsync when using same fd
	spi: spi-fsl-lpspi: Fix off-by-one in prescale max
	Bluetooth: hci_sync: Fix UAF in hci_acl_create_conn_sync
	Bluetooth: hci_sync: Fix UAF on create_le_conn_complete
	Bluetooth: hci_sync: Fix UAF on hci_abort_conn_sync
	Linux 6.6.51

Change-Id: I4d9ef7a63380e5875e611ee548b4cc87ccea2936
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in commit 18bea82acf.
@@ -13739,7 +13739,7 @@ M:	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 M:	"Paul E. McKenney" <paulmck@kernel.org>
 L:	linux-kernel@vger.kernel.org
 S:	Supported
-F:	arch/powerpc/include/asm/membarrier.h
+F:	arch/*/include/asm/membarrier.h
 F:	include/uapi/linux/membarrier.h
 F:	kernel/sched/membarrier.c

Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 6
-SUBLEVEL = 50
+SUBLEVEL = 51
 EXTRAVERSION =
 NAME = Hurr durr I'ma ninja sloth

@@ -151,6 +151,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,

 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

+#define pgdp_get(pgpd)		READ_ONCE(*pgdp)
+
 #define pud_page(pud)		pmd_page(__pmd(pud_val(pud)))
 #define pud_write(pud)		pmd_write(__pmd(pud_val(pud)))

@@ -119,6 +119,18 @@ static inline u32 get_acpi_id_for_cpu(unsigned int cpu)
 	return	acpi_cpu_get_madt_gicc(cpu)->uid;
 }

+static inline int get_cpu_for_acpi_id(u32 uid)
+{
+	int cpu;
+
+	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+		if (acpi_cpu_get_madt_gicc(cpu) &&
+		    uid == get_acpi_id_for_cpu(cpu))
+			return cpu;
+
+	return -EINVAL;
+}
+
 static inline void arch_fix_phys_package_id(int num, u32 slot) { }
 void __init acpi_init_cpus(void);
 int apei_claim_sea(struct pt_regs *regs);

@@ -34,17 +34,6 @@ int __init acpi_numa_get_nid(unsigned int cpu)
 	return acpi_early_node_map[cpu];
 }

-static inline int get_cpu_for_acpi_id(u32 uid)
-{
-	int cpu;
-
-	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
-		if (uid == get_acpi_id_for_cpu(cpu))
-			return cpu;
-
-	return -EINVAL;
-}
-
 static int __init acpi_parse_gicc_pxm(union acpi_subtable_headers *header,
 				      const unsigned long end)
 {

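Taken together, these two hunks move get_cpu_for_acpi_id() out of the NUMA code into the header and harden it: not every possible CPU index has a MADT GICC entry, so acpi_cpu_get_madt_gicc(cpu) is now tested before the uid is read, relying on && short-circuiting. A standalone sketch of that guard (not kernel code; all names here are hypothetical stand-ins):

	#include <stdio.h>

	struct madt_gicc { unsigned int uid; };

	static struct madt_gicc cpu0 = { .uid = 7 };
	static struct madt_gicc *entries[4] = { &cpu0, NULL, NULL, NULL };

	static int get_cpu_for_uid(unsigned int uid, int nr_ids)
	{
		for (int cpu = 0; cpu < nr_ids; cpu++)
			/* && short-circuits: uid is only read when an entry exists */
			if (entries[cpu] && entries[cpu]->uid == uid)
				return cpu;
		return -1;
	}

	int main(void)
	{
		printf("uid 7 -> cpu %d\n", get_cpu_for_uid(7, 4));
		printf("uid 9 -> cpu %d\n", get_cpu_for_uid(9, 4));
		return 0;
	}
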
@@ -13,6 +13,7 @@
 #include <asm/bootinfo.h>
+#include <asm/early_ioremap.h>
 #include <asm/inst.h>
 #include <asm/io.h>
 #include <asm/sections.h>
 #include <asm/setup.h>

@@ -170,7 +171,7 @@ unsigned long __init relocate_kernel(void)
 	unsigned long kernel_length;
 	unsigned long random_offset = 0;
 	void *location_new = _text; /* Default to original kernel start */
-	char *cmdline = early_ioremap(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */
+	char *cmdline = early_memremap_ro(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */

 	strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE);

@@ -182,6 +183,7 @@ unsigned long __init relocate_kernel(void)
 	random_offset = (unsigned long)location_new - (unsigned long)(_text);
 #endif
 	reloc_offset = (unsigned long)_text - VMLINUX_LOAD_ADDRESS;
+	early_memunmap(cmdline, COMMAND_LINE_SIZE);

 	if (random_offset) {
 		kernel_length = (long)(_end) - (long)(_text);

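The three hunks form one pattern: the firmware command line lives in RAM, so it is mapped with early_memremap_ro() instead of early_ioremap(), and the mapping is released once boot_command_line holds a copy — the old code also leaked the early mapping. A minimal sketch of the corrected lifecycle (kernel-context pseudo-usage, not a standalone program):

	char *cmdline = early_memremap_ro(fw_arg1, COMMAND_LINE_SIZE);	/* map firmware RAM, read-only */
	strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE);		/* copy out while mapped */
	/* ... no further use of the mapping ... */
	early_memunmap(cmdline, COMMAND_LINE_SIZE);			/* balance the early mapping */
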
@@ -303,13 +303,6 @@ int r4k_clockevent_init(void)
 	if (!c0_compare_int_usable())
 		return -ENXIO;

-	/*
-	 * With vectored interrupts things are getting platform specific.
-	 * get_c0_compare_int is a hook to allow a platform to return the
-	 * interrupt number of its liking.
-	 */
-	irq = get_c0_compare_int();
-
 	cd = &per_cpu(mips_clockevent_device, cpu);

 	cd->name		= "MIPS";

@@ -320,7 +313,6 @@ int r4k_clockevent_init(void)
 	min_delta		= calculate_min_delta();

 	cd->rating		= 300;
-	cd->irq			= irq;
 	cd->cpumask		= cpumask_of(cpu);
 	cd->set_next_event	= mips_next_event;
 	cd->event_handler	= mips_event_handler;

@@ -332,6 +324,13 @@ int r4k_clockevent_init(void)

 	cp0_timer_irq_installed = 1;

+	/*
+	 * With vectored interrupts things are getting platform specific.
+	 * get_c0_compare_int is a hook to allow a platform to return the
+	 * interrupt number of its liking.
+	 */
+	irq = get_c0_compare_int();
+
 	if (request_irq(irq, c0_compare_interrupt, flags, "timer",
 			c0_compare_interrupt))
 		pr_err("Failed to request irq %d (timer)\n", irq);

@@ -303,8 +303,7 @@ extern unsigned long linear_map_top;
 extern int book3e_htw_mode;

 #define PPC_HTW_NONE	0
-#define PPC_HTW_IBM	1
-#define PPC_HTW_E6500	2
+#define PPC_HTW_E6500	1

 /*
  * 64-bit booke platforms don't load the tlb in the tlb miss handler code.

@@ -74,6 +74,8 @@ SECTIONS
 	.got		: { *(.got) }			:text
 	.plt		: { *(.plt) }

+	.rela.dyn	: { *(.rela .rela*) }
+
 	_end = .;
 	__end = .;
 	PROVIDE(end = .);

@@ -87,7 +89,7 @@ SECTIONS
 		*(.branch_lt)
 		*(.data .data.* .gnu.linkonce.d.* .sdata*)
 		*(.bss .sbss .dynbss .dynsbss)
-		*(.got1 .glink .iplt .rela*)
+		*(.got1 .glink .iplt)
 	}
 }

@@ -69,7 +69,7 @@ SECTIONS
 	.eh_frame_hdr	: { *(.eh_frame_hdr) }		:text	:eh_frame_hdr
 	.eh_frame	: { KEEP (*(.eh_frame)) }	:text
 	.gcc_except_table : { *(.gcc_except_table) }
-	.rela.dyn ALIGN(8) : { *(.rela.dyn) }
+	.rela.dyn ALIGN(8) : { *(.rela .rela*) }

 	.got ALIGN(8)	: { *(.got .toc) }

@@ -86,7 +86,7 @@ SECTIONS
 		*(.data .data.* .gnu.linkonce.d.* .sdata*)
 		*(.bss .sbss .dynbss .dynsbss)
 		*(.opd)
-		*(.glink .iplt .plt .rela*)
+		*(.glink .iplt .plt)
 	}
 }

@@ -715,7 +715,15 @@ again:
 	}

 release:
-	qnodesp->count--; /* release the node */
+	/*
+	 * Clear the lock before releasing the node, as another CPU might see stale
+	 * values if an interrupt occurs after we increment qnodesp->count
+	 * but before node->lock is initialized. The barrier ensures that
+	 * there are no further stores to the node after it has been released.
+	 */
+	node->lock = NULL;
+	barrier();
+	qnodesp->count--;
 }

 void queued_spin_lock_slowpath(struct qspinlock *lock)

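The ordering is the whole fix: the node is only safe to reuse once nothing points at it. A minimal user-space sketch of the same retire-then-release discipline (hypothetical types, not the kernel's code; barrier() is just a compiler fence here):

	#define barrier() __asm__ __volatile__("" ::: "memory")

	struct qnode  { void *lock; };		/* hypothetical stand-in */
	struct qnodes { int count; struct qnode nodes[4]; };

	static void release_node(struct qnodes *qnodesp, struct qnode *node)
	{
		/* Retire the node's contents first ... */
		node->lock = NULL;
		/* ... then fence, then make the slot reusable. If count were
		 * decremented first, an interrupt arriving in between could
		 * grab the slot and observe the stale lock pointer. */
		barrier();
		qnodesp->count--;
	}
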
@@ -3,7 +3,7 @@
 ccflags-$(CONFIG_PPC64)	:= $(NO_MINIMAL_TOC)

 obj-y				+= mmu_context.o tlb.o tlb_low.o kup.o
-obj-$(CONFIG_PPC_BOOK3E_64)	+= tlb_low_64e.o book3e_pgtable.o
+obj-$(CONFIG_PPC_BOOK3E_64)	+= tlb_64e.o tlb_low_64e.o book3e_pgtable.o
 obj-$(CONFIG_40x)		+= 40x.o
 obj-$(CONFIG_44x)		+= 44x.o
 obj-$(CONFIG_PPC_8xx)		+= 8xx.o

@@ -110,28 +110,6 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
 };
 #endif

-/* The variables below are currently only used on 64-bit Book3E
- * though this will probably be made common with other nohash
- * implementations at some point
- */
-#ifdef CONFIG_PPC64
-
-int mmu_pte_psize;		/* Page size used for PTE pages */
-int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
-int book3e_htw_mode;		/* HW tablewalk?  Value is PPC_HTW_* */
-unsigned long linear_map_top;	/* Top of linear mapping */
-
-
-/*
- * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
- * exceptions.  This is used for bolted and e6500 TLB miss handlers which
- * do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
- * this is set to zero.
- */
-int extlb_level_exc;
-
-#endif /* CONFIG_PPC64 */
-
 #ifdef CONFIG_PPC_E500
 /* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
 DEFINE_PER_CPU(int, next_tlbcam_idx);

@@ -358,381 +336,7 @@ void tlb_flush(struct mmu_gather *tlb)
 	flush_tlb_mm(tlb->mm);
 }

-/*
- * Below are functions specific to the 64-bit variant of Book3E though that
- * may change in the future
- */
-
-#ifdef CONFIG_PPC64
-
-/*
- * Handling of virtual linear page tables or indirect TLB entries
- * flushing when PTE pages are freed
- */
-void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
-{
-	int tsize = mmu_psize_defs[mmu_pte_psize].enc;
-
-	if (book3e_htw_mode != PPC_HTW_NONE) {
-		unsigned long start = address & PMD_MASK;
-		unsigned long end = address + PMD_SIZE;
-		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;
-
-		/* This isn't the most optimal, ideally we would factor out the
-		 * while preempt & CPU mask mucking around, or even the IPI but
-		 * it will do for now
-		 */
-		while (start < end) {
-			__flush_tlb_page(tlb->mm, start, tsize, 1);
-			start += size;
-		}
-	} else {
-		unsigned long rmask = 0xf000000000000000ul;
-		unsigned long rid = (address & rmask) | 0x1000000000000000ul;
-		unsigned long vpte = address & ~rmask;
-
-		vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
-		vpte |= rid;
-		__flush_tlb_page(tlb->mm, vpte, tsize, 0);
-	}
-}
-
-static void __init setup_page_sizes(void)
-{
-	unsigned int tlb0cfg;
-	unsigned int tlb0ps;
-	unsigned int eptcfg;
-	int i, psize;
-
-#ifdef CONFIG_PPC_E500
-	unsigned int mmucfg = mfspr(SPRN_MMUCFG);
-	int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);
-
-	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
-		unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
-		unsigned int min_pg, max_pg;
-
-		min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
-		max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;
-
-		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
-			struct mmu_psize_def *def;
-			unsigned int shift;
-
-			def = &mmu_psize_defs[psize];
-			shift = def->shift;
-
-			if (shift == 0 || shift & 1)
-				continue;
-
-			/* adjust to be in terms of 4^shift Kb */
-			shift = (shift - 10) >> 1;
-
-			if ((shift >= min_pg) && (shift <= max_pg))
-				def->flags |= MMU_PAGE_SIZE_DIRECT;
-		}
-
-		goto out;
-	}
-
-	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
-		u32 tlb1cfg, tlb1ps;
-
-		tlb0cfg = mfspr(SPRN_TLB0CFG);
-		tlb1cfg = mfspr(SPRN_TLB1CFG);
-		tlb1ps = mfspr(SPRN_TLB1PS);
-		eptcfg = mfspr(SPRN_EPTCFG);
-
-		if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
-			book3e_htw_mode = PPC_HTW_E6500;
-
-		/*
-		 * We expect 4K subpage size and unrestricted indirect size.
-		 * The lack of a restriction on indirect size is a Freescale
-		 * extension, indicated by PSn = 0 but SPSn != 0.
-		 */
-		if (eptcfg != 2)
-			book3e_htw_mode = PPC_HTW_NONE;
-
-		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
-			struct mmu_psize_def *def = &mmu_psize_defs[psize];
-
-			if (!def->shift)
-				continue;
-
-			if (tlb1ps & (1U << (def->shift - 10))) {
-				def->flags |= MMU_PAGE_SIZE_DIRECT;
-
-				if (book3e_htw_mode && psize == MMU_PAGE_2M)
-					def->flags |= MMU_PAGE_SIZE_INDIRECT;
-			}
-		}
-
-		goto out;
-	}
-#endif
-
-	tlb0cfg = mfspr(SPRN_TLB0CFG);
-	tlb0ps = mfspr(SPRN_TLB0PS);
-	eptcfg = mfspr(SPRN_EPTCFG);
-
-	/* Look for supported direct sizes */
-	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
-		struct mmu_psize_def *def = &mmu_psize_defs[psize];
-
-		if (tlb0ps & (1U << (def->shift - 10)))
-			def->flags |= MMU_PAGE_SIZE_DIRECT;
-	}
-
-	/* Indirect page sizes supported ? */
-	if ((tlb0cfg & TLBnCFG_IND) == 0 ||
-	    (tlb0cfg & TLBnCFG_PT) == 0)
-		goto out;
-
-	book3e_htw_mode = PPC_HTW_IBM;
-
-	/* Now, we only deal with one IND page size for each
-	 * direct size. Hopefully all implementations today are
-	 * unambiguous, but we might want to be careful in the
-	 * future.
-	 */
-	for (i = 0; i < 3; i++) {
-		unsigned int ps, sps;
-
-		sps = eptcfg & 0x1f;
-		eptcfg >>= 5;
-		ps = eptcfg & 0x1f;
-		eptcfg >>= 5;
-		if (!ps || !sps)
-			continue;
-		for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
-			struct mmu_psize_def *def = &mmu_psize_defs[psize];
-
-			if (ps == (def->shift - 10))
-				def->flags |= MMU_PAGE_SIZE_INDIRECT;
-			if (sps == (def->shift - 10))
-				def->ind = ps + 10;
-		}
-	}
-
-out:
-	/* Cleanup array and print summary */
-	pr_info("MMU: Supported page sizes\n");
-	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
-		struct mmu_psize_def *def = &mmu_psize_defs[psize];
-		const char *__page_type_names[] = {
-			"unsupported",
-			"direct",
-			"indirect",
-			"direct & indirect"
-		};
-		if (def->flags == 0) {
-			def->shift = 0;
-			continue;
-		}
-		pr_info("  %8ld KB as %s\n", 1ul << (def->shift - 10),
-			__page_type_names[def->flags & 0x3]);
-	}
-}
-
-static void __init setup_mmu_htw(void)
-{
-	/*
-	 * If we want to use HW tablewalk, enable it by patching the TLB miss
-	 * handlers to branch to the one dedicated to it.
-	 */
-
-	switch (book3e_htw_mode) {
-	case PPC_HTW_IBM:
-		patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
-		patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
-		break;
-#ifdef CONFIG_PPC_E500
-	case PPC_HTW_E6500:
-		extlb_level_exc = EX_TLB_SIZE;
-		patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
-		patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
-		break;
-#endif
-	}
-	pr_info("MMU: Book3E HW tablewalk %s\n",
-		book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
-}
-
-/*
- * Early initialization of the MMU TLB code
- */
-static void early_init_this_mmu(void)
-{
-	unsigned int mas4;
-
-	/* Set MAS4 based on page table setting */
-
-	mas4 = 0x4 << MAS4_WIMGED_SHIFT;
-	switch (book3e_htw_mode) {
-	case PPC_HTW_E6500:
-		mas4 |= MAS4_INDD;
-		mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
-		mas4 |= MAS4_TLBSELD(1);
-		mmu_pte_psize = MMU_PAGE_2M;
-		break;
-
-	case PPC_HTW_IBM:
-		mas4 |= MAS4_INDD;
-		mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
-		mmu_pte_psize = MMU_PAGE_1M;
-		break;
-
-	case PPC_HTW_NONE:
-		mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
-		mmu_pte_psize = mmu_virtual_psize;
-		break;
-	}
-	mtspr(SPRN_MAS4, mas4);
-
-#ifdef CONFIG_PPC_E500
-	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
-		unsigned int num_cams;
-		bool map = true;
-
-		/* use a quarter of the TLBCAM for bolted linear map */
-		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
-
-		/*
-		 * Only do the mapping once per core, or else the
-		 * transient mapping would cause problems.
-		 */
-#ifdef CONFIG_SMP
-		if (hweight32(get_tensr()) > 1)
-			map = false;
-#endif
-
-		if (map)
-			linear_map_top = map_mem_in_cams(linear_map_top,
-							 num_cams, false, true);
-	}
-#endif
-
-	/* A sync won't hurt us after mucking around with
-	 * the MMU configuration
-	 */
-	mb();
-}
-
-static void __init early_init_mmu_global(void)
-{
-	/* XXX This should be decided at runtime based on supported
-	 * page sizes in the TLB, but for now let's assume 16M is
-	 * always there and a good fit (which it probably is)
-	 *
-	 * Freescale booke only supports 4K pages in TLB0, so use that.
-	 */
-	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
-		mmu_vmemmap_psize = MMU_PAGE_4K;
-	else
-		mmu_vmemmap_psize = MMU_PAGE_16M;
-
-	/* XXX This code only checks for TLB 0 capabilities and doesn't
-	 *     check what page size combos are supported by the HW. It
-	 *     also doesn't handle the case where a separate array holds
-	 *     the IND entries from the array loaded by the PT.
-	 */
-	/* Look for supported page sizes */
-	setup_page_sizes();
-
-	/* Look for HW tablewalk support */
-	setup_mmu_htw();
-
-#ifdef CONFIG_PPC_E500
-	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
-		if (book3e_htw_mode == PPC_HTW_NONE) {
-			extlb_level_exc = EX_TLB_SIZE;
-			patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
-			patch_exception(0x1e0,
-				exc_instruction_tlb_miss_bolted_book3e);
-		}
-	}
-#endif
-
-	/* Set the global containing the top of the linear mapping
-	 * for use by the TLB miss code
-	 */
-	linear_map_top = memblock_end_of_DRAM();
-
-	ioremap_bot = IOREMAP_BASE;
-}
-
-static void __init early_mmu_set_memory_limit(void)
-{
-#ifdef CONFIG_PPC_E500
-	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
-		/*
-		 * Limit memory so we dont have linear faults.
-		 * Unlike memblock_set_current_limit, which limits
-		 * memory available during early boot, this permanently
-		 * reduces the memory available to Linux. We need to
-		 * do this because highmem is not supported on 64-bit.
-		 */
-		memblock_enforce_memory_limit(linear_map_top);
-	}
-#endif
-
-	memblock_set_current_limit(linear_map_top);
-}
-
-/* boot cpu only */
-void __init early_init_mmu(void)
-{
-	early_init_mmu_global();
-	early_init_this_mmu();
-	early_mmu_set_memory_limit();
-}
-
-void early_init_mmu_secondary(void)
-{
-	early_init_this_mmu();
-}
-
-void setup_initial_memory_limit(phys_addr_t first_memblock_base,
-				phys_addr_t first_memblock_size)
-{
-	/* On non-FSL Embedded 64-bit, we adjust the RMA size to match
-	 * the bolted TLB entry. We know for now that only 1G
-	 * entries are supported though that may eventually
-	 * change.
-	 *
-	 * on FSL Embedded 64-bit, usually all RAM is bolted, but with
-	 * unusual memory sizes it's possible for some RAM to not be mapped
-	 * (such RAM is not used at all by Linux, since we don't support
-	 * highmem on 64-bit). We limit ppc64_rma_size to what would be
-	 * mappable if this memblock is the only one. Additional memblocks
-	 * can only increase, not decrease, the amount that ends up getting
-	 * mapped. We still limit max to 1G even if we'll eventually map
-	 * more. This is due to what the early init code is set up to do.
-	 *
-	 * We crop it to the size of the first MEMBLOCK to
-	 * avoid going over total available memory just in case...
-	 */
-#ifdef CONFIG_PPC_E500
-	if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
-		unsigned long linear_sz;
-		unsigned int num_cams;
-
-		/* use a quarter of the TLBCAM for bolted linear map */
-		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
-
-		linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
-					    true, true);
-
-		ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
-	} else
-#endif
-		ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
-
-	/* Finally limit subsequent allocations */
-	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
-}
-#else /* ! CONFIG_PPC64 */
+#ifndef CONFIG_PPC64
 void __init early_init_mmu(void)
 {
 	unsigned long root = of_get_flat_dt_root();

arch/powerpc/mm/nohash/tlb_64e.c (new file, 361 lines)
@@ -0,0 +1,361 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
+ *                     IBM Corp.
+ *
+ * Derived from arch/ppc/mm/init.c:
+ *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ * and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *   Copyright (C) 1996 Paul Mackerras
+ *
+ * Derived from "arch/i386/mm/init.c"
+ *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/memblock.h>
+
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/tlb.h>
+#include <asm/code-patching.h>
+#include <asm/cputhreads.h>
+
+#include <mm/mmu_decl.h>
+
+/* The variables below are currently only used on 64-bit Book3E
+ * though this will probably be made common with other nohash
+ * implementations at some point
+ */
+static int mmu_pte_psize;	/* Page size used for PTE pages */
+int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
+int book3e_htw_mode;		/* HW tablewalk?  Value is PPC_HTW_* */
+unsigned long linear_map_top;	/* Top of linear mapping */
+
+
+/*
+ * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
+ * exceptions.  This is used for bolted and e6500 TLB miss handlers which
+ * do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
+ * this is set to zero.
+ */
+int extlb_level_exc;
+
+/*
+ * Handling of virtual linear page tables or indirect TLB entries
+ * flushing when PTE pages are freed
+ */
+void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
+{
+	int tsize = mmu_psize_defs[mmu_pte_psize].enc;
+
+	if (book3e_htw_mode != PPC_HTW_NONE) {
+		unsigned long start = address & PMD_MASK;
+		unsigned long end = address + PMD_SIZE;
+		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;
+
+		/* This isn't the most optimal, ideally we would factor out the
+		 * while preempt & CPU mask mucking around, or even the IPI but
+		 * it will do for now
+		 */
+		while (start < end) {
+			__flush_tlb_page(tlb->mm, start, tsize, 1);
+			start += size;
+		}
+	} else {
+		unsigned long rmask = 0xf000000000000000ul;
+		unsigned long rid = (address & rmask) | 0x1000000000000000ul;
+		unsigned long vpte = address & ~rmask;
+
+		vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
+		vpte |= rid;
+		__flush_tlb_page(tlb->mm, vpte, tsize, 0);
+	}
+}
+
+static void __init setup_page_sizes(void)
+{
+	unsigned int tlb0cfg;
+	unsigned int eptcfg;
+	int psize;
+
+#ifdef CONFIG_PPC_E500
+	unsigned int mmucfg = mfspr(SPRN_MMUCFG);
+	int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);
+
+	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
+		unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
+		unsigned int min_pg, max_pg;
+
+		min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
+		max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;
+
+		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
+			struct mmu_psize_def *def;
+			unsigned int shift;
+
+			def = &mmu_psize_defs[psize];
+			shift = def->shift;
+
+			if (shift == 0 || shift & 1)
+				continue;
+
+			/* adjust to be in terms of 4^shift Kb */
+			shift = (shift - 10) >> 1;
+
+			if ((shift >= min_pg) && (shift <= max_pg))
+				def->flags |= MMU_PAGE_SIZE_DIRECT;
+		}
+
+		goto out;
+	}
+
+	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
+		u32 tlb1cfg, tlb1ps;
+
+		tlb0cfg = mfspr(SPRN_TLB0CFG);
+		tlb1cfg = mfspr(SPRN_TLB1CFG);
+		tlb1ps = mfspr(SPRN_TLB1PS);
+		eptcfg = mfspr(SPRN_EPTCFG);
+
+		if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
+			book3e_htw_mode = PPC_HTW_E6500;
+
+		/*
+		 * We expect 4K subpage size and unrestricted indirect size.
+		 * The lack of a restriction on indirect size is a Freescale
+		 * extension, indicated by PSn = 0 but SPSn != 0.
+		 */
+		if (eptcfg != 2)
+			book3e_htw_mode = PPC_HTW_NONE;
+
+		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
+			struct mmu_psize_def *def = &mmu_psize_defs[psize];
+
+			if (!def->shift)
+				continue;
+
+			if (tlb1ps & (1U << (def->shift - 10))) {
+				def->flags |= MMU_PAGE_SIZE_DIRECT;
+
+				if (book3e_htw_mode && psize == MMU_PAGE_2M)
+					def->flags |= MMU_PAGE_SIZE_INDIRECT;
+			}
+		}
+
+		goto out;
+	}
+#endif
+out:
+	/* Cleanup array and print summary */
+	pr_info("MMU: Supported page sizes\n");
+	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
+		struct mmu_psize_def *def = &mmu_psize_defs[psize];
+		const char *__page_type_names[] = {
+			"unsupported",
+			"direct",
+			"indirect",
+			"direct & indirect"
+		};
+		if (def->flags == 0) {
+			def->shift = 0;
+			continue;
+		}
+		pr_info("  %8ld KB as %s\n", 1ul << (def->shift - 10),
+			__page_type_names[def->flags & 0x3]);
+	}
+}
+
+static void __init setup_mmu_htw(void)
+{
+	/*
+	 * If we want to use HW tablewalk, enable it by patching the TLB miss
+	 * handlers to branch to the one dedicated to it.
+	 */
+
+	switch (book3e_htw_mode) {
+#ifdef CONFIG_PPC_E500
+	case PPC_HTW_E6500:
+		extlb_level_exc = EX_TLB_SIZE;
+		patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
+		patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
+		break;
+#endif
+	}
+	pr_info("MMU: Book3E HW tablewalk %s\n",
+		book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
+}
+
+/*
+ * Early initialization of the MMU TLB code
+ */
+static void early_init_this_mmu(void)
+{
+	unsigned int mas4;
+
+	/* Set MAS4 based on page table setting */
+
+	mas4 = 0x4 << MAS4_WIMGED_SHIFT;
+	switch (book3e_htw_mode) {
+	case PPC_HTW_E6500:
+		mas4 |= MAS4_INDD;
+		mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
+		mas4 |= MAS4_TLBSELD(1);
+		mmu_pte_psize = MMU_PAGE_2M;
+		break;
+
+	case PPC_HTW_NONE:
+		mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
+		mmu_pte_psize = mmu_virtual_psize;
+		break;
+	}
+	mtspr(SPRN_MAS4, mas4);
+
+#ifdef CONFIG_PPC_E500
+	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
+		unsigned int num_cams;
+		bool map = true;
+
+		/* use a quarter of the TLBCAM for bolted linear map */
+		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
+
+		/*
+		 * Only do the mapping once per core, or else the
+		 * transient mapping would cause problems.
+		 */
+#ifdef CONFIG_SMP
+		if (hweight32(get_tensr()) > 1)
+			map = false;
+#endif
+
+		if (map)
+			linear_map_top = map_mem_in_cams(linear_map_top,
+							 num_cams, false, true);
+	}
+#endif
+
+	/* A sync won't hurt us after mucking around with
+	 * the MMU configuration
+	 */
+	mb();
+}
+
+static void __init early_init_mmu_global(void)
+{
+	/* XXX This should be decided at runtime based on supported
+	 * page sizes in the TLB, but for now let's assume 16M is
+	 * always there and a good fit (which it probably is)
+	 *
+	 * Freescale booke only supports 4K pages in TLB0, so use that.
+	 */
+	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
+		mmu_vmemmap_psize = MMU_PAGE_4K;
+	else
+		mmu_vmemmap_psize = MMU_PAGE_16M;
+
+	/* XXX This code only checks for TLB 0 capabilities and doesn't
+	 *     check what page size combos are supported by the HW. It
+	 *     also doesn't handle the case where a separate array holds
+	 *     the IND entries from the array loaded by the PT.
+	 */
+	/* Look for supported page sizes */
+	setup_page_sizes();
+
+	/* Look for HW tablewalk support */
+	setup_mmu_htw();
+
+#ifdef CONFIG_PPC_E500
+	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
+		if (book3e_htw_mode == PPC_HTW_NONE) {
+			extlb_level_exc = EX_TLB_SIZE;
+			patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
+			patch_exception(0x1e0,
+				exc_instruction_tlb_miss_bolted_book3e);
+		}
+	}
+#endif
+
+	/* Set the global containing the top of the linear mapping
+	 * for use by the TLB miss code
+	 */
+	linear_map_top = memblock_end_of_DRAM();
+
+	ioremap_bot = IOREMAP_BASE;
+}
+
+static void __init early_mmu_set_memory_limit(void)
+{
+#ifdef CONFIG_PPC_E500
+	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
+		/*
+		 * Limit memory so we dont have linear faults.
+		 * Unlike memblock_set_current_limit, which limits
+		 * memory available during early boot, this permanently
+		 * reduces the memory available to Linux. We need to
+		 * do this because highmem is not supported on 64-bit.
+		 */
+		memblock_enforce_memory_limit(linear_map_top);
+	}
+#endif
+
+	memblock_set_current_limit(linear_map_top);
+}
+
+/* boot cpu only */
+void __init early_init_mmu(void)
+{
+	early_init_mmu_global();
+	early_init_this_mmu();
+	early_mmu_set_memory_limit();
+}
+
+void early_init_mmu_secondary(void)
+{
+	early_init_this_mmu();
+}
+
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	/* On non-FSL Embedded 64-bit, we adjust the RMA size to match
+	 * the bolted TLB entry. We know for now that only 1G
+	 * entries are supported though that may eventually
+	 * change.
+	 *
+	 * on FSL Embedded 64-bit, usually all RAM is bolted, but with
+	 * unusual memory sizes it's possible for some RAM to not be mapped
+	 * (such RAM is not used at all by Linux, since we don't support
+	 * highmem on 64-bit). We limit ppc64_rma_size to what would be
+	 * mappable if this memblock is the only one. Additional memblocks
+	 * can only increase, not decrease, the amount that ends up getting
+	 * mapped. We still limit max to 1G even if we'll eventually map
+	 * more. This is due to what the early init code is set up to do.
+	 *
+	 * We crop it to the size of the first MEMBLOCK to
+	 * avoid going over total available memory just in case...
+	 */
+#ifdef CONFIG_PPC_E500
+	if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
+		unsigned long linear_sz;
+		unsigned int num_cams;
+
+		/* use a quarter of the TLBCAM for bolted linear map */
+		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
+
+		linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
+					    true, true);
+
+		ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
+	} else
+#endif
+		ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
+
+	/* Finally limit subsequent allocations */
+	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
+}

@@ -893,201 +893,6 @@ virt_page_table_tlb_miss_whacko_fault:
 	TLB_MISS_EPILOG_ERROR
 	b	exc_data_storage_book3e

-
-/**************************************************************
- *                                                            *
- * TLB miss handling for Book3E with hw page table support    *
- *                                                            *
- **************************************************************/
-
-
-/* Data TLB miss */
-	START_EXCEPTION(data_tlb_miss_htw)
-	TLB_MISS_PROLOG
-
-	/* Now we handle the fault proper. We only save DEAR in normal
-	 * fault case since that's the only interesting values here.
-	 * We could probably also optimize by not saving SRR0/1 in the
-	 * linear mapping case but I'll leave that for later
-	 */
-	mfspr	r14,SPRN_ESR
-	mfspr	r16,SPRN_DEAR		/* get faulting address */
-	srdi	r11,r16,44		/* get region */
-	xoris	r11,r11,0xc
-	cmpldi	cr0,r11,0		/* linear mapping ? */
-	beq	tlb_load_linear		/* yes -> go to linear map load */
-	cmpldi	cr1,r11,1		/* vmalloc mapping ? */
-
-	/* We do the user/kernel test for the PID here along with the RW test
-	 */
-	srdi.	r11,r16,60		/* Check for user region */
-	ld	r15,PACAPGD(r13)	/* Load user pgdir */
-	beq	htw_tlb_miss
-
-	/* XXX replace the RMW cycles with immediate loads + writes */
-1:	mfspr	r10,SPRN_MAS1
-	rlwinm	r10,r10,0,16,1		/* Clear TID */
-	mtspr	SPRN_MAS1,r10
-	ld	r15,PACA_KERNELPGD(r13)	/* Load kernel pgdir */
-	beq+	cr1,htw_tlb_miss
-
-	/* We got a crappy address, just fault with whatever DEAR and ESR
-	 * are here
-	 */
-	TLB_MISS_EPILOG_ERROR
-	b	exc_data_storage_book3e
-
-/* Instruction TLB miss */
-	START_EXCEPTION(instruction_tlb_miss_htw)
-	TLB_MISS_PROLOG
-
-	/* If we take a recursive fault, the second level handler may need
-	 * to know whether we are handling a data or instruction fault in
-	 * order to get to the right store fault handler. We provide that
-	 * info by keeping a crazy value for ESR in r14
-	 */
-	li	r14,-1	/* store to exception frame is done later */
-
-	/* Now we handle the fault proper. We only save DEAR in the non
-	 * linear mapping case since we know the linear mapping case will
-	 * not re-enter. We could indeed optimize and also not save SRR0/1
-	 * in the linear mapping case but I'll leave that for later
-	 *
-	 * Faulting address is SRR0 which is already in r16
-	 */
-	srdi	r11,r16,44		/* get region */
-	xoris	r11,r11,0xc
-	cmpldi	cr0,r11,0		/* linear mapping ? */
-	beq	tlb_load_linear		/* yes -> go to linear map load */
-	cmpldi	cr1,r11,1		/* vmalloc mapping ? */
-
-	/* We do the user/kernel test for the PID here along with the RW test
-	 */
-	srdi.	r11,r16,60		/* Check for user region */
-	ld	r15,PACAPGD(r13)	/* Load user pgdir */
-	beq	htw_tlb_miss
-
-	/* XXX replace the RMW cycles with immediate loads + writes */
-1:	mfspr	r10,SPRN_MAS1
-	rlwinm	r10,r10,0,16,1		/* Clear TID */
-	mtspr	SPRN_MAS1,r10
-	ld	r15,PACA_KERNELPGD(r13)	/* Load kernel pgdir */
-	beq+	htw_tlb_miss
-
-	/* We got a crappy address, just fault */
-	TLB_MISS_EPILOG_ERROR
-	b	exc_instruction_storage_book3e
-
-
-/*
- * This is the guts of the second-level TLB miss handler for direct
- * misses. We are entered with:
- *
- * r16 = virtual page table faulting address
- * r15 = PGD pointer
- * r14 = ESR
- * r13 = PACA
- * r12 = TLB exception frame in PACA
- * r11 = crap (free to use)
- * r10 = crap (free to use)
- *
- * It can be re-entered by the linear mapping miss handler. However, to
- * avoid too much complication, it will save/restore things for us
- */
-htw_tlb_miss:
-#ifdef CONFIG_PPC_KUAP
-	mfspr	r10,SPRN_MAS1
-	rlwinm.	r10,r10,0,0x3fff0000
-	beq-	htw_tlb_miss_fault /* KUAP fault */
-#endif
-	/* Search if we already have a TLB entry for that virtual address, and
-	 * if we do, bail out.
-	 *
-	 * MAS1:IND should be already set based on MAS4
-	 */
-	PPC_TLBSRX_DOT(0,R16)
-	beq	htw_tlb_miss_done
-
-	/* Now, we need to walk the page tables. First check if we are in
-	 * range.
-	 */
-	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
-	bne-	htw_tlb_miss_fault
-
-	/* Get the PGD pointer */
-	cmpldi	cr0,r15,0
-	beq-	htw_tlb_miss_fault
-
-	/* Get to PGD entry */
-	rldicl	r11,r16,64-(PGDIR_SHIFT-3),64-PGD_INDEX_SIZE-3
-	clrrdi	r10,r11,3
-	ldx	r15,r10,r15
-	cmpdi	cr0,r15,0
-	bge	htw_tlb_miss_fault
-
-	/* Get to PUD entry */
-	rldicl	r11,r16,64-(PUD_SHIFT-3),64-PUD_INDEX_SIZE-3
-	clrrdi	r10,r11,3
-	ldx	r15,r10,r15
-	cmpdi	cr0,r15,0
-	bge	htw_tlb_miss_fault
-
-	/* Get to PMD entry */
-	rldicl	r11,r16,64-(PMD_SHIFT-3),64-PMD_INDEX_SIZE-3
-	clrrdi	r10,r11,3
-	ldx	r15,r10,r15
-	cmpdi	cr0,r15,0
-	bge	htw_tlb_miss_fault
-
-	/* Ok, we're all right, we can now create an indirect entry for
-	 * a 1M or 256M page.
-	 *
-	 * The last trick is now that because we use "half" pages for
-	 * the HTW (1M IND is 2K and 256M IND is 32K) we need to account
-	 * for an added LSB bit to the RPN. For 64K pages, there is no
-	 * problem as we already use 32K arrays (half PTE pages), but for
-	 * 4K page we need to extract a bit from the virtual address and
-	 * insert it into the "PA52" bit of the RPN.
-	 */
-	rlwimi	r15,r16,32-9,20,20
-	/* Now we build the MAS:
-	 *
-	 *   MAS 0   :	Fully setup with defaults in MAS4 and TLBnCFG
-	 *   MAS 1   :	Almost fully setup
-	 *               - PID already updated by caller if necessary
-	 *               - TSIZE for now is base ind page size always
-	 *   MAS 2   :	Use defaults
-	 *   MAS 3+7 :	Needs to be done
-	 */
-	ori	r10,r15,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)
-
-	srdi	r16,r10,32
-	mtspr	SPRN_MAS3,r10
-	mtspr	SPRN_MAS7,r16
-
-	tlbwe
-
-htw_tlb_miss_done:
-	/* We don't bother with restoring DEAR or ESR since we know we are
-	 * level 0 and just going back to userland. They are only needed
-	 * if you are going to take an access fault
-	 */
-	TLB_MISS_EPILOG_SUCCESS
-	rfi
-
-htw_tlb_miss_fault:
-	/* We need to check if it was an instruction miss. We know this
-	 * though because r14 would contain -1
-	 */
-	cmpdi	cr0,r14,-1
-	beq	1f
-	mtspr	SPRN_DEAR,r16
-	mtspr	SPRN_ESR,r14
-	TLB_MISS_EPILOG_ERROR
-	b	exc_data_storage_book3e
-1:	TLB_MISS_EPILOG_ERROR
-	b	exc_instruction_storage_book3e
-
 /*
  * This is the guts of "any" level TLB miss handler for kernel linear
  * mapping misses. We are entered with:

@@ -27,6 +27,7 @@ config RISCV
 	select ARCH_HAS_GCOV_PROFILE_ALL
 	select ARCH_HAS_GIGANTIC_PAGE
 	select ARCH_HAS_KCOV
+	select ARCH_HAS_MEMBARRIER_CALLBACKS
 	select ARCH_HAS_MMIOWB
 	select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
 	select ARCH_HAS_PMEM_API

@@ -490,8 +491,8 @@ config RISCV_ISA_SVPBMT
 config TOOLCHAIN_HAS_V
 	bool
 	default y
-	depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64iv)
-	depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32iv)
+	depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64imv)
+	depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32imv)
 	depends on LLD_VERSION >= 140000 || LD_VERSION >= 23800
 	depends on AS_HAS_OPTION_ARCH

@@ -18,9 +18,9 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
 	pte_t *pte = virt_to_kpte(addr);

 	if (protect)
-		set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
+		set_pte(pte, __pte(pte_val(ptep_get(pte)) & ~_PAGE_PRESENT));
 	else
-		set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
+		set_pte(pte, __pte(pte_val(ptep_get(pte)) | _PAGE_PRESENT));

 	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

arch/riscv/include/asm/membarrier.h (new file, 31 lines)
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_RISCV_MEMBARRIER_H
+#define _ASM_RISCV_MEMBARRIER_H
+
+static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
+					     struct mm_struct *next,
+					     struct task_struct *tsk)
+{
+	/*
+	 * Only need the full barrier when switching between processes.
+	 * Barrier when switching from kernel to userspace is not
+	 * required here, given that it is implied by mmdrop(). Barrier
+	 * when switching from userspace to kernel is not needed after
+	 * store to rq->curr.
+	 */
+	if (IS_ENABLED(CONFIG_SMP) &&
+	    likely(!(atomic_read(&next->membarrier_state) &
+		     (MEMBARRIER_STATE_PRIVATE_EXPEDITED |
+		      MEMBARRIER_STATE_GLOBAL_EXPEDITED)) || !prev))
+		return;
+
+	/*
+	 * The membarrier system call requires a full memory barrier
+	 * after storing to rq->curr, before going back to user-space.
+	 * Matches a full barrier in the proximity of the membarrier
+	 * system call entry.
+	 */
+	smp_mb();
+}
+
+#endif /* _ASM_RISCV_MEMBARRIER_H */
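Note: the ordering rule this new header encodes is easier to see outside kernel context. Below is a minimal user-space sketch of the same requirement; the names (publish_curr, rq_curr) are illustrative stand-ins, not kernel API:

	/* Sketch: the store publishing the next task (rq->curr in the
	 * kernel) must be followed by a full fence before returning to
	 * user-space, so a concurrent membarrier() system call cannot
	 * miss the switch. The fence below plays the role of smp_mb()
	 * in membarrier_arch_switch_mm() above.
	 */
	#include <stdatomic.h>

	static _Atomic(void *) rq_curr;           /* stand-in for rq->curr */

	static void publish_curr(void *next_task)
	{
		atomic_store_explicit(&rq_curr, next_task,
				      memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst);
		/* ...return to user-space of next_task... */
	}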
@@ -198,7 +198,7 @@ static inline int pud_user(pud_t pud)

 static inline void set_pud(pud_t *pudp, pud_t pud)
 {
-	*pudp = pud;
+	WRITE_ONCE(*pudp, pud);
 }

 static inline void pud_clear(pud_t *pudp)

@@ -274,7 +274,7 @@ static inline unsigned long _pmd_pfn(pmd_t pmd)
 static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
 {
 	if (pgtable_l4_enabled)
-		*p4dp = p4d;
+		WRITE_ONCE(*p4dp, p4d);
 	else
 		set_pud((pud_t *)p4dp, (pud_t){ p4d_val(p4d) });
 }
@@ -336,18 +336,12 @@ static inline struct page *p4d_page(p4d_t p4d)
 #define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

 #define pud_offset pud_offset
-static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
-{
-	if (pgtable_l4_enabled)
-		return p4d_pgtable(*p4d) + pud_index(address);
-
-	return (pud_t *)p4d;
-}
+pud_t *pud_offset(p4d_t *p4d, unsigned long address);

 static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
 {
 	if (pgtable_l5_enabled)
-		*pgdp = pgd;
+		WRITE_ONCE(*pgdp, pgd);
 	else
 		set_p4d((p4d_t *)pgdp, (p4d_t){ pgd_val(pgd) });
 }

@@ -400,12 +394,6 @@ static inline struct page *pgd_page(pgd_t pgd)
 #define p4d_index(addr) (((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))

 #define p4d_offset p4d_offset
-static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
-{
-	if (pgtable_l5_enabled)
-		return pgd_pgtable(*pgd) + p4d_index(address);
-
-	return (p4d_t *)pgd;
-}
+p4d_t *p4d_offset(pgd_t *pgd, unsigned long address);

 #endif /* _ASM_RISCV_PGTABLE_64_H */
@@ -248,7 +248,7 @@ static inline int pmd_leaf(pmd_t pmd)

 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
-	*pmdp = pmd;
+	WRITE_ONCE(*pmdp, pmd);
 }

 static inline void pmd_clear(pmd_t *pmdp)

@@ -515,7 +515,7 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b)
  */
 static inline void set_pte(pte_t *ptep, pte_t pteval)
 {
-	*ptep = pteval;
+	WRITE_ONCE(*ptep, pteval);
 }

 void flush_icache_pte(pte_t pte);
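Note on the pattern above: a plain C store such as *ptep = pteval may legally be split or reordered by the compiler, while page-table entries are read locklessly (and by the hardware walker). A minimal sketch of the paired accessors this series moves RISC-V to, with illustrative names only:

	/* A torn 64-bit entry is the failure mode: a concurrent reader
	 * could observe half old PFN bits, half new. WRITE_ONCE() and
	 * READ_ONCE() force single full-width volatile accesses.
	 */
	#include <linux/compiler.h>

	static inline void set_entry(unsigned long *slot, unsigned long val)
	{
		WRITE_ONCE(*slot, val);		/* one full-width store */
	}

	static inline unsigned long get_entry(unsigned long *slot)
	{
		return READ_ONCE(*slot);	/* pairs with the writer */
	}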
@@ -551,19 +551,12 @@ static inline void pte_clear(struct mm_struct *mm,
 	__set_pte_at(ptep, __pte(0));
 }

-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-static inline int ptep_set_access_flags(struct vm_area_struct *vma,
-					unsigned long address, pte_t *ptep,
-					pte_t entry, int dirty)
-{
-	if (!pte_same(*ptep, entry))
-		__set_pte_at(ptep, entry);
-	/*
-	 * update_mmu_cache will unconditionally execute, handling both
-	 * the case that the PTE changed and the spurious fault case.
-	 */
-	return true;
-}
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS	/* defined in mm/pgtable.c */
+extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
+				 pte_t *ptep, pte_t entry, int dirty);
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG	/* defined in mm/pgtable.c */
+extern int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long address,
+				     pte_t *ptep);

 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,

@@ -576,16 +569,6 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 	return pte;
 }

-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
-					    unsigned long address,
-					    pte_t *ptep)
-{
-	if (!pte_young(*ptep))
-		return 0;
-	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
-}
-
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 static inline void ptep_set_wrprotect(struct mm_struct *mm,
 				      unsigned long address, pte_t *ptep)
@@ -60,7 +60,7 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
 static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
 {
 	efi_memory_desc_t *md = data;
-	pte_t pte = READ_ONCE(*ptep);
+	pte_t pte = ptep_get(ptep);
 	unsigned long val;

 	if (md->attribute & EFI_MEMORY_RO) {
@@ -305,6 +305,9 @@ clear_bss_done:
 #else
 	mv a0, a1
 #endif /* CONFIG_BUILTIN_DTB */
+	/* Set trap vector to spin forever to help debug */
+	la a3, .Lsecondary_park
+	csrw CSR_TVEC, a3
 	call setup_vm
 #ifdef CONFIG_MMU
 	la a0, early_pg_dir
@@ -28,9 +28,8 @@ static void __kprobes arch_prepare_ss_slot(struct kprobe *p)

 	p->ainsn.api.restore = (unsigned long)p->addr + offset;

-	patch_text(p->ainsn.api.insn, &p->opcode, 1);
-	patch_text((void *)((unsigned long)(p->ainsn.api.insn) + offset),
-		   &insn, 1);
+	patch_text_nosync(p->ainsn.api.insn, &p->opcode, 1);
+	patch_text_nosync(p->ainsn.api.insn + offset, &insn, 1);
 }

 static void __kprobes arch_prepare_simulate(struct kprobe *p)
@@ -103,7 +103,7 @@ static bool gstage_get_leaf_entry(struct kvm *kvm, gpa_t addr,
 	*ptep_level = current_level;
 	ptep = (pte_t *)kvm->arch.pgd;
 	ptep = &ptep[gstage_pte_index(addr, current_level)];
-	while (ptep && pte_val(*ptep)) {
+	while (ptep && pte_val(ptep_get(ptep))) {
 		if (gstage_pte_leaf(ptep)) {
 			*ptep_level = current_level;
 			*ptepp = ptep;

@@ -113,7 +113,7 @@ static bool gstage_get_leaf_entry(struct kvm *kvm, gpa_t addr,
 		if (current_level) {
 			current_level--;
 			*ptep_level = current_level;
-			ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
+			ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
 			ptep = &ptep[gstage_pte_index(addr, current_level)];
 		} else {
 			ptep = NULL;

@@ -149,25 +149,25 @@ static int gstage_set_pte(struct kvm *kvm, u32 level,
 		if (gstage_pte_leaf(ptep))
 			return -EEXIST;

-		if (!pte_val(*ptep)) {
+		if (!pte_val(ptep_get(ptep))) {
 			if (!pcache)
 				return -ENOMEM;
 			next_ptep = kvm_mmu_memory_cache_alloc(pcache);
 			if (!next_ptep)
 				return -ENOMEM;
-			*ptep = pfn_pte(PFN_DOWN(__pa(next_ptep)),
-					__pgprot(_PAGE_TABLE));
+			set_pte(ptep, pfn_pte(PFN_DOWN(__pa(next_ptep)),
+					      __pgprot(_PAGE_TABLE)));
 		} else {
 			if (gstage_pte_leaf(ptep))
 				return -EEXIST;
-			next_ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
+			next_ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
 		}

 		current_level--;
 		ptep = &next_ptep[gstage_pte_index(addr, current_level)];
 	}

-	*ptep = *new_pte;
+	set_pte(ptep, *new_pte);
 	if (gstage_pte_leaf(ptep))
 		gstage_remote_tlb_flush(kvm, current_level, addr);

@@ -239,11 +239,11 @@ static void gstage_op_pte(struct kvm *kvm, gpa_t addr,

 	BUG_ON(addr & (page_size - 1));

-	if (!pte_val(*ptep))
+	if (!pte_val(ptep_get(ptep)))
 		return;

 	if (ptep_level && !gstage_pte_leaf(ptep)) {
-		next_ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
+		next_ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
 		next_ptep_level = ptep_level - 1;
 		ret = gstage_level_to_page_size(next_ptep_level,
 						&next_page_size);

@@ -261,7 +261,7 @@ static void gstage_op_pte(struct kvm *kvm, gpa_t addr,
 	if (op == GSTAGE_OP_CLEAR)
 		set_pte(ptep, __pte(0));
 	else if (op == GSTAGE_OP_WP)
-		set_pte(ptep, __pte(pte_val(*ptep) & ~_PAGE_WRITE));
+		set_pte(ptep, __pte(pte_val(ptep_get(ptep)) & ~_PAGE_WRITE));
 	gstage_remote_tlb_flush(kvm, ptep_level, addr);
 }

@@ -603,7 +603,7 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 			    &ptep, &ptep_level))
 		return false;

-	return pte_young(*ptep);
+	return pte_young(ptep_get(ptep));
 }

 int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
@@ -13,10 +13,9 @@ endif
 KCOV_INSTRUMENT_init.o := n

 obj-y += init.o
-obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o
+obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o pgtable.o
 obj-y += cacheflush.o
 obj-y += context.o
-obj-y += pgtable.o
 obj-y += pmem.o

 ifeq ($(CONFIG_MMU),y)
@@ -323,6 +323,8 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	if (unlikely(prev == next))
 		return;

+	membarrier_arch_switch_mm(prev, next, task);
+
 	/*
 	 * Mark the current MM context as inactive, and the next as
 	 * active. This is at least used by the icache flushing
@@ -137,24 +137,24 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
 	pgd = (pgd_t *)pfn_to_virt(pfn) + index;
 	pgd_k = init_mm.pgd + index;

-	if (!pgd_present(*pgd_k)) {
+	if (!pgd_present(pgdp_get(pgd_k))) {
 		no_context(regs, addr);
 		return;
 	}
-	set_pgd(pgd, *pgd_k);
+	set_pgd(pgd, pgdp_get(pgd_k));

 	p4d_k = p4d_offset(pgd_k, addr);
-	if (!p4d_present(*p4d_k)) {
+	if (!p4d_present(p4dp_get(p4d_k))) {
 		no_context(regs, addr);
 		return;
 	}

 	pud_k = pud_offset(p4d_k, addr);
-	if (!pud_present(*pud_k)) {
+	if (!pud_present(pudp_get(pud_k))) {
 		no_context(regs, addr);
 		return;
 	}
-	if (pud_leaf(*pud_k))
+	if (pud_leaf(pudp_get(pud_k)))
 		goto flush_tlb;

 	/*

@@ -162,11 +162,11 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
 	 * to copy individual PTEs
 	 */
 	pmd_k = pmd_offset(pud_k, addr);
-	if (!pmd_present(*pmd_k)) {
+	if (!pmd_present(pmdp_get(pmd_k))) {
 		no_context(regs, addr);
 		return;
 	}
-	if (pmd_leaf(*pmd_k))
+	if (pmd_leaf(pmdp_get(pmd_k)))
 		goto flush_tlb;

 	/*

@@ -176,7 +176,7 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
 	 * silently loop forever.
 	 */
 	pte_k = pte_offset_kernel(pmd_k, addr);
-	if (!pte_present(*pte_k)) {
+	if (!pte_present(ptep_get(pte_k))) {
 		no_context(regs, addr);
 		return;
 	}
@@ -54,7 +54,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 	}

 	if (sz == PMD_SIZE) {
-		if (want_pmd_share(vma, addr) && pud_none(*pud))
+		if (want_pmd_share(vma, addr) && pud_none(pudp_get(pud)))
 			pte = huge_pmd_share(mm, vma, addr, pud);
 		else
 			pte = (pte_t *)pmd_alloc(mm, pud, addr);

@@ -93,11 +93,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
 	pmd_t *pmd;

 	pgd = pgd_offset(mm, addr);
-	if (!pgd_present(*pgd))
+	if (!pgd_present(pgdp_get(pgd)))
 		return NULL;

 	p4d = p4d_offset(pgd, addr);
-	if (!p4d_present(*p4d))
+	if (!p4d_present(p4dp_get(p4d)))
 		return NULL;

 	pud = pud_offset(p4d, addr);

@@ -105,7 +105,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
 		/* must be pud huge, non-present or none */
 		return (pte_t *)pud;

-	if (!pud_present(*pud))
+	if (!pud_present(pudp_get(pud)))
 		return NULL;

 	pmd = pmd_offset(pud, addr);

@@ -113,7 +113,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
 		/* must be pmd huge, non-present or none */
 		return (pte_t *)pmd;

-	if (!pmd_present(*pmd))
+	if (!pmd_present(pmdp_get(pmd)))
 		return NULL;

 	for_each_napot_order(order) {

@@ -351,7 +351,7 @@ void huge_pte_clear(struct mm_struct *mm,
 		    pte_t *ptep,
 		    unsigned long sz)
 {
-	pte_t pte = READ_ONCE(*ptep);
+	pte_t pte = ptep_get(ptep);
 	int i, pte_num;

 	if (!pte_napot(pte)) {
@@ -235,7 +235,7 @@ static void __init setup_bootmem(void)
 	 * The size of the linear page mapping may restrict the amount of
 	 * usable RAM.
 	 */
-	if (IS_ENABLED(CONFIG_64BIT)) {
+	if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_MMU)) {
 		max_mapped_addr = __pa(PAGE_OFFSET) + KERN_VIRT_SIZE;
 		memblock_cap_memory_range(phys_ram_base,
 					  max_mapped_addr - phys_ram_base);
@@ -31,7 +31,7 @@ static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
 	phys_addr_t phys_addr;
 	pte_t *ptep, *p;

-	if (pmd_none(*pmd)) {
+	if (pmd_none(pmdp_get(pmd))) {
 		p = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
 		set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(p)), PAGE_TABLE));
 	}

@@ -39,7 +39,7 @@ static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
 	ptep = pte_offset_kernel(pmd, vaddr);

 	do {
-		if (pte_none(*ptep)) {
+		if (pte_none(ptep_get(ptep))) {
 			phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 			set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
 			memset(__va(phys_addr), KASAN_SHADOW_INIT, PAGE_SIZE);

@@ -53,7 +53,7 @@ static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned long end)
 	pmd_t *pmdp, *p;
 	unsigned long next;

-	if (pud_none(*pud)) {
+	if (pud_none(pudp_get(pud))) {
 		p = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
 		set_pud(pud, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
 	}

@@ -63,7 +63,8 @@ static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned long end)
 	do {
 		next = pmd_addr_end(vaddr, end);

-		if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
+		if (pmd_none(pmdp_get(pmdp)) && IS_ALIGNED(vaddr, PMD_SIZE) &&
+		    (next - vaddr) >= PMD_SIZE) {
 			phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
 			if (phys_addr) {
 				set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));

@@ -83,7 +84,7 @@ static void __init kasan_populate_pud(p4d_t *p4d,
 	pud_t *pudp, *p;
 	unsigned long next;

-	if (p4d_none(*p4d)) {
+	if (p4d_none(p4dp_get(p4d))) {
 		p = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
 		set_p4d(p4d, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
 	}

@@ -93,7 +94,8 @@ static void __init kasan_populate_pud(p4d_t *p4d,
 	do {
 		next = pud_addr_end(vaddr, end);

-		if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
+		if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) &&
+		    (next - vaddr) >= PUD_SIZE) {
 			phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
 			if (phys_addr) {
 				set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));

@@ -113,7 +115,7 @@ static void __init kasan_populate_p4d(pgd_t *pgd,
 	p4d_t *p4dp, *p;
 	unsigned long next;

-	if (pgd_none(*pgd)) {
+	if (pgd_none(pgdp_get(pgd))) {
 		p = memblock_alloc(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE);
 		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
 	}

@@ -123,7 +125,8 @@ static void __init kasan_populate_p4d(pgd_t *pgd,
 	do {
 		next = p4d_addr_end(vaddr, end);

-		if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE) {
+		if (p4d_none(p4dp_get(p4dp)) && IS_ALIGNED(vaddr, P4D_SIZE) &&
+		    (next - vaddr) >= P4D_SIZE) {
 			phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE);
 			if (phys_addr) {
 				set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL));

@@ -145,7 +148,7 @@ static void __init kasan_populate_pgd(pgd_t *pgdp,
 	do {
 		next = pgd_addr_end(vaddr, end);

-		if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
+		if (pgd_none(pgdp_get(pgdp)) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
 		    (next - vaddr) >= PGDIR_SIZE) {
 			phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
 			if (phys_addr) {

@@ -168,7 +171,7 @@ static void __init kasan_early_clear_pud(p4d_t *p4dp,
 	if (!pgtable_l4_enabled) {
 		pudp = (pud_t *)p4dp;
 	} else {
-		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp)));
+		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(p4dp_get(p4dp))));
 		pudp = base_pud + pud_index(vaddr);
 	}

@@ -193,7 +196,7 @@ static void __init kasan_early_clear_p4d(pgd_t *pgdp,
 	if (!pgtable_l5_enabled) {
 		p4dp = (p4d_t *)pgdp;
 	} else {
-		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp)));
+		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(pgdp_get(pgdp))));
 		p4dp = base_p4d + p4d_index(vaddr);
 	}

@@ -239,14 +242,14 @@ static void __init kasan_early_populate_pud(p4d_t *p4dp,
 	if (!pgtable_l4_enabled) {
 		pudp = (pud_t *)p4dp;
 	} else {
-		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp)));
+		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(p4dp_get(p4dp))));
 		pudp = base_pud + pud_index(vaddr);
 	}

 	do {
 		next = pud_addr_end(vaddr, end);

-		if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) &&
+		if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) &&
 		    (next - vaddr) >= PUD_SIZE) {
 			phys_addr = __pa((uintptr_t)kasan_early_shadow_pmd);
 			set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));

@@ -277,14 +280,14 @@ static void __init kasan_early_populate_p4d(pgd_t *pgdp,
 	if (!pgtable_l5_enabled) {
 		p4dp = (p4d_t *)pgdp;
 	} else {
-		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp)));
+		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(pgdp_get(pgdp))));
 		p4dp = base_p4d + p4d_index(vaddr);
 	}

 	do {
 		next = p4d_addr_end(vaddr, end);

-		if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) &&
+		if (p4d_none(p4dp_get(p4dp)) && IS_ALIGNED(vaddr, P4D_SIZE) &&
 		    (next - vaddr) >= P4D_SIZE) {
 			phys_addr = __pa((uintptr_t)kasan_early_shadow_pud);
 			set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE));

@@ -305,7 +308,7 @@ static void __init kasan_early_populate_pgd(pgd_t *pgdp,
 	do {
 		next = pgd_addr_end(vaddr, end);

-		if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
+		if (pgd_none(pgdp_get(pgdp)) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
 		    (next - vaddr) >= PGDIR_SIZE) {
 			phys_addr = __pa((uintptr_t)kasan_early_shadow_p4d);
 			set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));

@@ -381,7 +384,7 @@ static void __init kasan_shallow_populate_pud(p4d_t *p4d,
 	do {
 		next = pud_addr_end(vaddr, end);

-		if (pud_none(*pud_k)) {
+		if (pud_none(pudp_get(pud_k))) {
 			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 			set_pud(pud_k, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
 			continue;

@@ -401,7 +404,7 @@ static void __init kasan_shallow_populate_p4d(pgd_t *pgd,
 	do {
 		next = p4d_addr_end(vaddr, end);

-		if (p4d_none(*p4d_k)) {
+		if (p4d_none(p4dp_get(p4d_k))) {
 			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 			set_p4d(p4d_k, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
 			continue;

@@ -420,7 +423,7 @@ static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
 	do {
 		next = pgd_addr_end(vaddr, end);

-		if (pgd_none(*pgd_k)) {
+		if (pgd_none(pgdp_get(pgd_k))) {
 			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
 			continue;

@@ -451,7 +454,7 @@ static void __init create_tmp_mapping(void)

 	/* Copy the last p4d since it is shared with the kernel mapping. */
 	if (pgtable_l5_enabled) {
-		ptr = (p4d_t *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
+		ptr = (p4d_t *)pgd_page_vaddr(pgdp_get(pgd_offset_k(KASAN_SHADOW_END)));
 		memcpy(tmp_p4d, ptr, sizeof(p4d_t) * PTRS_PER_P4D);
 		set_pgd(&tmp_pg_dir[pgd_index(KASAN_SHADOW_END)],
 			pfn_pgd(PFN_DOWN(__pa(tmp_p4d)), PAGE_TABLE));

@@ -462,7 +465,7 @@ static void __init create_tmp_mapping(void)

 	/* Copy the last pud since it is shared with the kernel mapping. */
 	if (pgtable_l4_enabled) {
-		ptr = (pud_t *)p4d_page_vaddr(*(base_p4d + p4d_index(KASAN_SHADOW_END)));
+		ptr = (pud_t *)p4d_page_vaddr(p4dp_get(base_p4d + p4d_index(KASAN_SHADOW_END)));
 		memcpy(tmp_pud, ptr, sizeof(pud_t) * PTRS_PER_PUD);
 		set_p4d(&base_p4d[p4d_index(KASAN_SHADOW_END)],
 			pfn_p4d(PFN_DOWN(__pa(tmp_pud)), PAGE_TABLE));
@@ -29,7 +29,7 @@ static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
 static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
 			      unsigned long next, struct mm_walk *walk)
 {
-	p4d_t val = READ_ONCE(*p4d);
+	p4d_t val = p4dp_get(p4d);

 	if (p4d_leaf(val)) {
 		val = __p4d(set_pageattr_masks(p4d_val(val), walk));

@@ -42,7 +42,7 @@ static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
 static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
 			      unsigned long next, struct mm_walk *walk)
 {
-	pud_t val = READ_ONCE(*pud);
+	pud_t val = pudp_get(pud);

 	if (pud_leaf(val)) {
 		val = __pud(set_pageattr_masks(pud_val(val), walk));

@@ -55,7 +55,7 @@ static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
 static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
 			      unsigned long next, struct mm_walk *walk)
 {
-	pmd_t val = READ_ONCE(*pmd);
+	pmd_t val = pmdp_get(pmd);

 	if (pmd_leaf(val)) {
 		val = __pmd(set_pageattr_masks(pmd_val(val), walk));

@@ -68,7 +68,7 @@ static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
 static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
 			      unsigned long next, struct mm_walk *walk)
 {
-	pte_t val = READ_ONCE(*pte);
+	pte_t val = ptep_get(pte);

 	val = __pte(set_pageattr_masks(pte_val(val), walk));
 	set_pte(pte, val);
@@ -108,10 +108,10 @@ static int __split_linear_mapping_pmd(pud_t *pudp,
 		    vaddr <= (vaddr & PMD_MASK) && end >= next)
 			continue;

-		if (pmd_leaf(*pmdp)) {
+		if (pmd_leaf(pmdp_get(pmdp))) {
 			struct page *pte_page;
-			unsigned long pfn = _pmd_pfn(*pmdp);
-			pgprot_t prot = __pgprot(pmd_val(*pmdp) & ~_PAGE_PFN_MASK);
+			unsigned long pfn = _pmd_pfn(pmdp_get(pmdp));
+			pgprot_t prot = __pgprot(pmd_val(pmdp_get(pmdp)) & ~_PAGE_PFN_MASK);
 			pte_t *ptep_new;
 			int i;

@@ -148,10 +148,10 @@ static int __split_linear_mapping_pud(p4d_t *p4dp,
 		    vaddr <= (vaddr & PUD_MASK) && end >= next)
 			continue;

-		if (pud_leaf(*pudp)) {
+		if (pud_leaf(pudp_get(pudp))) {
 			struct page *pmd_page;
-			unsigned long pfn = _pud_pfn(*pudp);
-			pgprot_t prot = __pgprot(pud_val(*pudp) & ~_PAGE_PFN_MASK);
+			unsigned long pfn = _pud_pfn(pudp_get(pudp));
+			pgprot_t prot = __pgprot(pud_val(pudp_get(pudp)) & ~_PAGE_PFN_MASK);
 			pmd_t *pmdp_new;
 			int i;

@@ -197,10 +197,10 @@ static int __split_linear_mapping_p4d(pgd_t *pgdp,
 		    vaddr <= (vaddr & P4D_MASK) && end >= next)
 			continue;

-		if (p4d_leaf(*p4dp)) {
+		if (p4d_leaf(p4dp_get(p4dp))) {
 			struct page *pud_page;
-			unsigned long pfn = _p4d_pfn(*p4dp);
-			pgprot_t prot = __pgprot(p4d_val(*p4dp) & ~_PAGE_PFN_MASK);
+			unsigned long pfn = _p4d_pfn(p4dp_get(p4dp));
+			pgprot_t prot = __pgprot(p4d_val(p4dp_get(p4dp)) & ~_PAGE_PFN_MASK);
 			pud_t *pudp_new;
 			int i;
@@ -427,29 +427,29 @@ bool kernel_page_present(struct page *page)
 	pte_t *pte;

 	pgd = pgd_offset_k(addr);
-	if (!pgd_present(*pgd))
+	if (!pgd_present(pgdp_get(pgd)))
 		return false;
-	if (pgd_leaf(*pgd))
+	if (pgd_leaf(pgdp_get(pgd)))
 		return true;

 	p4d = p4d_offset(pgd, addr);
-	if (!p4d_present(*p4d))
+	if (!p4d_present(p4dp_get(p4d)))
 		return false;
-	if (p4d_leaf(*p4d))
+	if (p4d_leaf(p4dp_get(p4d)))
 		return true;

 	pud = pud_offset(p4d, addr);
-	if (!pud_present(*pud))
+	if (!pud_present(pudp_get(pud)))
 		return false;
-	if (pud_leaf(*pud))
+	if (pud_leaf(pudp_get(pud)))
 		return true;

 	pmd = pmd_offset(pud, addr);
-	if (!pmd_present(*pmd))
+	if (!pmd_present(pmdp_get(pmd)))
 		return false;
-	if (pmd_leaf(*pmd))
+	if (pmd_leaf(pmdp_get(pmd)))
 		return true;

 	pte = pte_offset_kernel(pmd, addr);
-	return pte_present(*pte);
+	return pte_present(ptep_get(pte));
 }
@@ -5,6 +5,47 @@
 #include <linux/kernel.h>
 #include <linux/pgtable.h>

+int ptep_set_access_flags(struct vm_area_struct *vma,
+			  unsigned long address, pte_t *ptep,
+			  pte_t entry, int dirty)
+{
+	if (!pte_same(ptep_get(ptep), entry))
+		__set_pte_at(ptep, entry);
+	/*
+	 * update_mmu_cache will unconditionally execute, handling both
+	 * the case that the PTE changed and the spurious fault case.
+	 */
+	return true;
+}
+
+int ptep_test_and_clear_young(struct vm_area_struct *vma,
+			      unsigned long address,
+			      pte_t *ptep)
+{
+	if (!pte_young(ptep_get(ptep)))
+		return 0;
+	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
+}
+EXPORT_SYMBOL_GPL(ptep_test_and_clear_young);
+
+#ifdef CONFIG_64BIT
+pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+{
+	if (pgtable_l4_enabled)
+		return p4d_pgtable(p4dp_get(p4d)) + pud_index(address);
+
+	return (pud_t *)p4d;
+}
+
+p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+{
+	if (pgtable_l5_enabled)
+		return pgd_pgtable(pgdp_get(pgd)) + p4d_index(address);
+
+	return (p4d_t *)pgd;
+}
+#endif
+
 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
 int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
 {
@@ -25,7 +66,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)

 int pud_clear_huge(pud_t *pud)
 {
-	if (!pud_leaf(READ_ONCE(*pud)))
+	if (!pud_leaf(pudp_get(pud)))
 		return 0;
 	pud_clear(pud);
 	return 1;

@@ -33,7 +74,7 @@ int pud_clear_huge(pud_t *pud)

 int pud_free_pmd_page(pud_t *pud, unsigned long addr)
 {
-	pmd_t *pmd = pud_pgtable(*pud);
+	pmd_t *pmd = pud_pgtable(pudp_get(pud));
 	int i;

 	pud_clear(pud);

@@ -63,7 +104,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)

 int pmd_clear_huge(pmd_t *pmd)
 {
-	if (!pmd_leaf(READ_ONCE(*pmd)))
+	if (!pmd_leaf(pmdp_get(pmd)))
 		return 0;
 	pmd_clear(pmd);
 	return 1;

@@ -71,7 +112,7 @@ int pmd_clear_huge(pmd_t *pmd)

 int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 {
-	pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);
+	pte_t *pte = (pte_t *)pmd_page_vaddr(pmdp_get(pmd));

 	pmd_clear(pmd);

@@ -88,7 +129,7 @@ pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
 	pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-	VM_BUG_ON(pmd_trans_huge(*pmdp));
+	VM_BUG_ON(pmd_trans_huge(pmdp_get(pmdp)));
 	/*
 	 * When leaf PTE entries (regular pages) are collapsed into a leaf
 	 * PMD entry (huge page), a valid non-leaf PTE is converted into a
@@ -71,6 +71,15 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	__end_ro_after_init = .;

+	.data.rel.ro : {
+		*(.data.rel.ro .data.rel.ro.*)
+	}
+	.got : {
+		__got_start = .;
+		*(.got)
+		__got_end = .;
+	}
+
 	RW_DATA(0x100, PAGE_SIZE, THREAD_SIZE)
 	BOOT_DATA_PRESERVED
@@ -383,6 +383,7 @@ int setup_one_line(struct line *lines, int n, char *init,
 			parse_chan_pair(NULL, line, n, opts, error_out);
 			err = 0;
 		}
+		*error_out = "configured as 'none'";
 	} else {
 		char *new = kstrdup(init, GFP_KERNEL);
 		if (!new) {

@@ -406,6 +407,7 @@ int setup_one_line(struct line *lines, int n, char *init,
 			}
 		}
 		if (err) {
+			*error_out = "failed to parse channel pair";
 			line->init_str = NULL;
 			line->valid = 0;
 			kfree(new);
@@ -362,7 +362,6 @@ static bool mmio_read(int size, unsigned long addr, unsigned long *val)
 		.r12 = size,
 		.r13 = EPT_READ,
 		.r14 = addr,
-		.r15 = *val,
 	};

 	if (__tdx_hypercall_ret(&args))
@@ -4465,6 +4465,25 @@ static u8 adl_get_hybrid_cpu_type(void)
 	return hybrid_big;
 }

+static inline bool erratum_hsw11(struct perf_event *event)
+{
+	return (event->hw.config & INTEL_ARCH_EVENT_MASK) ==
+		X86_CONFIG(.event=0xc0, .umask=0x01);
+}
+
+/*
+ * The HSW11 requires a period larger than 100 which is the same as the BDM11.
+ * A minimum period of 128 is enforced as well for the INST_RETIRED.ALL.
+ *
+ * The message 'interrupt took too long' can be observed on any counter which
+ * was armed with a period < 32 and two events expired in the same NMI.
+ * A minimum period of 32 is enforced for the rest of the events.
+ */
+static void hsw_limit_period(struct perf_event *event, s64 *left)
+{
+	*left = max(*left, erratum_hsw11(event) ? 128 : 32);
+}
+
 /*
  * Broadwell:
  *

@@ -4482,8 +4501,7 @@ static u8 adl_get_hybrid_cpu_type(void)
  */
 static void bdw_limit_period(struct perf_event *event, s64 *left)
 {
-	if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
-	    X86_CONFIG(.event=0xc0, .umask=0x01)) {
+	if (erratum_hsw11(event)) {
 		if (*left < 128)
 			*left = 128;
 		*left &= ~0x3fULL;

@@ -6392,6 +6410,7 @@ __init int intel_pmu_init(void)

 		x86_pmu.hw_config = hsw_hw_config;
 		x86_pmu.get_event_constraints = hsw_get_event_constraints;
+		x86_pmu.limit_period = hsw_limit_period;
 		x86_pmu.lbr_double_abort = true;
 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
 			hsw_format_attr : nhm_format_attr;
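Note: the clamp hsw_limit_period applies reduces to a small pure function. A sketch under the two thresholds stated in the comment (128 for the HSW11 INST_RETIRED.ALL event, 32 otherwise); the erratum check and types are simplified here:

	static long clamp_period(long left, int is_hsw11_event)
	{
		long floor = is_hsw11_event ? 128 : 32;

		return left > floor ? left : floor;	/* max(left, floor) */
	}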
@@ -589,6 +589,13 @@ struct fpu_state_config {
 	 * even without XSAVE support, i.e. legacy features FP + SSE
 	 */
 	u64 legacy_features;
+	/*
+	 * @independent_features:
+	 *
+	 * Features that are supported by XSAVES, but not managed as part of
+	 * the FPU core, such as LBR
+	 */
+	u64 independent_features;
 };

 /* FPU state configuration information */
@@ -17,6 +17,7 @@ extern unsigned long phys_base;
 extern unsigned long page_offset_base;
 extern unsigned long vmalloc_base;
 extern unsigned long vmemmap_base;
+extern unsigned long physmem_end;

 static __always_inline unsigned long __phys_addr_nodebug(unsigned long x)
 {
@@ -140,6 +140,10 @@ extern unsigned int ptrs_per_p4d;
 # define VMEMMAP_START		__VMEMMAP_BASE_L4
 #endif /* CONFIG_DYNAMIC_MEMORY_LAYOUT */

+#ifdef CONFIG_RANDOMIZE_MEMORY
+# define PHYSMEM_END		physmem_end
+#endif
+
 /*
  * End of the region for which vmalloc page tables are pre-allocated.
  * For non-KMSAN builds, this is the same as VMALLOC_END.
@@ -1812,12 +1812,9 @@ static __init void apic_set_fixmap(bool read_apic);

 static __init void x2apic_disable(void)
 {
-	u32 x2apic_id, state = x2apic_state;
+	u32 x2apic_id;

-	x2apic_mode = 0;
-	x2apic_state = X2APIC_DISABLED;
-
-	if (state != X2APIC_ON)
+	if (x2apic_state < X2APIC_ON)
 		return;

 	x2apic_id = read_apic_id();

@@ -1830,6 +1827,10 @@ static __init void x2apic_disable(void)
 	}

 	__x2apic_disable();
+
+	x2apic_mode = 0;
+	x2apic_state = X2APIC_DISABLED;
+
 	/*
 	 * Don't reread the APIC ID as it was already done from
 	 * check_x2apic() and the APIC driver still is a x2APIC variant,
@@ -788,6 +788,9 @@ void __init fpu__init_system_xstate(unsigned int legacy_size)
 		goto out_disable;
 	}

+	fpu_kernel_cfg.independent_features = fpu_kernel_cfg.max_features &
+					      XFEATURE_MASK_INDEPENDENT;
+
 	/*
 	 * Clear XSAVE features that are disabled in the normal CPUID.
 	 */
@@ -64,9 +64,9 @@ static inline u64 xfeatures_mask_supervisor(void)
 static inline u64 xfeatures_mask_independent(void)
 {
 	if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR))
-		return XFEATURE_MASK_INDEPENDENT & ~XFEATURE_MASK_LBR;
+		return fpu_kernel_cfg.independent_features & ~XFEATURE_MASK_LBR;

-	return XFEATURE_MASK_INDEPENDENT;
+	return fpu_kernel_cfg.independent_features;
 }

 /* XSAVE/XRSTOR wrapper functions */
@@ -2869,6 +2869,12 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_CSTAR:
 		msr_info->data = svm->vmcb01.ptr->save.cstar;
 		break;
+	case MSR_GS_BASE:
+		msr_info->data = svm->vmcb01.ptr->save.gs.base;
+		break;
+	case MSR_FS_BASE:
+		msr_info->data = svm->vmcb01.ptr->save.fs.base;
+		break;
 	case MSR_KERNEL_GS_BASE:
 		msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base;
 		break;

@@ -3090,6 +3096,12 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	case MSR_CSTAR:
 		svm->vmcb01.ptr->save.cstar = data;
 		break;
+	case MSR_GS_BASE:
+		svm->vmcb01.ptr->save.gs.base = data;
+		break;
+	case MSR_FS_BASE:
+		svm->vmcb01.ptr->save.fs.base = data;
+		break;
 	case MSR_KERNEL_GS_BASE:
 		svm->vmcb01.ptr->save.kernel_gs_base = data;
 		break;

@@ -5166,6 +5178,9 @@ static __init void svm_set_cpu_caps(void)

 	/* CPUID 0x8000001F (SME/SEV features) */
 	sev_set_cpu_caps();
+
+	/* Don't advertise Bus Lock Detect to guest if SVM support is absent */
+	kvm_cpu_cap_clear(X86_FEATURE_BUS_LOCK_DETECT);
 }

 static __init int svm_hardware_setup(void)
@@ -5829,7 +5829,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
 			break;

+		kvm_vcpu_srcu_read_lock(vcpu);
 		r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
+		kvm_vcpu_srcu_read_unlock(vcpu);
 		break;
 	}
 	case KVM_GET_DEBUGREGS: {
@@ -25,6 +25,9 @@ static __always_inline void rep_movs(void *to, const void *from, size_t n)

 static void string_memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
 {
+	const void *orig_to = to;
+	const size_t orig_n = n;
+
 	if (unlikely(!n))
 		return;

@@ -39,7 +42,7 @@ static void string_memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
 	}
 	rep_movs(to, (const void *)from, n);
 	/* KMSAN must treat values read from devices as initialized. */
-	kmsan_unpoison_memory(to, n);
+	kmsan_unpoison_memory(orig_to, orig_n);
 }

 static void string_memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
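Note: this fix is the classic saved-arguments pattern: the copy path advances its cursor variables, so the post-copy call must use the values captured on entry. A freestanding sketch; mark_initialized() is a placeholder standing in for kmsan_unpoison_memory(), not a real API:

	static void copy_then_mark(char *to, const char *from, unsigned long n)
	{
		char *orig_to = to;		/* save before the copy */
		unsigned long orig_n = n;	/* loop consumes to and n */

		while (n) {
			*to++ = *from++;
			n--;
		}
		/* mark_initialized(orig_to, orig_n); -- not (to, n): n is 0 here */
	}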
@@ -950,8 +950,12 @@ static void update_end_of_memory_vars(u64 start, u64 size)
 int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
 	      struct mhp_params *params)
 {
+	unsigned long end = ((start_pfn + nr_pages) << PAGE_SHIFT) - 1;
 	int ret;

+	if (WARN_ON_ONCE(end > PHYSMEM_END))
+		return -ERANGE;
+
 	ret = __add_pages(nid, start_pfn, nr_pages, params);
 	WARN_ON_ONCE(ret);
@@ -47,13 +47,24 @@ static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE;
  */
 static __initdata struct kaslr_memory_region {
 	unsigned long *base;
+	unsigned long *end;
 	unsigned long size_tb;
 } kaslr_regions[] = {
-	{ &page_offset_base, 0 },
-	{ &vmalloc_base, 0 },
-	{ &vmemmap_base, 0 },
+	{
+		.base = &page_offset_base,
+		.end = &physmem_end,
+	},
+	{
+		.base = &vmalloc_base,
+	},
+	{
+		.base = &vmemmap_base,
+	},
 };

+/* The end of the possible address space for physical memory */
+unsigned long physmem_end __ro_after_init;
+
 /* Get size in bytes used by the memory region */
 static inline unsigned long get_padding(struct kaslr_memory_region *region)
 {

@@ -82,6 +93,8 @@ void __init kernel_randomize_memory(void)
 	BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE);
 	BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);

+	/* Preset the end of the possible address space for physical memory */
+	physmem_end = ((1ULL << MAX_PHYSMEM_BITS) - 1);
+
 	if (!kaslr_memory_enabled())
 		return;

@@ -128,11 +141,18 @@ void __init kernel_randomize_memory(void)
 		vaddr += entropy;
 		*kaslr_regions[i].base = vaddr;

-		/*
-		 * Jump the region and add a minimum padding based on
-		 * randomization alignment.
-		 */
+		/* Calculate the end of the region */
 		vaddr += get_padding(&kaslr_regions[i]);
+		/*
+		 * KASLR trims the maximum possible size of the
+		 * direct-map. Update the physmem_end boundary.
+		 * No rounding required as the region starts
+		 * PUD aligned and size is in units of TB.
+		 */
+		if (kaslr_regions[i].end)
+			*kaslr_regions[i].end = __pa_nodebug(vaddr - 1);
+
+		/* Add a minimum padding based on randomization alignment. */
 		vaddr = round_up(vaddr + 1, PUD_SIZE);
 		remain_entropy -= entropy;
 	}
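Note: the physmem_end value these hunks introduce is essentially the physical translation of where the randomized direct map ends. A sketch with illustrative names (the real code uses __pa_nodebug() on the region end):

	/* Last usable physical address = last direct-map virtual byte,
	 * translated back to physical by subtracting the map's base.
	 */
	static unsigned long physmem_end_sketch(unsigned long region_vend,
						unsigned long page_offset)
	{
		return (region_vend - 1) - page_offset;
	}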
@@ -241,7 +241,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
 *
 * Returns a pointer to a PTE on success, or NULL on failure.
 */
-static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
+static pte_t *pti_user_pagetable_walk_pte(unsigned long address, bool late_text)
 {
 	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
 	pmd_t *pmd;

@@ -251,10 +251,15 @@ static pte_t *pti_user_pagetable_walk_pte(unsigned long address, bool late_text)
 	if (!pmd)
 		return NULL;

-	/* We can't do anything sensible if we hit a large mapping. */
+	/* Large PMD mapping found */
 	if (pmd_large(*pmd)) {
-		WARN_ON(1);
-		return NULL;
+		/* Clear the PMD if we hit a large mapping from the first round */
+		if (late_text) {
+			set_pmd(pmd, __pmd(0));
+		} else {
+			WARN_ON_ONCE(1);
+			return NULL;
+		}
 	}

 	if (pmd_none(*pmd)) {

@@ -283,7 +288,7 @@ static void __init pti_setup_vsyscall(void)
 	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
 		return;

-	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
+	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR, false);
 	if (WARN_ON(!target_pte))
 		return;

@@ -301,7 +306,7 @@ enum pti_clone_level {

 static void
 pti_clone_pgtable(unsigned long start, unsigned long end,
-		  enum pti_clone_level level)
+		  enum pti_clone_level level, bool late_text)
 {
 	unsigned long addr;

@@ -390,7 +395,7 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
 				return;

 			/* Allocate PTE in the user page-table */
-			target_pte = pti_user_pagetable_walk_pte(addr);
+			target_pte = pti_user_pagetable_walk_pte(addr, late_text);
 			if (WARN_ON(!target_pte))
 				return;

@@ -452,7 +457,7 @@ static void __init pti_clone_user_shared(void)
 		phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
 		pte_t *target_pte;

-		target_pte = pti_user_pagetable_walk_pte(va);
+		target_pte = pti_user_pagetable_walk_pte(va, false);
 		if (WARN_ON(!target_pte))
 			return;

@@ -475,7 +480,7 @@ static void __init pti_clone_user_shared(void)
 	start = CPU_ENTRY_AREA_BASE;
 	end = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);

-	pti_clone_pgtable(start, end, PTI_CLONE_PMD);
+	pti_clone_pgtable(start, end, PTI_CLONE_PMD, false);
 }
 #endif /* CONFIG_X86_64 */

@@ -492,11 +497,11 @@ static void __init pti_setup_espfix64(void)
 /*
 * Clone the populated PMDs of the entry text and force it RO.
 */
-static void pti_clone_entry_text(void)
+static void pti_clone_entry_text(bool late)
 {
 	pti_clone_pgtable((unsigned long) __entry_text_start,
 			  (unsigned long) __entry_text_end,
-			  PTI_LEVEL_KERNEL_IMAGE);
+			  PTI_LEVEL_KERNEL_IMAGE, late);
 }

 /*

@@ -571,7 +576,7 @@ static void pti_clone_kernel_text(void)
 	 * pti_set_kernel_image_nonglobal() did to clear the
 	 * global bit.
 	 */
-	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE);
+	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE, false);

 	/*
 	 * pti_clone_pgtable() will set the global bit in any PMDs

@@ -638,8 +643,15 @@ void __init pti_init(void)

 	/* Undo all global bits from the init pagetables in head_64.S: */
 	pti_set_kernel_image_nonglobal();
-	/* Replace some of the global bits just for shared entry text: */
-	pti_clone_entry_text();
+	/*
+	 * This is very early in boot. Device and Late initcalls can do
+	 * modprobe before free_initmem() and mark_readonly(). This
+	 * pti_clone_entry_text() allows those user-mode-helpers to function,
+	 * but notably the text is still RW.
+	 */
+	pti_clone_entry_text(false);
 	pti_setup_espfix64();
 	pti_setup_vsyscall();
 }

@@ -656,10 +668,11 @@ void pti_finalize(void)
 	if (!boot_cpu_has(X86_FEATURE_PTI))
 		return;
 	/*
-	 * We need to clone everything (again) that maps parts of the
-	 * kernel image.
+	 * This is after free_initmem() (all initcalls are done) and we've done
+	 * mark_readonly(). Text is now NX which might've split some PMDs
+	 * relative to the early clone.
 	 */
-	pti_clone_entry_text();
+	pti_clone_entry_text(true);
 	pti_clone_kernel_text();

 	debug_checkwx_user();
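Read together, the two call sites give this staging, which the comments above describe (a timeline only, not code to run):

	/*
	 * pti_init()                          early boot, entry text still RW
	 *     -> pti_clone_entry_text(false)
	 * ... initcalls run, free_initmem(), mark_readonly() ...
	 * pti_finalize()                      late: NX may have split PMDs
	 *     -> pti_clone_entry_text(true)   may clear stale large PMDs
	 *     -> pti_clone_kernel_text()
	 */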
@@ -479,6 +479,7 @@ static const u32 gaudi2_pb_dcr0_edma0_unsecured_regs[] = {
 	mmDCORE0_EDMA0_CORE_CTX_TE_NUMROWS,
 	mmDCORE0_EDMA0_CORE_CTX_IDX,
 	mmDCORE0_EDMA0_CORE_CTX_IDX_INC,
+	mmDCORE0_EDMA0_CORE_WR_COMP_MAX_OUTSTAND,
 	mmDCORE0_EDMA0_CORE_RD_LBW_RATE_LIM_CFG,
 	mmDCORE0_EDMA0_QM_CQ_CFG0_0,
 	mmDCORE0_EDMA0_QM_CQ_CFG0_1,
@@ -415,7 +415,7 @@ static int acpi_processor_add(struct acpi_device *device,

 	result = acpi_processor_get_info(device);
 	if (result) /* Processor is not physically present or unavailable */
-		return 0;
+		goto err_clear_driver_data;

 	BUG_ON(pr->id >= nr_cpu_ids);

@@ -430,7 +430,7 @@ static int acpi_processor_add(struct acpi_device *device,
 			"BIOS reported wrong ACPI id %d for the processor\n",
 			pr->id);
 		/* Give up, but do not abort the namespace scan. */
-		goto err;
+		goto err_clear_driver_data;
 	}
 	/*
 	 * processor_device_array is not cleared on errors to allow buggy BIOS

@@ -442,12 +442,12 @@ static int acpi_processor_add(struct acpi_device *device,
 	dev = get_cpu_device(pr->id);
 	if (!dev) {
 		result = -ENODEV;
-		goto err;
+		goto err_clear_per_cpu;
 	}

 	result = acpi_bind_one(dev, device);
 	if (result)
-		goto err;
+		goto err_clear_per_cpu;

 	pr->dev = dev;

@@ -458,10 +458,11 @@ static int acpi_processor_add(struct acpi_device *device,
 	dev_err(dev, "Processor driver could not be attached\n");
 	acpi_unbind_one(dev);

-err:
-	free_cpumask_var(pr->throttling.shared_cpu_map);
-	device->driver_data = NULL;
-	per_cpu(processors, pr->id) = NULL;
+err_clear_per_cpu:
+	per_cpu(processors, pr->id) = NULL;
+err_clear_driver_data:
+	device->driver_data = NULL;
+	free_cpumask_var(pr->throttling.shared_cpu_map);
 err_free_pr:
 	kfree(pr);
 	return result;
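Note: the relabelled error path above follows the usual reverse-order unwind idiom: each later failure undoes everything earlier steps set up, and earlier failures skip labels for steps never taken. A generic sketch; step_*/undo_* are placeholders, not functions from this driver:

	static int setup_example(void)
	{
		int err;

		err = step_a();
		if (err)
			return err;		/* nothing to unwind yet */
		err = step_b();
		if (err)
			goto err_undo_a;
		err = step_c();
		if (err)
			goto err_undo_b;
		return 0;

	err_undo_b:
		undo_b();
	err_undo_a:
		undo_a();
		return err;
	}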
@@ -1196,6 +1196,19 @@ int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
 	return cppc_get_perf(cpunum, NOMINAL_PERF, nominal_perf);
 }

+/**
+ * cppc_get_highest_perf - Get the highest performance register value.
+ * @cpunum: CPU from which to get highest performance.
+ * @highest_perf: Return address.
+ *
+ * Return: 0 for success, -EIO otherwise.
+ */
+int cppc_get_highest_perf(int cpunum, u64 *highest_perf)
+{
+	return cppc_get_perf(cpunum, HIGHEST_PERF, highest_perf);
+}
+EXPORT_SYMBOL_GPL(cppc_get_highest_perf);
+
 /**
  * cppc_get_epp_perf - Get the epp register value.
  * @cpunum: CPU from which to get epp preference value.
@@ -5593,8 +5593,10 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
 	}

 	dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
-	if (!dr)
+	if (!dr) {
+		kfree(host);
 		goto err_out;
+	}

 	devres_add(dev, dr);
 	dev_set_drvdata(dev, host);
@@ -242,10 +242,17 @@ void ata_scsi_set_sense_information(struct ata_device *dev,
 */
 static void ata_scsi_set_passthru_sense_fields(struct ata_queued_cmd *qc)
 {
+	struct ata_device *dev = qc->dev;
 	struct scsi_cmnd *cmd = qc->scsicmd;
 	struct ata_taskfile *tf = &qc->result_tf;
 	unsigned char *sb = cmd->sense_buffer;

+	if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
+		ata_dev_dbg(dev,
+			    "missing result TF: can't set ATA PT sense fields\n");
+		return;
+	}
+
 	if ((sb[0] & 0x7f) >= 0x72) {
 		unsigned char *desc;
 		u8 len;

@@ -924,12 +931,16 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
 */
 static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
 {
+	struct ata_device *dev = qc->dev;
 	struct scsi_cmnd *cmd = qc->scsicmd;
 	struct ata_taskfile *tf = &qc->result_tf;
 	unsigned char *sb = cmd->sense_buffer;
 	u8 sense_key, asc, ascq;

 	memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
+
+	if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
+		ata_dev_dbg(dev,
+			    "missing result TF: can't generate ATA PT sense data\n");
+		return;
+	}

 	/*
 	 * Use ata_to_sense_error() to map status register bits

@@ -976,14 +987,19 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
 	u64 block;
 	u8 sense_key, asc, ascq;

 	memset(sb, 0, SCSI_SENSE_BUFFERSIZE);

 	if (ata_dev_disabled(dev)) {
 		/* Device disabled after error recovery */
 		/* LOGICAL UNIT NOT READY, HARD RESET REQUIRED */
 		ata_scsi_set_sense(dev, cmd, NOT_READY, 0x04, 0x21);
 		return;
 	}
+
+	if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
+		ata_dev_dbg(dev,
+			    "missing result TF: can't generate sense data\n");
+		return;
+	}
+
 	/* Use ata_to_sense_error() to map status register bits
 	 * onto sense key, asc & ascq.
 	 */
@@ -541,7 +541,8 @@ static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)

 		while (sg_len) {
 			/* table overflow should never happen */
-			BUG_ON (pi++ >= MAX_DCMDS);
+			if (WARN_ON_ONCE(pi >= MAX_DCMDS))
+				return AC_ERR_SYSTEM;

 			len = (sg_len < MAX_DBDMA_SEG) ? sg_len : MAX_DBDMA_SEG;
 			table->command = cpu_to_le16(write ? OUTPUT_MORE: INPUT_MORE);

@@ -553,11 +554,13 @@ static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
 			addr += len;
 			sg_len -= len;
 			++table;
+			++pi;
 		}
 	}

 	/* Should never happen according to Tejun */
-	BUG_ON(!pi);
+	if (WARN_ON_ONCE(!pi))
+		return AC_ERR_SYSTEM;

 	/* Convert the last command to an input/output */
 	table--;
@@ -567,6 +567,7 @@ void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
 	grp->id = grp;
 	if (id)
 		grp->id = id;
+	grp->color = 0;

 	spin_lock_irqsave(&dev->devres_lock, flags);
 	add_dr(dev, &grp->node[0]);
@@ -110,7 +110,8 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
 	struct maple_tree *mt = map->cache;
 	MA_STATE(mas, mt, min, max);
 	unsigned long *entry, *lower, *upper;
-	unsigned long lower_index, lower_last;
+	/* initialized to work around false-positive -Wuninitialized warning */
+	unsigned long lower_index = 0, lower_last = 0;
 	unsigned long upper_index, upper_last;
 	int ret = 0;
@@ -2603,6 +2603,8 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub,
 	mutex_lock(&ub->mutex);
 	if (!ublk_can_use_recovery(ub))
 		goto out_unlock;
+	if (!ub->nr_queues_ready)
+		goto out_unlock;
 	/*
 	 * START_RECOVERY is only allowd after:
 	 *
@@ -1326,8 +1326,10 @@ static int btnxpuart_close(struct hci_dev *hdev)

 	serdev_device_close(nxpdev->serdev);
 	skb_queue_purge(&nxpdev->txq);
-	kfree_skb(nxpdev->rx_skb);
-	nxpdev->rx_skb = NULL;
+	if (!IS_ERR_OR_NULL(nxpdev->rx_skb)) {
+		kfree_skb(nxpdev->rx_skb);
+		nxpdev->rx_skb = NULL;
+	}
 	clear_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state);
 	return 0;
 }

@@ -1342,8 +1344,10 @@ static int btnxpuart_flush(struct hci_dev *hdev)

 	cancel_work_sync(&nxpdev->tx_work);

-	kfree_skb(nxpdev->rx_skb);
-	nxpdev->rx_skb = NULL;
+	if (!IS_ERR_OR_NULL(nxpdev->rx_skb)) {
+		kfree_skb(nxpdev->rx_skb);
+		nxpdev->rx_skb = NULL;
+	}

 	return 0;
 }
@@ -1090,6 +1090,7 @@ static void qca_controller_memdump(struct work_struct *work)
 			qca->memdump_state = QCA_MEMDUMP_COLLECTED;
 			cancel_delayed_work(&qca->ctrl_memdump_timeout);
 			clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
+			clear_bit(QCA_IBS_DISABLED, &qca->flags);
 			mutex_unlock(&qca->hci_memdump_lock);
 			return;
 		}
@@ -40,7 +40,7 @@

 #define PLL_USER_CTL(p)		((p)->offset + (p)->regs[PLL_OFF_USER_CTL])
 # define PLL_POST_DIV_SHIFT	8
-# define PLL_POST_DIV_MASK(p)	GENMASK((p)->width, 0)
+# define PLL_POST_DIV_MASK(p)	GENMASK((p)->width - 1, 0)
 # define PLL_ALPHA_EN		BIT(24)
 # define PLL_ALPHA_MODE		BIT(25)
 # define PLL_VCO_SHIFT		20

@@ -1478,8 +1478,8 @@ clk_trion_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
 	}

 	return regmap_update_bits(regmap, PLL_USER_CTL(pll),
-				  PLL_POST_DIV_MASK(pll) << PLL_POST_DIV_SHIFT,
-				  val << PLL_POST_DIV_SHIFT);
+				  PLL_POST_DIV_MASK(pll) << pll->post_div_shift,
+				  val << pll->post_div_shift);
 }

 const struct clk_ops clk_alpha_pll_postdiv_trion_ops = {
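Note: the mask fix above is a one-bit-too-wide GENMASK: a field that is `width` bits wide needs GENMASK(width - 1, 0). A self-contained check; GENMASK_SKETCH is a plain-C restatement for illustration, not the kernel macro:

	#include <assert.h>

	#define GENMASK_SKETCH(h, l) \
		((~0ULL >> (63 - (h))) & (~0ULL << (l)))

	int main(void)
	{
		/* width = 4: the post-div field */
		assert(GENMASK_SKETCH(4 - 1, 0) == 0xfULL);  /* correct */
		assert(GENMASK_SKETCH(4, 0) == 0x1fULL);     /* clobbers bit 4 */
		return 0;
	}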
@@ -176,6 +176,7 @@ extern const struct clk_ops clk_byte2_ops;
 extern const struct clk_ops clk_pixel_ops;
 extern const struct clk_ops clk_gfx3d_ops;
 extern const struct clk_ops clk_rcg2_shared_ops;
+extern const struct clk_ops clk_rcg2_shared_no_init_park_ops;
 extern const struct clk_ops clk_dp_ops;

 struct clk_rcg_dfs_data {
@@ -1182,6 +1182,36 @@ const struct clk_ops clk_rcg2_shared_ops = {
 };
 EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);

+static int clk_rcg2_shared_no_init_park(struct clk_hw *hw)
+{
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+	/*
+	 * Read the config register so that the parent is properly mapped at
+	 * registration time.
+	 */
+	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &rcg->parked_cfg);
+
+	return 0;
+}
+
+/*
+ * Like clk_rcg2_shared_ops but skip the init so that the clk frequency is left
+ * unchanged at registration time.
+ */
+const struct clk_ops clk_rcg2_shared_no_init_park_ops = {
+	.init = clk_rcg2_shared_no_init_park,
+	.enable = clk_rcg2_shared_enable,
+	.disable = clk_rcg2_shared_disable,
+	.get_parent = clk_rcg2_shared_get_parent,
+	.set_parent = clk_rcg2_shared_set_parent,
+	.recalc_rate = clk_rcg2_shared_recalc_rate,
+	.determine_rate = clk_rcg2_determine_rate,
+	.set_rate = clk_rcg2_shared_set_rate,
+	.set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_shared_no_init_park_ops);
+
 /* Common APIs to be used for DFS based RCGR */
 static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
 				       struct freq_tbl *f)
@@ -65,7 +65,7 @@ static const struct clk_parent_data gcc_sleep_clk_data[] = {

 static struct clk_alpha_pll gpll0_main = {
 	.offset = 0x20000,
-	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
 	.clkr = {
 		.enable_reg = 0x0b000,
 		.enable_mask = BIT(0),

@@ -93,7 +93,7 @@ static struct clk_fixed_factor gpll0_out_main_div2 = {

 static struct clk_alpha_pll_postdiv gpll0 = {
 	.offset = 0x20000,
-	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
 	.width = 4,
 	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "gpll0",

@@ -107,7 +107,7 @@ static struct clk_alpha_pll_postdiv gpll0 = {

 static struct clk_alpha_pll gpll4_main = {
 	.offset = 0x22000,
-	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
 	.clkr = {
 		.enable_reg = 0x0b000,
 		.enable_mask = BIT(2),

@@ -122,7 +122,7 @@ static struct clk_alpha_pll gpll4_main = {

 static struct clk_alpha_pll_postdiv gpll4 = {
 	.offset = 0x22000,
-	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
 	.width = 4,
 	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "gpll4",

@@ -136,7 +136,7 @@ static struct clk_alpha_pll_postdiv gpll4 = {

 static struct clk_alpha_pll gpll2_main = {
 	.offset = 0x21000,
-	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
 	.clkr = {
 		.enable_reg = 0x0b000,
 		.enable_mask = BIT(1),

@@ -151,7 +151,7 @@ static struct clk_alpha_pll gpll2_main = {

 static struct clk_alpha_pll_postdiv gpll2 = {
 	.offset = 0x21000,
-	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
 	.width = 4,
 	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "gpll2",
@@ -536,7 +536,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s0_clk_src = {
 		.parent_data = gcc_parent_data_0,
 		.num_parents = ARRAY_SIZE(gcc_parent_data_0),
 		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_rcg2_shared_ops,
+		.ops = &clk_rcg2_ops,
 	},
 };
 

@@ -551,7 +551,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s1_clk_src = {
 		.parent_data = gcc_parent_data_0,
 		.num_parents = ARRAY_SIZE(gcc_parent_data_0),
 		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_rcg2_shared_ops,
+		.ops = &clk_rcg2_ops,
 	},
 };
 

@@ -566,7 +566,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s2_clk_src = {
 		.parent_data = gcc_parent_data_0,
 		.num_parents = ARRAY_SIZE(gcc_parent_data_0),
 		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_rcg2_shared_ops,
+		.ops = &clk_rcg2_ops,
 	},
 };
 

@@ -581,7 +581,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s3_clk_src = {
 		.parent_data = gcc_parent_data_0,
 		.num_parents = ARRAY_SIZE(gcc_parent_data_0),
 		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_rcg2_shared_ops,
+		.ops = &clk_rcg2_ops,
 	},
 };
 

@@ -596,7 +596,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s4_clk_src = {
 		.parent_data = gcc_parent_data_0,
 		.num_parents = ARRAY_SIZE(gcc_parent_data_0),
 		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_rcg2_shared_ops,
+		.ops = &clk_rcg2_ops,
 	},
 };
 

@@ -611,7 +611,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s5_clk_src = {
 		.parent_data = gcc_parent_data_0,
 		.num_parents = ARRAY_SIZE(gcc_parent_data_0),
 		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_rcg2_shared_ops,
+		.ops = &clk_rcg2_ops,
 	},
 };
 

@@ -626,7 +626,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s6_clk_src = {
 		.parent_data = gcc_parent_data_0,
 		.num_parents = ARRAY_SIZE(gcc_parent_data_0),
 		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_rcg2_shared_ops,
+		.ops = &clk_rcg2_ops,
 	},
 };
 

@@ -641,7 +641,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s7_clk_src = {
 		.parent_data = gcc_parent_data_0,
 		.num_parents = ARRAY_SIZE(gcc_parent_data_0),
 		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_rcg2_shared_ops,
+		.ops = &clk_rcg2_ops,
 	},
 };
 

@@ -656,7 +656,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s8_clk_src = {
 		.parent_data = gcc_parent_data_0,
 		.num_parents = ARRAY_SIZE(gcc_parent_data_0),
 		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_rcg2_shared_ops,
+		.ops = &clk_rcg2_ops,
 	},
 };
 

@@ -671,7 +671,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s9_clk_src = {
 		.parent_data = gcc_parent_data_0,
 		.num_parents = ARRAY_SIZE(gcc_parent_data_0),
 		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_rcg2_shared_ops,
+		.ops = &clk_rcg2_ops,
 	},
 };
 

@@ -700,7 +700,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
 	.parent_data = gcc_parent_data_0,
 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
 	.flags = CLK_SET_RATE_PARENT,
-	.ops = &clk_rcg2_shared_ops,
+	.ops = &clk_rcg2_ops,
 };
 
 static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {

@@ -717,7 +717,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
 	.parent_data = gcc_parent_data_0,
 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
 	.flags = CLK_SET_RATE_PARENT,
-	.ops = &clk_rcg2_shared_ops,
+	.ops = &clk_rcg2_ops,
 };
 
 static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {

@@ -750,7 +750,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
 	.parent_data = gcc_parent_data_0,
 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
 	.flags = CLK_SET_RATE_PARENT,
-	.ops = &clk_rcg2_shared_ops,
+	.ops = &clk_rcg2_ops,
 };
 
 static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {

@@ -767,7 +767,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
 	.parent_data = gcc_parent_data_0,
 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
 	.flags = CLK_SET_RATE_PARENT,
-	.ops = &clk_rcg2_shared_ops,
+	.ops = &clk_rcg2_ops,
 };
 
 static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {

@@ -784,7 +784,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
 	.parent_data = gcc_parent_data_0,
 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
 	.flags = CLK_SET_RATE_PARENT,
-	.ops = &clk_rcg2_shared_ops,
+	.ops = &clk_rcg2_ops,
 };
 
 static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {

@@ -801,7 +801,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
 	.parent_data = gcc_parent_data_0,
 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
 	.flags = CLK_SET_RATE_PARENT,
-	.ops = &clk_rcg2_shared_ops,
+	.ops = &clk_rcg2_ops,
 };
 
 static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {

@@ -818,7 +818,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
 	.parent_data = gcc_parent_data_0,
 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
 	.flags = CLK_SET_RATE_PARENT,
-	.ops = &clk_rcg2_shared_ops,
+	.ops = &clk_rcg2_ops,
 };
 
 static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {

@@ -835,7 +835,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
 	.parent_data = gcc_parent_data_0,
 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
 	.flags = CLK_SET_RATE_PARENT,
-	.ops = &clk_rcg2_shared_ops,
+	.ops = &clk_rcg2_ops,
 };
 
 static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {

@@ -852,7 +852,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
 	.parent_data = gcc_parent_data_0,
 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
 	.flags = CLK_SET_RATE_PARENT,
-	.ops = &clk_rcg2_shared_ops,
+	.ops = &clk_rcg2_ops,
 };
 
 static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {

@@ -869,7 +869,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
 	.parent_data = gcc_parent_data_0,
 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
 	.flags = CLK_SET_RATE_PARENT,
-	.ops = &clk_rcg2_shared_ops,
+	.ops = &clk_rcg2_ops,
 };
 
 static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {

@@ -886,7 +886,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = {
 	.parent_data = gcc_parent_data_0,
 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
 	.flags = CLK_SET_RATE_PARENT,
-	.ops = &clk_rcg2_shared_ops,
+	.ops = &clk_rcg2_ops,
 };
 
 static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {

@@ -903,7 +903,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = {
 	.parent_data = gcc_parent_data_0,
 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
 	.flags = CLK_SET_RATE_PARENT,
-	.ops = &clk_rcg2_shared_ops,
+	.ops = &clk_rcg2_ops,
 };
 
 static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {

@@ -920,7 +920,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
 	.parent_data = gcc_parent_data_0,
 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
 	.flags = CLK_SET_RATE_PARENT,
-	.ops = &clk_rcg2_shared_ops,
+	.ops = &clk_rcg2_ops,
 };
 
 static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {

@@ -937,7 +937,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
 	.parent_data = gcc_parent_data_0,
 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
 	.flags = CLK_SET_RATE_PARENT,
-	.ops = &clk_rcg2_shared_ops,
+	.ops = &clk_rcg2_ops,
 };
 
 static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {

@@ -975,7 +975,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s6_clk_src_init = {
 	.parent_data = gcc_parent_data_8,
 	.num_parents = ARRAY_SIZE(gcc_parent_data_8),
 	.flags = CLK_SET_RATE_PARENT,
-	.ops = &clk_rcg2_shared_ops,
+	.ops = &clk_rcg2_ops,
 };
 
 static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = {

@@ -992,7 +992,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s7_clk_src_init = {
 	.parent_data = gcc_parent_data_0,
 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
 	.flags = CLK_SET_RATE_PARENT,
-	.ops = &clk_rcg2_shared_ops,
+	.ops = &clk_rcg2_ops,
 };
 
 static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = {

@@ -1159,7 +1159,7 @@ static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
 		.parent_data = gcc_parent_data_0,
 		.num_parents = ARRAY_SIZE(gcc_parent_data_0),
 		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_rcg2_shared_ops,
+		.ops = &clk_rcg2_shared_no_init_park_ops,
 	},
 };
 
@@ -385,6 +385,32 @@ int jh7110_reset_controller_register(struct jh71x0_clk_priv *priv,
 }
 EXPORT_SYMBOL_GPL(jh7110_reset_controller_register);
 
+/*
+ * This clock notifier is called when the rate of PLL0 clock is to be changed.
+ * The cpu_root clock should save the curent parent clock and switch its parent
+ * clock to osc before PLL0 rate will be changed. Then switch its parent clock
+ * back after the PLL0 rate is completed.
+ */
+static int jh7110_pll0_clk_notifier_cb(struct notifier_block *nb,
+				       unsigned long action, void *data)
+{
+	struct jh71x0_clk_priv *priv = container_of(nb, struct jh71x0_clk_priv, pll_clk_nb);
+	struct clk *cpu_root = priv->reg[JH7110_SYSCLK_CPU_ROOT].hw.clk;
+	int ret = 0;
+
+	if (action == PRE_RATE_CHANGE) {
+		struct clk *osc = clk_get(priv->dev, "osc");
+
+		priv->original_clk = clk_get_parent(cpu_root);
+		ret = clk_set_parent(cpu_root, osc);
+		clk_put(osc);
+	} else if (action == POST_RATE_CHANGE) {
+		ret = clk_set_parent(cpu_root, priv->original_clk);
+	}
+
+	return notifier_from_errno(ret);
+}
+
 static int __init jh7110_syscrg_probe(struct platform_device *pdev)
 {
 	struct jh71x0_clk_priv *priv;

@@ -413,7 +439,10 @@ static int __init jh7110_syscrg_probe(struct platform_device *pdev)
 		if (IS_ERR(priv->pll[0]))
 			return PTR_ERR(priv->pll[0]);
 	} else {
-		clk_put(pllclk);
+		priv->pll_clk_nb.notifier_call = jh7110_pll0_clk_notifier_cb;
+		ret = clk_notifier_register(pllclk, &priv->pll_clk_nb);
+		if (ret)
+			return ret;
 		priv->pll[0] = NULL;
 	}
 
@@ -114,6 +114,8 @@ struct jh71x0_clk_priv {
 	spinlock_t rmw_lock;
 	struct device *dev;
 	void __iomem *base;
+	struct clk *original_clk;
+	struct notifier_block pll_clk_nb;
 	struct clk_hw *pll[3];
 	struct jh71x0_clk reg[];
 };
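The notifier above moves cpu_root off PLL0 before the rate change and back after it, converting any clk_set_parent() error with notifier_from_errno() so the clk core can veto the PRE_RATE_CHANGE. A standalone sketch of that return convention (constants copied from include/linux/notifier.h; the demo itself is not part of the patch):

    #include <stdio.h>

    #define NOTIFY_OK        0x0001
    #define NOTIFY_STOP_MASK 0x8000
    #define NOTIFY_BAD       (NOTIFY_STOP_MASK | 0x0002)

    /* mirrors notifier_from_errno(): fold an errno into a NOTIFY_* code */
    static int notifier_from_errno(int err)
    {
        if (err)
            return NOTIFY_STOP_MASK | (NOTIFY_BAD - err);
        return NOTIFY_OK;
    }

    int main(void)
    {
        printf("success: %#x\n", notifier_from_errno(0));    /* NOTIFY_OK */
        printf("-EINVAL: %#x\n", notifier_from_errno(-22));  /* stops the rate change */
        return 0;
    }
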
@@ -83,20 +83,28 @@ static u64 notrace tpm_read_sched_clock(void)
 static int tpm_set_next_event(unsigned long delta,
 				struct clock_event_device *evt)
 {
-	unsigned long next, now;
+	unsigned long next, prev, now;
 
-	next = tpm_read_counter();
-	next += delta;
+	prev = tpm_read_counter();
+	next = prev + delta;
 	writel(next, timer_base + TPM_C0V);
 	now = tpm_read_counter();
 
 	/*
 	 * Need to wait CNT increase at least 1 cycle to make sure
 	 * the C0V has been updated into HW.
 	 */
-	if ((next & 0xffffffff) != readl(timer_base + TPM_C0V))
+	while (now == tpm_read_counter())
+		;
 
 	/*
 	 * NOTE: We observed in a very small probability, the bus fabric
 	 * contention between GPU and A7 may results a few cycles delay
 	 * of writing CNT registers which may cause the min_delta event got
 	 * missed, so we need add a ETIME check here in case it happened.
 	 */
-	return (int)(next - now) <= 0 ? -ETIME : 0;
+	return (now - prev) >= delta ? -ETIME : 0;
 }
 
 static int tpm_set_state_oneshot(struct clock_event_device *evt)
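A standalone sketch (not part of the patch) of why the rewritten -ETIME check survives deltas above INT_MAX: the old signed test can flag an event that is still in the future, while comparing elapsed ticks against delta cannot. The counter values below are invented:

    #include <stdio.h>
    #include <stdint.h>

    static int old_check(uint32_t next, uint32_t now)
    {
        return (int32_t)(next - now) <= 0;      /* -ETIME when true */
    }

    static int new_check(uint32_t now, uint32_t prev, uint32_t delta)
    {
        return now - prev >= delta;             /* -ETIME when true */
    }

    int main(void)
    {
        uint32_t prev  = 0xfffffff0u;           /* counter about to wrap */
        uint32_t delta = 0x80000010u;           /* larger than INT_MAX */
        uint32_t next  = prev + delta;          /* wraps to 0x80000000 */
        uint32_t now   = prev + 2;              /* only 2 ticks elapsed */

        /* old: next - now = 0x8000000e, negative as int32_t -> spurious -ETIME */
        printf("old says missed: %d\n", old_check(next, now));
        /* new: 2 >= 0x80000010 is false -> correctly reports success */
        printf("new says missed: %d\n", new_check(now, prev, delta));
        return 0;
    }
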
@@ -25,10 +25,7 @@ static void timer_of_irq_exit(struct of_timer_irq *of_irq)
 
 	struct clock_event_device *clkevt = &to->clkevt;
 
-	if (of_irq->percpu)
-		free_percpu_irq(of_irq->irq, clkevt);
-	else
-		free_irq(of_irq->irq, clkevt);
+	free_irq(of_irq->irq, clkevt);
 }
 
 /**

@@ -42,9 +39,6 @@ static void timer_of_irq_exit(struct of_timer_irq *of_irq)
  * - Get interrupt number by name
  * - Get interrupt number by index
  *
- * When the interrupt is per CPU, 'request_percpu_irq()' is called,
- * otherwise 'request_irq()' is used.
- *
  * Returns 0 on success, < 0 otherwise
  */
 static int timer_of_irq_init(struct device_node *np,

@@ -69,12 +63,9 @@ static int timer_of_irq_init(struct device_node *np,
 		return -EINVAL;
 	}
 
-	ret = of_irq->percpu ?
-		request_percpu_irq(of_irq->irq, of_irq->handler,
-				   np->full_name, clkevt) :
-		request_irq(of_irq->irq, of_irq->handler,
-			    of_irq->flags ? of_irq->flags : IRQF_TIMER,
-			    np->full_name, clkevt);
+	ret = request_irq(of_irq->irq, of_irq->handler,
+			  of_irq->flags ? of_irq->flags : IRQF_TIMER,
+			  np->full_name, clkevt);
 	if (ret) {
 		pr_err("Failed to request irq %d for %pOF\n", of_irq->irq, np);
 		return ret;
@@ -11,7 +11,6 @@
 struct of_timer_irq {
 	int irq;
 	int index;
-	int percpu;
 	const char *name;
 	unsigned long flags;
 	irq_handler_t handler;
@@ -37,6 +37,7 @@
 #include <linux/uaccess.h>
 #include <linux/static_call.h>
 #include <linux/amd-pstate.h>
+#include <linux/topology.h>
 
 #include <acpi/processor.h>
 #include <acpi/cppc_acpi.h>

@@ -49,6 +50,8 @@
 
 #define AMD_PSTATE_TRANSITION_LATENCY	20000
 #define AMD_PSTATE_TRANSITION_DELAY	1000
+#define CPPC_HIGHEST_PERF_PERFORMANCE	196
+#define CPPC_HIGHEST_PERF_DEFAULT	166
 
 /*
  * TODO: We need more time to fine tune processors with shared memory solution

@@ -64,6 +67,7 @@ static struct cpufreq_driver amd_pstate_driver;
 static struct cpufreq_driver amd_pstate_epp_driver;
 static int cppc_state = AMD_PSTATE_UNDEFINED;
 static bool cppc_enabled;
+static bool amd_pstate_prefcore = true;
 
 /*
  * AMD Energy Preference Performance (EPP)

@@ -310,6 +314,21 @@ static inline int amd_pstate_enable(bool enable)
 	return static_call(amd_pstate_enable)(enable);
 }
 
+static u32 amd_pstate_highest_perf_set(struct amd_cpudata *cpudata)
+{
+	struct cpuinfo_x86 *c = &cpu_data(0);
+
+	/*
+	 * For AMD CPUs with Family ID 19H and Model ID range 0x70 to 0x7f,
+	 * the highest performance level is set to 196.
+	 * https://bugzilla.kernel.org/show_bug.cgi?id=218759
+	 */
+	if (c->x86 == 0x19 && (c->x86_model >= 0x70 && c->x86_model <= 0x7f))
+		return CPPC_HIGHEST_PERF_PERFORMANCE;
+
+	return CPPC_HIGHEST_PERF_DEFAULT;
+}
+
 static int pstate_init_perf(struct amd_cpudata *cpudata)
 {
 	u64 cap1;

@@ -320,13 +339,14 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
 	if (ret)
 		return ret;
 
-	/*
-	 * TODO: Introduce AMD specific power feature.
-	 *
-	 * CPPC entry doesn't indicate the highest performance in some ASICs.
+	/* For platforms that do not support the preferred core feature, the
+	 * highest_pef may be configured with 166 or 255, to avoid max frequency
+	 * calculated wrongly. we take the AMD_CPPC_HIGHEST_PERF(cap1) value as
+	 * the default max perf.
 	 */
-	highest_perf = amd_get_highest_perf();
-	if (highest_perf > AMD_CPPC_HIGHEST_PERF(cap1))
+	if (cpudata->hw_prefcore)
+		highest_perf = amd_pstate_highest_perf_set(cpudata);
+	else
 		highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
 
 	WRITE_ONCE(cpudata->highest_perf, highest_perf);

@@ -347,8 +367,9 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
 	if (ret)
 		return ret;
 
-	highest_perf = amd_get_highest_perf();
-	if (highest_perf > cppc_perf.highest_perf)
+	if (cpudata->hw_prefcore)
+		highest_perf = amd_pstate_highest_perf_set(cpudata);
+	else
 		highest_perf = cppc_perf.highest_perf;
 
 	WRITE_ONCE(cpudata->highest_perf, highest_perf);

@@ -709,6 +730,80 @@ static void amd_perf_ctl_reset(unsigned int cpu)
 	wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
 }
 
+/*
+ * Set amd-pstate preferred core enable can't be done directly from cpufreq callbacks
+ * due to locking, so queue the work for later.
+ */
+static void amd_pstste_sched_prefcore_workfn(struct work_struct *work)
+{
+	sched_set_itmt_support();
+}
+static DECLARE_WORK(sched_prefcore_work, amd_pstste_sched_prefcore_workfn);
+
+/*
+ * Get the highest performance register value.
+ * @cpu: CPU from which to get highest performance.
+ * @highest_perf: Return address.
+ *
+ * Return: 0 for success, -EIO otherwise.
+ */
+static int amd_pstate_get_highest_perf(int cpu, u32 *highest_perf)
+{
+	int ret;
+
+	if (boot_cpu_has(X86_FEATURE_CPPC)) {
+		u64 cap1;
+
+		ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
+		if (ret)
+			return ret;
+		WRITE_ONCE(*highest_perf, AMD_CPPC_HIGHEST_PERF(cap1));
+	} else {
+		u64 cppc_highest_perf;
+
+		ret = cppc_get_highest_perf(cpu, &cppc_highest_perf);
+		if (ret)
+			return ret;
+		WRITE_ONCE(*highest_perf, cppc_highest_perf);
+	}
+
+	return (ret);
+}
+
+#define CPPC_MAX_PERF	U8_MAX
+
+static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
+{
+	int ret, prio;
+	u32 highest_perf;
+
+	ret = amd_pstate_get_highest_perf(cpudata->cpu, &highest_perf);
+	if (ret)
+		return;
+
+	cpudata->hw_prefcore = true;
+	/* check if CPPC preferred core feature is enabled*/
+	if (highest_perf < CPPC_MAX_PERF)
+		prio = (int)highest_perf;
+	else {
+		pr_debug("AMD CPPC preferred core is unsupported!\n");
+		cpudata->hw_prefcore = false;
+		return;
+	}
+
+	if (!amd_pstate_prefcore)
+		return;
+
+	/*
+	 * The priorities can be set regardless of whether or not
+	 * sched_set_itmt_support(true) has been called and it is valid to
+	 * update them at any time after it has been called.
+	 */
+	sched_set_itmt_core_prio(prio, cpudata->cpu);
+
+	schedule_work(&sched_prefcore_work);
+}
+
 static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
 {
 	int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;

@@ -730,6 +825,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
 
 	cpudata->cpu = policy->cpu;
 
+	amd_pstate_init_prefcore(cpudata);
+
 	ret = amd_pstate_init_perf(cpudata);
 	if (ret)
 		goto free_cpudata1;

@@ -880,6 +977,17 @@ static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
 	return sysfs_emit(buf, "%u\n", perf);
 }
 
+static ssize_t show_amd_pstate_hw_prefcore(struct cpufreq_policy *policy,
+					   char *buf)
+{
+	bool hw_prefcore;
+	struct amd_cpudata *cpudata = policy->driver_data;
+
+	hw_prefcore = READ_ONCE(cpudata->hw_prefcore);
+
+	return sysfs_emit(buf, "%s\n", str_enabled_disabled(hw_prefcore));
+}
+
 static ssize_t show_energy_performance_available_preferences(
 				struct cpufreq_policy *policy, char *buf)
 {

@@ -1077,18 +1185,27 @@ static ssize_t status_store(struct device *a, struct device_attribute *b,
 	return ret < 0 ? ret : count;
 }
 
+static ssize_t prefcore_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	return sysfs_emit(buf, "%s\n", str_enabled_disabled(amd_pstate_prefcore));
+}
+
 cpufreq_freq_attr_ro(amd_pstate_max_freq);
 cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);
 
 cpufreq_freq_attr_ro(amd_pstate_highest_perf);
+cpufreq_freq_attr_ro(amd_pstate_hw_prefcore);
 cpufreq_freq_attr_rw(energy_performance_preference);
 cpufreq_freq_attr_ro(energy_performance_available_preferences);
 static DEVICE_ATTR_RW(status);
+static DEVICE_ATTR_RO(prefcore);
 
 static struct freq_attr *amd_pstate_attr[] = {
 	&amd_pstate_max_freq,
 	&amd_pstate_lowest_nonlinear_freq,
 	&amd_pstate_highest_perf,
+	&amd_pstate_hw_prefcore,
 	NULL,
 };

@@ -1096,6 +1213,7 @@ static struct freq_attr *amd_pstate_epp_attr[] = {
 	&amd_pstate_max_freq,
 	&amd_pstate_lowest_nonlinear_freq,
 	&amd_pstate_highest_perf,
+	&amd_pstate_hw_prefcore,
 	&energy_performance_preference,
 	&energy_performance_available_preferences,
 	NULL,

@@ -1103,6 +1221,7 @@ static struct freq_attr *amd_pstate_epp_attr[] = {
 
 static struct attribute *pstate_global_attributes[] = {
 	&dev_attr_status.attr,
+	&dev_attr_prefcore.attr,
 	NULL
 };
 

@@ -1154,6 +1273,8 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
 	cpudata->cpu = policy->cpu;
 	cpudata->epp_policy = 0;
 
+	amd_pstate_init_prefcore(cpudata);
+
 	ret = amd_pstate_init_perf(cpudata);
 	if (ret)
 		goto free_cpudata1;

@@ -1577,7 +1698,17 @@ static int __init amd_pstate_param(char *str)
 
 	return amd_pstate_set_driver(mode_idx);
 }
+
+static int __init amd_prefcore_param(char *str)
+{
+	if (!strcmp(str, "disable"))
+		amd_pstate_prefcore = false;
+
+	return 0;
+}
+
 early_param("amd_pstate", amd_pstate_param);
+early_param("amd_prefcore", amd_prefcore_param);
 
 MODULE_AUTHOR("Huang Rui <ray.huang@amd.com>");
 MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver");
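A standalone sketch (not part of the patch) of the rule amd_pstate_init_prefcore() applies above: a platform without preferred-core support reports the saturated value U8_MAX (255) as highest perf on every CPU, while any lower reading doubles as that CPU's ITMT priority. The per-CPU readings below are invented:

    #include <stdio.h>
    #include <stdint.h>

    #define CPPC_MAX_PERF 255u      /* U8_MAX, as in the driver */

    /* returns an ITMT priority, or -1 if preferred core is unsupported */
    static int prefcore_prio(uint32_t highest_perf)
    {
        return highest_perf < CPPC_MAX_PERF ? (int)highest_perf : -1;
    }

    int main(void)
    {
        uint32_t perf[] = { 231, 166, 255 };    /* hypothetical readings */

        for (int cpu = 0; cpu < 3; cpu++) {
            int prio = prefcore_prio(perf[cpu]);
            if (prio < 0)
                printf("cpu%d: preferred core unsupported\n", cpu);
            else
                printf("cpu%d: ITMT priority %d\n", cpu, prio);
        }
        return 0;
    }
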
@@ -100,7 +100,9 @@ static u32 adf_gen2_disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
 	errmsk3 |= ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK);
 	ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
 
-	errmsk3 &= ADF_GEN2_ERR_MSK_VF2PF(sources | disabled);
+	/* Update only section of errmsk3 related to VF2PF */
+	errmsk3 &= ~ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK);
+	errmsk3 |= ADF_GEN2_ERR_MSK_VF2PF(sources | disabled);
 	ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
 
 	/* Return the sources of the (new) interrupt(s) */

@@ -191,8 +191,12 @@ static u32 disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
 	ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
 	ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);
 
-	errmsk3 &= ADF_DH895XCC_ERR_MSK_VF2PF_L(sources | disabled);
-	errmsk5 &= ADF_DH895XCC_ERR_MSK_VF2PF_U(sources | disabled);
+	/* Update only section of errmsk3 and errmsk5 related to VF2PF */
+	errmsk3 &= ~ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK);
+	errmsk5 &= ~ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK);
+
+	errmsk3 |= ADF_DH895XCC_ERR_MSK_VF2PF_L(sources | disabled);
+	errmsk5 |= ADF_DH895XCC_ERR_MSK_VF2PF_U(sources | disabled);
 	ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
 	ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);
 
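Both hunks above replace a bare "errmsk &= MASK(sources | disabled)" with a clear-then-set sequence. A standalone sketch of the difference, using an invented register layout (not the real QAT one): AND-ing with only the new bits can never set a bit, and it also wipes every field outside VF2PF:

    #include <stdio.h>
    #include <stdint.h>

    #define VF_MSK   0xffffu            /* hypothetical VF2PF field: bits 0-15 */
    #define VF2PF(x) ((x) & VF_MSK)

    int main(void)
    {
        uint32_t errmsk = 0xabcd0000u | VF2PF(0x00f0u); /* upper bits unrelated */
        uint32_t wanted = 0x000fu;                      /* new VF2PF contents */

        uint32_t old_way = errmsk & VF2PF(wanted);      /* drops 0xabcd0000, can't set bits */
        uint32_t new_way = (errmsk & ~VF2PF(VF_MSK)) | VF2PF(wanted);

        printf("old: %08x\nnew: %08x\n", old_way, new_way); /* 00000000 vs abcd000f */
        return 0;
    }
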
@@ -30,6 +30,7 @@
 #define MAX_KEY_SIZE			SHA512_BLOCK_SIZE
 #define STARFIVE_AES_IV_LEN		AES_BLOCK_SIZE
 #define STARFIVE_AES_CTR_LEN		AES_BLOCK_SIZE
+#define STARFIVE_RSA_MAX_KEYSZ		256
 
 union starfive_aes_csr {
 	u32 v;

@@ -212,12 +213,11 @@ struct starfive_cryp_request_ctx {
 	struct scatterlist *out_sg;
 	struct ahash_request ahash_fbk_req;
 	size_t total;
-	size_t nents;
 	unsigned int blksize;
 	unsigned int digsize;
 	unsigned long in_sg_len;
 	unsigned char *adata;
-	u8 rsa_data[] __aligned(sizeof(u32));
+	u8 rsa_data[STARFIVE_RSA_MAX_KEYSZ] __aligned(sizeof(u32));
 };
 
 struct starfive_cryp_dev *starfive_cryp_find_dev(struct starfive_cryp_ctx *ctx);
@@ -37,7 +37,6 @@
 // A * A * R mod N   ==> A
 #define CRYPTO_CMD_AARN	0x7
 
-#define STARFIVE_RSA_MAX_KEYSZ	256
 #define STARFIVE_RSA_RESET	0x2
 
 static inline int starfive_pka_wait_done(struct starfive_cryp_ctx *ctx)

@@ -91,7 +90,7 @@ static int starfive_rsa_montgomery_form(struct starfive_cryp_ctx *ctx,
 {
 	struct starfive_cryp_dev *cryp = ctx->cryp;
 	struct starfive_cryp_request_ctx *rctx = ctx->rctx;
-	int count = rctx->total / sizeof(u32) - 1;
+	int count = (ALIGN(rctx->total, 4) / 4) - 1;
 	int loop;
 	u32 temp;
 	u8 opsize;

@@ -274,12 +273,17 @@ static int starfive_rsa_enc_core(struct starfive_cryp_ctx *ctx, int enc)
 	struct starfive_cryp_dev *cryp = ctx->cryp;
 	struct starfive_cryp_request_ctx *rctx = ctx->rctx;
 	struct starfive_rsa_key *key = &ctx->rsa_key;
-	int ret = 0;
+	int ret = 0, shift = 0;
 
 	writel(STARFIVE_RSA_RESET, cryp->base + STARFIVE_PKA_CACR_OFFSET);
 
-	rctx->total = sg_copy_to_buffer(rctx->in_sg, rctx->nents,
-					rctx->rsa_data, rctx->total);
+	if (!IS_ALIGNED(rctx->total, sizeof(u32))) {
+		shift = sizeof(u32) - (rctx->total & 0x3);
+		memset(rctx->rsa_data, 0, shift);
+	}
+
+	rctx->total = sg_copy_to_buffer(rctx->in_sg, sg_nents(rctx->in_sg),
+					rctx->rsa_data + shift, rctx->total);
 
 	if (enc) {
 		key->bitlen = key->e_bitlen;

@@ -329,7 +333,6 @@ static int starfive_rsa_enc(struct akcipher_request *req)
 	rctx->in_sg = req->src;
 	rctx->out_sg = req->dst;
 	rctx->total = req->src_len;
-	rctx->nents = sg_nents(rctx->in_sg);
 	ctx->rctx = rctx;
 
 	return starfive_rsa_enc_core(ctx, 1);
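A standalone sketch (not part of the patch) of the padding scheme the RSA fix introduces: when the input length is not a multiple of 4, zero the first 4 - (len & 3) bytes of the destination and copy the data after them, so the big-endian value is unchanged but the buffer stays word-aligned:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t in[5] = { 0x01, 0x02, 0x03, 0x04, 0x05 };   /* 5-byte input */
        uint8_t buf[8] = { 0 };
        size_t total = sizeof(in), shift = 0;

        if (total & 0x3) {
            shift = 4 - (total & 0x3);  /* 3 leading zero bytes here */
            memset(buf, 0, shift);
        }
        memcpy(buf + shift, in, total); /* sg_copy_to_buffer() in the driver */

        for (size_t i = 0; i < shift + total; i++)
            printf("%02x ", buf[i]);    /* 00 00 00 01 02 03 04 05 */
        printf("\n");
        return 0;
    }
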
@@ -1528,10 +1528,13 @@ static int cxl_region_attach_position(struct cxl_region *cxlr,
 			  const struct cxl_dport *dport, int pos)
 {
 	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+	struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd;
+	struct cxl_decoder *cxld = &cxlsd->cxld;
+	int iw = cxld->interleave_ways;
 	struct cxl_port *iter;
 	int rc;
 
-	if (cxlrd->calc_hb(cxlrd, pos) != dport) {
+	if (dport != cxlrd->cxlsd.target[pos % iw]) {
 		dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n",
 			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
 			dev_name(&cxlrd->cxlsd.cxld.dev));
@@ -796,6 +796,9 @@ int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl,
 
 	lockdep_assert_held(&ctl->dsp->pwr_lock);
 
+	if (ctl->flags && !(ctl->flags & WMFW_CTL_FLAG_WRITEABLE))
+		return -EPERM;
+
 	if (len + off * sizeof(u32) > ctl->len)
 		return -EINVAL;
 
@@ -713,6 +713,7 @@ static int rockchip_gpio_probe(struct platform_device *pdev)
 		return -ENODEV;
 
 	pctldev = of_pinctrl_get(pctlnp);
+	of_node_put(pctlnp);
 	if (!pctldev)
 		return -EPROBE_DEFER;
 
@@ -146,6 +146,7 @@ static const struct of_device_id modepin_platform_id[] = {
 	{ .compatible = "xlnx,zynqmp-gpio-modepin", },
 	{ }
 };
+MODULE_DEVICE_TABLE(of, modepin_platform_id);
 
 static struct platform_driver modepin_platform_driver = {
 	.driver = {
@@ -1096,6 +1096,21 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 	unsigned int i;
 	int r;
 
+	/*
+	 * We can't use gang submit on with reserved VMIDs when the VM changes
+	 * can't be invalidated by more than one engine at the same time.
+	 */
+	if (p->gang_size > 1 && !p->adev->vm_manager.concurrent_flush) {
+		for (i = 0; i < p->gang_size; ++i) {
+			struct drm_sched_entity *entity = p->entities[i];
+			struct drm_gpu_scheduler *sched = entity->rq->sched;
+			struct amdgpu_ring *ring = to_amdgpu_ring(sched);
+
+			if (amdgpu_vmid_uses_reserved(vm, ring->vm_hub))
+				return -EINVAL;
+		}
+	}
+
 	r = amdgpu_vm_clear_freed(adev, vm, NULL);
 	if (r)
 		return r;
@@ -909,8 +909,7 @@ static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb)
 {
 	u64 micro_tile_mode;
 
-	/* Zero swizzle mode means linear */
-	if (AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0)
+	if (AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) == 1) /* LINEAR_ALIGNED */
 		return 0;
 
 	micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE);

@@ -1034,6 +1033,30 @@ static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
 			block_width = 256 / format_info->cpp[i];
 			block_height = 1;
 			block_size_log2 = 8;
+		} else if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX12) {
+			int swizzle = AMD_FMT_MOD_GET(TILE, modifier);
+
+			switch (swizzle) {
+			case AMD_FMT_MOD_TILE_GFX12_256B_2D:
+				block_size_log2 = 8;
+				break;
+			case AMD_FMT_MOD_TILE_GFX12_4K_2D:
+				block_size_log2 = 12;
+				break;
+			case AMD_FMT_MOD_TILE_GFX12_64K_2D:
+				block_size_log2 = 16;
+				break;
+			case AMD_FMT_MOD_TILE_GFX12_256K_2D:
+				block_size_log2 = 18;
+				break;
+			default:
+				drm_dbg_kms(rfb->base.dev,
+					    "Gfx12 swizzle mode with unknown block size: %d\n", swizzle);
+				return -EINVAL;
+			}
+
+			get_block_dimensions(block_size_log2, format_info->cpp[i],
+					     &block_width, &block_height);
 		} else {
 			int swizzle = AMD_FMT_MOD_GET(TILE, modifier);
 

@@ -1069,7 +1092,8 @@ static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
 			return ret;
 	}
 
-	if (AMD_FMT_MOD_GET(DCC, modifier)) {
+	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) <= AMD_FMT_MOD_TILE_VER_GFX11 &&
+	    AMD_FMT_MOD_GET(DCC, modifier)) {
 		if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) {
 			block_size_log2 = get_dcc_block_size(modifier, false, false);
 			get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
@@ -409,7 +409,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	if (r || !idle)
 		goto error;
 
-	if (vm->reserved_vmid[vmhub] || (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)))) {
+	if (amdgpu_vmid_uses_reserved(vm, vmhub)) {
 		r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
 		if (r || !id)
 			goto error;

@@ -459,6 +459,19 @@ error:
 	return r;
 }
 
+/*
+ * amdgpu_vmid_uses_reserved - check if a VM will use a reserved VMID
+ * @vm: the VM to check
+ * @vmhub: the VMHUB which will be used
+ *
+ * Returns: True if the VM will use a reserved VMID.
+ */
+bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub)
+{
+	return vm->reserved_vmid[vmhub] ||
+		(enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)));
+}
+
 int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
 			       unsigned vmhub)
 {
@@ -78,6 +78,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
 
 bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
 			       struct amdgpu_vmid *id);
+bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub);
 int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
 			       unsigned vmhub);
 void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
@@ -137,8 +137,10 @@ int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
 
 	if (virt->ops && virt->ops->req_full_gpu) {
 		r = virt->ops->req_full_gpu(adev, init);
-		if (r)
+		if (r) {
+			adev->no_hw_access = true;
 			return r;
+		}
 
 		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
 	}
@@ -4269,11 +4269,11 @@ static int gfx_v11_0_hw_init(void *handle)
 			/* RLC autoload sequence 1: Program rlc ram */
 			if (adev->gfx.imu.funcs->program_rlc_ram)
 				adev->gfx.imu.funcs->program_rlc_ram(adev);
+
+			/* rlc autoload firmware */
+			r = gfx_v11_0_rlc_backdoor_autoload_enable(adev);
+			if (r)
+				return r;
 		}
-
-		/* rlc autoload firmware */
-		r = gfx_v11_0_rlc_backdoor_autoload_enable(adev);
-		if (r)
-			return r;
 	} else {
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
 			if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) {
@@ -135,6 +135,34 @@ static int ih_v6_0_toggle_ring_interrupts(struct amdgpu_device *adev,
 
 	tmp = RREG32(ih_regs->ih_rb_cntl);
 	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
+
+	if (enable) {
+		/* Unset the CLEAR_OVERFLOW bit to make sure the next step
+		 * is switching the bit from 0 to 1
+		 */
+		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+		if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+			if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
+				return -ETIMEDOUT;
+		} else {
+			WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+		}
+
+		/* Clear RB_OVERFLOW bit */
+		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+		if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+			if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
+				return -ETIMEDOUT;
+		} else {
+			WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+		}
+
+		/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+		 * can be detected.
+		 */
+		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+	}
+
 	/* enable_intr field is only valid in ring0 */
 	if (ih == &adev->irq.ih)
 		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
@@ -6937,7 +6937,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
 			}
 		}
 
-		if (j == dc_state->stream_count)
+		if (j == dc_state->stream_count || pbn_div == 0)
 			continue;
 
 		slot_num = DIV_ROUND_UP(pbn, pbn_div);
@@ -629,14 +629,14 @@ static bool construct_phy(struct dc_link *link,
 	link->link_enc =
 		link->dc->res_pool->funcs->link_enc_create(dc_ctx, &enc_init_data);
 
-	DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C);
-	DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE);
-
 	if (!link->link_enc) {
 		DC_ERROR("Failed to create link encoder!\n");
 		goto link_enc_create_fail;
 	}
 
+	DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C);
+	DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE);
+
 	/* Update link encoder tracking variables. These are used for the dynamic
 	 * assignment of link encoders to streams.
 	 */
@@ -433,17 +433,20 @@ static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp,
 	}
 
-	if (status == MOD_HDCP_STATUS_SUCCESS)
-		mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+	if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
 			&input->bstatus_read, &status,
-			hdcp, "bstatus_read");
+			hdcp, "bstatus_read"))
+		goto out;
+
-	if (status == MOD_HDCP_STATUS_SUCCESS)
-		mod_hdcp_execute_and_set(check_link_integrity_dp,
+	if (!mod_hdcp_execute_and_set(check_link_integrity_dp,
 			&input->link_integrity_check, &status,
-			hdcp, "link_integrity_check");
+			hdcp, "link_integrity_check"))
+		goto out;
+
-	if (status == MOD_HDCP_STATUS_SUCCESS)
-		mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
+	if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
 			&input->reauth_request_check, &status,
-			hdcp, "reauth_request_check");
+			hdcp, "reauth_request_check"))
+		goto out;
 
 out:
 	return status;
 }
@@ -1883,7 +1883,8 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
 		smu_dpm_ctx->dpm_level = level;
 	}
 
-	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
+	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
 		index = fls(smu->workload_mask);
 		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
 		workload[0] = smu->workload_setting[index];

@@ -1962,7 +1963,8 @@ static int smu_switch_power_profile(void *handle,
 		workload[0] = smu->workload_setting[index];
 	}
 
-	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
+	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
 		smu_bump_power_profile_mode(smu, workload, 0);
 
 	return 0;
@@ -304,7 +304,7 @@ void intel_gsc_uc_load_start(struct intel_gsc_uc *gsc)
 {
 	struct intel_gt *gt = gsc_uc_to_gt(gsc);
 
-	if (!intel_uc_fw_is_loadable(&gsc->fw))
+	if (!intel_uc_fw_is_loadable(&gsc->fw) || intel_uc_fw_is_in_error(&gsc->fw))
 		return;
 
 	if (intel_gsc_uc_fw_init_done(gsc))
@@ -258,6 +258,11 @@ static inline bool intel_uc_fw_is_running(struct intel_uc_fw *uc_fw)
 	return __intel_uc_fw_status(uc_fw) == INTEL_UC_FIRMWARE_RUNNING;
 }
 
+static inline bool intel_uc_fw_is_in_error(struct intel_uc_fw *uc_fw)
+{
+	return intel_uc_fw_status_to_error(__intel_uc_fw_status(uc_fw)) != 0;
+}
+
 static inline bool intel_uc_fw_is_overridden(const struct intel_uc_fw *uc_fw)
 {
 	return uc_fw->user_overridden;
@@ -51,7 +51,7 @@ static inline void debug_fence_init(struct i915_sw_fence *fence)
 	debug_object_init(fence, &i915_sw_fence_debug_descr);
 }
 
-static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
+static inline __maybe_unused void debug_fence_init_onstack(struct i915_sw_fence *fence)
 {
 	debug_object_init_on_stack(fence, &i915_sw_fence_debug_descr);
 }

@@ -77,7 +77,7 @@ static inline void debug_fence_destroy(struct i915_sw_fence *fence)
 	debug_object_destroy(fence, &i915_sw_fence_debug_descr);
 }
 
-static inline void debug_fence_free(struct i915_sw_fence *fence)
+static inline __maybe_unused void debug_fence_free(struct i915_sw_fence *fence)
 {
 	debug_object_free(fence, &i915_sw_fence_debug_descr);
 	smp_wmb(); /* flush the change in state before reallocation */

@@ -94,7 +94,7 @@ static inline void debug_fence_init(struct i915_sw_fence *fence)
 {
 }
 
-static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
+static inline __maybe_unused void debug_fence_init_onstack(struct i915_sw_fence *fence)
 {
 }
 

@@ -115,7 +115,7 @@ static inline void debug_fence_destroy(struct i915_sw_fence *fence)
 {
 }
 
-static inline void debug_fence_free(struct i915_sw_fence *fence)
+static inline __maybe_unused void debug_fence_free(struct i915_sw_fence *fence)
 {
 }
 
@@ -171,11 +171,13 @@ err_hid_data:
 void amdtp_hid_remove(struct amdtp_cl_data *cli_data)
 {
 	int i;
+	struct amdtp_hid_data *hid_data;
 
 	for (i = 0; i < cli_data->num_hid_devices; ++i) {
 		if (cli_data->hid_sensor_hubs[i]) {
-			kfree(cli_data->hid_sensor_hubs[i]->driver_data);
+			hid_data = cli_data->hid_sensor_hubs[i]->driver_data;
 			hid_destroy_device(cli_data->hid_sensor_hubs[i]);
+			kfree(hid_data);
 			cli_data->hid_sensor_hubs[i] = NULL;
 		}
 	}
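The hunk above reorders a teardown: save the driver_data pointer, destroy the HID device (whose teardown path may still dereference it), and only then free it. A standalone sketch of the pattern (the structures are stand-ins, not the real amd_sfh types):

    #include <stdio.h>
    #include <stdlib.h>

    struct dev { void *driver_data; };

    /* stand-in for hid_destroy_device(): still reads d->driver_data */
    static void destroy_device(struct dev *d)
    {
        printf("tearing down, driver_data=%p\n", d->driver_data);
    }

    int main(void)
    {
        struct dev d = { .driver_data = malloc(16) };
        void *priv = d.driver_data;     /* grab the pointer first */

        /* old, buggy order: free(d.driver_data); destroy_device(&d); */
        destroy_device(&d);             /* fixed order: destroy ... */
        free(priv);                     /* ... then free */
        return 0;
    }
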
@@ -106,7 +106,7 @@ static void cougar_fix_g6_mapping(void)
 static __u8 *cougar_report_fixup(struct hid_device *hdev, __u8 *rdesc,
 				 unsigned int *rsize)
 {
-	if (rdesc[2] == 0x09 && rdesc[3] == 0x02 &&
+	if (*rsize >= 117 && rdesc[2] == 0x09 && rdesc[3] == 0x02 &&
 	    (rdesc[115] | rdesc[116] << 8) >= HID_MAX_USAGES) {
 		hid_info(hdev,
 			 "usage count exceeds max: fixing up report descriptor\n");
@@ -1962,6 +1962,7 @@ void vmbus_device_unregister(struct hv_device *device_obj)
 	 */
 	device_unregister(&device_obj->device);
 }
+EXPORT_SYMBOL_GPL(vmbus_device_unregister);
 
 #ifdef CONFIG_ACPI
 /*
@@ -176,7 +176,7 @@ static ssize_t adc128_in_store(struct device *dev,
 
 	mutex_lock(&data->update_lock);
 	/* 10 mV LSB on limit registers */
-	regval = clamp_val(DIV_ROUND_CLOSEST(val, 10), 0, 255);
+	regval = DIV_ROUND_CLOSEST(clamp_val(val, 0, 2550), 10);
 	data->in[index][nr] = regval << 4;
 	reg = index == 1 ? ADC128_REG_IN_MIN(nr) : ADC128_REG_IN_MAX(nr);
 	i2c_smbus_write_byte_data(data->client, reg, regval);

@@ -214,7 +214,7 @@ static ssize_t adc128_temp_store(struct device *dev,
 		return err;
 
 	mutex_lock(&data->update_lock);
-	regval = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
+	regval = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000);
 	data->temp[index] = regval << 1;
 	i2c_smbus_write_byte_data(data->client,
 				  index == 1 ? ADC128_REG_TEMP_MAX
@@ -1637,6 +1637,8 @@ static void hp_wmi_notify(u32 value, void *context)
 		goto out_unlock;
 
 	wobj = out.pointer;
+	if (!wobj)
+		goto out_unlock;
 
 	err = populate_event_from_wobj(dev, &event, wobj);
 	if (err) {
@@ -301,7 +301,8 @@ static ssize_t tcrit2_store(struct device *dev, struct device_attribute *attr,
 	if (ret < 0)
 		return ret;
 
-	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, index ? 255 : 127);
+	val = DIV_ROUND_CLOSEST(clamp_val(val, 0, (index ? 255 : 127) * 1000),
+				1000);
 
 	mutex_lock(&data->update_lock);
 	data->tcrit2[index] = val;

@@ -350,7 +351,7 @@ static ssize_t tcrit1_store(struct device *dev, struct device_attribute *attr,
 	if (ret < 0)
 		return ret;
 
-	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 255);
+	val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 255000), 1000);
 
 	mutex_lock(&data->update_lock);
 	data->tcrit1[index] = val;

@@ -391,7 +392,7 @@ static ssize_t tcrit1_hyst_store(struct device *dev,
 	if (ret < 0)
 		return ret;
 
-	val = DIV_ROUND_CLOSEST(val, 1000);
+	val = DIV_ROUND_CLOSEST(clamp_val(val, -255000, 255000), 1000);
 	val = clamp_val((int)data->tcrit1[index] - val, 0, 31);
 
 	mutex_lock(&data->update_lock);

@@ -431,7 +432,7 @@ static ssize_t offset_store(struct device *dev, struct device_attribute *attr,
 		return ret;
 
 	/* Accuracy is 1/2 degrees C */
-	val = clamp_val(DIV_ROUND_CLOSEST(val, 500), -128, 127);
+	val = DIV_ROUND_CLOSEST(clamp_val(val, -64000, 63500), 500);
 
 	mutex_lock(&data->update_lock);
 	data->toffset[index] = val;
@@ -2262,7 +2262,7 @@ store_temp_offset(struct device *dev, struct device_attribute *attr,
 	if (err < 0)
 		return err;
 
-	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
+	val = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000);
 
 	mutex_lock(&data->update_lock);
 	data->temp_offset[nr] = val;
@@ -895,7 +895,7 @@ store_target_temp(struct device *dev, struct device_attribute *attr,
 	if (err < 0)
 		return err;
 
-	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 127);
+	val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 127000), 1000);
 
 	mutex_lock(&data->update_lock);
 	data->target_temp[nr] = val;

@@ -920,7 +920,7 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
 		return err;
 
 	/* Limit the temp to 0C - 15C */
-	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 15);
+	val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 15000), 1000);
 
 	mutex_lock(&data->update_lock);
 	reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]);
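Every hwmon hunk above (adc128d818, lm95234, nct6775, w83627ehf) applies the same reordering: clamp the raw user value first, then divide, instead of dividing first and clamping the quotient. A standalone sketch of the failure mode with the old order; the wraparound is emulated with unsigned arithmetic because the original out-of-range addition inside DIV_ROUND_CLOSEST() is undefined behaviour, and rounding is elided in the fixed branch:

    #include <stdio.h>
    #include <limits.h>

    static long clamp_val(long v, long lo, long hi)
    {
        return v < lo ? lo : (v > hi ? hi : v);
    }

    int main(void)
    {
        long val = LONG_MAX;    /* e.g. echo 9223372036854775807 > temp1_max */

        /* old order: DIV_ROUND_CLOSEST(val, 1000) adds 500 before dividing,
         * which wraps for val near LONG_MAX, so the clamp sees garbage */
        long wrapped = (long)((unsigned long)val + 500) / 1000;
        printf("old: %ld\n", clamp_val(wrapped, -128, 127));   /* -128 (!) */

        /* new order: clamp the millidegree value first, then divide */
        printf("new: %ld\n", clamp_val(val, -128000, 127000) / 1000); /* 127 */
        return 0;
    }
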
Some files were not shown because too many files have changed in this diff.