Merge 6.12.36 into android16-6.12-lts

GKI (arm64) relevant: 43 out of 213 changes, affecting 47 files (+324/-116)
  8a997e1ab5 mailbox: Not protect module_put with spin_lock_irqsave [1 file, +1/-1]
  dcd5b32139 leds: multicolor: Fix intensity setting while SW blinking [1 file, +2/-1]
  3a0f33c420 fuse: fix race between concurrent setattrs from multiple nodes [1 file, +11/-0]
  9f0fa01811 PCI: dwc: Make link training more robust by setting PORT_LOGIC_LINK_WIDTH to one lane [1 file, +1/-4]
  53809d38ec usb: Add checks for snprintf() calls in usb_alloc_dev() [1 file, +10/-4]
  cf69fedbb1 usb: gadget: f_hid: wake up readers on disable/unbind [1 file, +17/-2]
  10cc2cfd3e usb: typec: displayport: Receive DP Status Update NAK request exit dp altmode [1 file, +4/-0]
  1ef2737432 usb: typec: mux: do not return on EOPNOTSUPP in {mux, switch}_set [1 file, +2/-2]
  1818fc3602 ALSA: usb-audio: Add a quirk for Lenovo Thinkpad Thunderbolt 3 dock [1 file, +2/-0]
  ae2353d862 scsi: ufs: core: Don't perform UFS clkscaling during host async scan [1 file, +3/-0]
  0437390086 ovl: Check for NULL d_inode() in ovl_dentry_upper() [1 file, +3/-1]
  4149f0ee5e f2fs: don't over-report free space or inodes in statvfs [1 file, +18/-12]
  61a9ad7b69 af_unix: Don't leave consecutive consumed OOB skbs. [1 file, +11/-2]
  93abf5e0d5 fs/proc/task_mmu: fix PAGE_IS_PFNZERO detection for the huge zero folio [1 file, +1/-1]
  29d39e0d5f lib/group_cpus: fix NULL pointer dereference from group_cpus_evenly() [1 file, +8/-1]
  a4f182ffa3 HID: wacom: fix crash in wacom_aes_battery_handler() [1 file, +1/-0]
  ce23b73f0f Bluetooth: hci_core: Fix use-after-free in vhci_flush() [2 files, +32/-4]
  0ee87c2814 ALSA: usb-audio: Fix out-of-bounds read in snd_usb_get_audioformat_uac3() [1 file, +2/-0]
  b10a795364 attach_recursive_mnt(): do not lock the covering tree when sliding something under it [1 file, +4/-4]
  949060a623 af_unix: Don't set -ECONNRESET for consumed OOB skb. [1 file, +12/-6]
  dbcd546400 vsock/uapi: fix linux/vm_sockets.h userspace compilation errors [1 file, +4/-0]
  104048a4a4 net: selftests: fix TCP packet checksum [1 file, +3/-2]
  2b8788496f serial: core: restore of_node information in sysfs [1 file, +1/-0]
  5a8400ebc2 Bluetooth: L2CAP: Fix L2CAP MTU negotiation [1 file, +8/-1]
  cf95f8426f maple_tree: fix MA_STATE_PREALLOC flag in mas_preallocate() [1 file, +3/-1]
  7b4ac8433c mm/gup: revert "mm: gup: fix infinite loop within __get_longterm_locked" [1 file, +10/-4]
  c465f52333 f2fs: fix to zero post-eof page [1 file, +38/-0]
  a85999b987 HID: wacom: fix memory leak on kobject creation failure [1 file, +3/-1]
  2746d02066 HID: wacom: fix memory leak on sysfs attribute creation failure [1 file, +1/-0]
  70017f56b1 HID: wacom: fix kobject reference count leak [1 file, +1/-0]
  6a87e79404 scsi: ufs: core: Fix clk scaling to be conditional in reset and restore [1 file, +2/-1]
  0519b61075 media: uvcvideo: Rollback non processed entities on error [1 file, +23/-11]
  8b8a366e8c io_uring: fix potential page leak in io_sqe_buffer_register() [1 file, +5/-4]
  53fd75f25b io_uring/rsrc: fix folio unpinning [1 file, +9/-4]
  50998b0ae7 io_uring/rsrc: don't rely on user vaddr alignment [2 files, +5/-1]
  399214d703 io_uring/net: improve recv bundles [1 file, +18/-0]
  0c07f2bf49 io_uring/net: only retry recv bundle for a full transfer [1 file, +10/-4]
  725fcba8bd io_uring/net: only consider msg_inq if larger than 1 [1 file, +2/-2]
  b8be3ae062 io_uring/net: always use current transfer count for buffer put [1 file, +1/-1]
  c8d152b8c1 io_uring/net: mark iov as dynamically allocated even for single segments [1 file, +6/-5]
  560c3b51c7 io_uring/kbuf: flag partial buffer mappings [3 files, +17/-8]
  1f4b030e08 mm/vma: reset VMA iterator on commit_merge() OOM failure [1 file, +8/-19]
  287b9cec2e usb: typec: tcpm: PSSourceOffTimer timeout in PR_Swap enters ERROR_RECOVERY [1 file, +1/-2]

Changes in 6.12.36
	cifs: Correctly set SMB1 SessionKey field in Session Setup Request
	cifs: Fix cifs_query_path_info() for Windows NT servers
	cifs: Fix encoding of SMB1 Session Setup NTLMSSP Request in non-UNICODE mode
	NFSv4: Always set NLINK even if the server doesn't support it
	NFSv4.2: fix listxattr to return selinux security label
	NFSv4.2: fix setattr caching of TIME_[MODIFY|ACCESS]_SET when timestamps are delegated
	mailbox: Not protect module_put with spin_lock_irqsave
	mfd: max14577: Fix wakeup source leaks on device unbind
	sunrpc: don't immediately retransmit on seqno miss
	dm vdo indexer: don't read request structure after enqueuing
	leds: multicolor: Fix intensity setting while SW blinking
	fuse: fix race between concurrent setattrs from multiple nodes
	cxl/region: Add a dev_err() on missing target list entries
	NFSv4: xattr handlers should check for absent nfs filehandles
	hwmon: (pmbus/max34440) Fix support for max34451
	ksmbd: allow a filename to contain special characters on SMB3.1.1 posix extension
	ksmbd: provide zero as a unique ID to the Mac client
	rust: module: place cleanup_module() in .exit.text section
	rust: arm: fix unknown (to Clang) argument '-mno-fdpic'
	dmaengine: idxd: Check availability of workqueue allocated by idxd wq driver before using
	dmaengine: xilinx_dma: Set dma_device directions
	PCI: dwc: Make link training more robust by setting PORT_LOGIC_LINK_WIDTH to one lane
	PCI: apple: Fix missing OF node reference in apple_pcie_setup_port
	PCI: imx6: Add workaround for errata ERR051624
	nvme-tcp: fix I/O stalls on congested sockets
	nvme-tcp: sanitize request list handling
	md/md-bitmap: fix dm-raid max_write_behind setting
	amd/amdkfd: fix a kfd_process ref leak
	bcache: fix NULL pointer in cache_set_flush()
	drm/amdgpu: seq64 memory unmap uses uninterruptible lock
	drm/scheduler: signal scheduled fence when kill job
	iio: pressure: zpa2326: Use aligned_s64 for the timestamp
	um: Add cmpxchg8b_emu and checksum functions to asm-prototypes.h
	um: use proper care when taking mmap lock during segfault
	8250: microchip: pci1xxxx: Add PCIe Hot reset disable support for Rev C0 and later devices
	coresight: Only check bottom two claim bits
	usb: dwc2: also exit clock_gating when stopping udc while suspended
	iio: adc: ad_sigma_delta: Fix use of uninitialized status_pos
	misc: tps6594-pfsm: Add NULL pointer check in tps6594_pfsm_probe()
	usb: potential integer overflow in usbg_make_tpg()
	tty: serial: uartlite: register uart driver in init
	usb: common: usb-conn-gpio: use a unique name for usb connector device
	usb: Add checks for snprintf() calls in usb_alloc_dev()
	usb: cdc-wdm: avoid setting WDM_READ for ZLP-s
	usb: gadget: f_hid: wake up readers on disable/unbind
	usb: typec: displayport: Receive DP Status Update NAK request exit dp altmode
	usb: typec: mux: do not return on EOPNOTSUPP in {mux, switch}_set
	riscv: add a data fence for CMODX in the kernel mode
	ALSA: hda: Ignore unsol events for cards being shut down
	ALSA: hda: Add new pci id for AMD GPU display HD audio controller
	ALSA: usb-audio: Add a quirk for Lenovo Thinkpad Thunderbolt 3 dock
	ASoC: rt1320: fix speaker noise when volume bar is 100%
	ceph: fix possible integer overflow in ceph_zero_objects()
	scsi: ufs: core: Don't perform UFS clkscaling during host async scan
	ovl: Check for NULL d_inode() in ovl_dentry_upper()
	btrfs: handle csum tree error with rescue=ibadroots correctly
	drm/i915/gem: Allow EXEC_CAPTURE on recoverable contexts on DG1
	Revert "drm/i915/gem: Allow EXEC_CAPTURE on recoverable contexts on DG1"
	btrfs: factor out nocow ordered extent and extent map generation into a helper
	btrfs: use unsigned types for constants defined as bit shifts
	btrfs: fix qgroup reservation leak on failure to allocate ordered extent
	fs/jfs: consolidate sanity checking in dbMount
	jfs: validate AG parameters in dbMount() to prevent crashes
	ASoC: codec: wcd9335: Convert to GPIO descriptors
	ASoC: codecs: wcd9335: Fix missing free of regulator supplies
	f2fs: don't over-report free space or inodes in statvfs
	PCI: apple: Use helper function for_each_child_of_node_scoped()
	PCI: apple: Set only available ports up
	accel/ivpu: Do not fail on cmdq if failed to allocate preemption buffers
	accel/ivpu: Remove copy engine support
	accel/ivpu: Make command queue ID allocated on XArray
	accel/ivpu: Separate DB ID and CMDQ ID allocations from CMDQ allocation
	accel/ivpu: Add debugfs interface for setting HWS priority bands
	accel/ivpu: Trigger device recovery on engine reset/resume failure
	af_unix: Don't leave consecutive consumed OOB skbs.
	i2c: tiny-usb: disable zero-length read messages
	i2c: robotfuzz-osif: disable zero-length read messages
	ata: ahci: Use correct DMI identifier for ASUSPRO-D840SA LPM quirk
	smb: client: remove \t from TP_printk statements
	mm/damon/sysfs-schemes: free old damon_sysfs_scheme_filter->memcg_path on write
	ASoC: amd: yc: Add DMI quirk for Lenovo IdeaPad Slim 5 15
	s390/pkey: Prevent overflow in size calculation for memdup_user()
	fs/proc/task_mmu: fix PAGE_IS_PFNZERO detection for the huge zero folio
	lib/group_cpus: fix NULL pointer dereference from group_cpus_evenly()
	Revert "riscv: Define TASK_SIZE_MAX for __access_ok()"
	Revert "riscv: misaligned: fix sleeping function called during misaligned access handling"
	drm/xe/display: Add check for alloc_ordered_workqueue()
	HID: wacom: fix crash in wacom_aes_battery_handler()
	atm: clip: prevent NULL deref in clip_push()
	Bluetooth: hci_core: Fix use-after-free in vhci_flush()
	ALSA: usb-audio: Fix out-of-bounds read in snd_usb_get_audioformat_uac3()
	attach_recursive_mnt(): do not lock the covering tree when sliding something under it
	libbpf: Fix null pointer dereference in btf_dump__free on allocation failure
	ethernet: ionic: Fix DMA mapping tests
	wifi: mac80211: fix beacon interval calculation overflow
	af_unix: Don't set -ECONNRESET for consumed OOB skb.
	wifi: mac80211: Add link iteration macro for link data
	wifi: mac80211: Create separate links for VLAN interfaces
	wifi: mac80211: finish link init before RCU publish
	vsock/uapi: fix linux/vm_sockets.h userspace compilation errors
	bnxt: properly flush XDP redirect lists
	um: ubd: Add missing error check in start_io_thread()
	libbpf: Fix possible use-after-free for externs
	net: enetc: Correct endianness handling in _enetc_rd_reg64
	netlink: specs: tc: replace underscores with dashes in names
	atm: Release atm_dev_mutex after removing procfs in atm_dev_deregister().
	ALSA: hda/realtek: Fix built-in mic on ASUS VivoBook X507UAR
	net: selftests: fix TCP packet checksum
	drm/amdgpu/discovery: optionally use fw based ip discovery
	drm/amd: Adjust output for discovery error handling
	drm/i915: fix build error some more
	drm/bridge: ti-sn65dsi86: make use of debugfs_init callback
	drm/bridge: ti-sn65dsi86: Add HPD for DisplayPort connector type
	drm/xe: Process deferred GGTT node removals on device unwind
	smb: client: fix potential deadlock when reconnecting channels
	smb: smbdirect: add smbdirect_pdu.h with protocol definitions
	smb: client: make use of common smbdirect_pdu.h
	smb: smbdirect: add smbdirect.h with public structures
	smb: smbdirect: add smbdirect_socket.h
	smb: client: make use of common smbdirect_socket
	smb: smbdirect: introduce smbdirect_socket_parameters
	smb: client: make use of common smbdirect_socket_parameters
	cifs: Fix the smbd_response slab to allow usercopy
	cifs: Fix reading into an ITER_FOLIOQ from the smbdirect code
	EDAC/amd64: Fix size calculation for Non-Power-of-Two DIMMs
	x86/traps: Initialize DR6 by writing its architectural reset value
	staging: rtl8723bs: Avoid memset() in aes_cipher() and aes_decipher()
	dt-bindings: serial: 8250: Make clocks and clock-frequency exclusive
	serial: core: restore of_node information in sysfs
	serial: imx: Restore original RXTL for console to fix data loss
	Bluetooth: L2CAP: Fix L2CAP MTU negotiation
	dm-raid: fix variable in journal device check
	btrfs: fix a race between renames and directory logging
	btrfs: update superblock's device bytes_used when dropping chunk
	spi: spi-cadence-quadspi: Fix pm runtime unbalance
	net: libwx: fix the creation of page_pool
	maple_tree: fix MA_STATE_PREALLOC flag in mas_preallocate()
	mm/gup: revert "mm: gup: fix infinite loop within __get_longterm_locked"
	f2fs: fix to zero post-eof page
	HID: lenovo: Restrict F7/9/11 mode to compact keyboards only
	HID: wacom: fix memory leak on kobject creation failure
	HID: wacom: fix memory leak on sysfs attribute creation failure
	HID: wacom: fix kobject reference count leak
	scsi: megaraid_sas: Fix invalid node index
	scsi: ufs: core: Fix clk scaling to be conditional in reset and restore
	drm/ast: Fix comment on modeset lock
	drm/cirrus-qemu: Fix pitch programming
	drm/etnaviv: Protect the scheduler's pending list with its lock
	drm/tegra: Assign plane type before registration
	drm/tegra: Fix a possible null pointer dereference
	drm/udl: Unregister device before cleaning up on disconnect
	drm/msm/gpu: Fix crash when throttling GPU immediately during boot
	drm/amdkfd: Fix race in GWS queue scheduling
	drm/bridge: cdns-dsi: Fix the clock variable for mode_valid()
	drm/bridge: cdns-dsi: Fix phy de-init and flag it so
	drm/bridge: cdns-dsi: Fix connecting to next bridge
	drm/bridge: cdns-dsi: Check return value when getting default PHY config
	drm/bridge: cdns-dsi: Wait for Clk and Data Lanes to be ready
	drm/amd/display: Add null pointer check for get_first_active_display()
	drm/amdgpu: amdgpu_vram_mgr_new(): Clamp lpfn to total vram
	drm/amd/display: Correct non-OLED pre_T11_delay.
	drm/xe/vm: move rebind_work init earlier
	drm/xe/sched: stop re-submitting signalled jobs
	drm/xe/guc_submit: add back fix
	drm/amd/display: Fix RMCM programming seq errors
	drm/amdgpu: Add kicker device detection
	drm/amd/display: Check dce_hwseq before dereferencing it
	drm/xe: Fix memset on iomem
	drm/xe: Fix taking invalid lock on wedge
	drm/xe: Fix early wedge on GuC load failure
	drm/i915/dsi: Fix off by one in BXT_MIPI_TRANS_VTOTAL
	drm/amdgpu: Fix SDMA UTC_L1 handling during start/stop sequences
	drm/amdgpu: switch job hw_fence to amdgpu_fence
	drm/amd/display: Fix mpv playback corruption on weston
	media: uvcvideo: Rollback non processed entities on error
	x86/fpu: Refactor xfeature bitmask update code for sigframe XSAVE
	x86/pkeys: Simplify PKRU update in signal frame
	net: libwx: fix Tx L4 checksum
	io_uring: fix potential page leak in io_sqe_buffer_register()
	io_uring/rsrc: fix folio unpinning
	io_uring/rsrc: don't rely on user vaddr alignment
	io_uring/net: improve recv bundles
	io_uring/net: only retry recv bundle for a full transfer
	io_uring/net: only consider msg_inq if larger than 1
	io_uring/net: always use current transfer count for buffer put
	io_uring/net: mark iov as dynamically allocated even for single segments
	io_uring/kbuf: flag partial buffer mappings
	mm/vma: reset VMA iterator on commit_merge() OOM failure
	r8169: add support for RTL8125D
	net: phy: realtek: merge the drivers for internal NBase-T PHY's
	net: phy: realtek: add RTL8125D-internal PHY
	btrfs: do proper folio cleanup when cow_file_range() failed
	iio: dac: ad3552r: changes to use FIELD_PREP
	iio: dac: ad3552r: extract common code (no changes in behavior intended)
	iio: dac: ad3552r-common: fix ad3541/2r ranges
	drm/xe: Carve out wopcm portion from the stolen memory
	usb: typec: tcpm: PSSourceOffTimer timeout in PR_Swap enters ERROR_RECOVERY
	drm/msm/dp: account for widebus and yuv420 during mode validation
	drm/fbdev-dma: Add shadow buffering for deferred I/O
	btrfs: skip inodes without loaded extent maps when shrinking extent maps
	btrfs: make the extent map shrinker run asynchronously as a work queue job
	btrfs: do regular iput instead of delayed iput during extent map shrinking
	riscv/atomic: Do proper sign extension also for unsigned in arch_cmpxchg
	arm64: dts: rockchip: Add avdd HDMI supplies to RockPro64 board dtsi
	ALSA: hda/realtek: Bass speaker fixup for ASUS UM5606KA
	drm/amdkfd: remove gfx 12 trap handler page size cap
	drm/amdkfd: Fix instruction hazard in gfx12 trap handler
	net: stmmac: Fix accessing freed irq affinity_hint
	spi: fsl-qspi: use devm function instead of driver remove
	btrfs: zoned: fix extent range end unlock in cow_file_range()
	btrfs: fix use-after-free on inode when scanning root during em shrinking
	spi: fsl-qspi: Fix double cleanup in probe error path
	Linux 6.12.36

Change-Id: Ie7748fa6d766a9cf7800e67297e404cb90bd359c
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Commit: a778622fe4 (Greg Kroah-Hartman, 2025-07-09 09:52:56 +00:00)
211 changed files with 3776 additions and 2225 deletions


@@ -45,7 +45,7 @@ allOf:
- ns16550
- ns16550a
then:
anyOf:
oneOf:
- required: [ clock-frequency ]
- required: [ clocks ]
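
Note that switching from anyOf to oneOf makes the two clock properties mutually exclusive: a matching node must now specify exactly one of clock-frequency or clocks, whereas anyOf also accepted nodes that set both.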


@@ -227,7 +227,7 @@ definitions:
type: u8
doc: log(P_max / (qth-max - qth-min))
-
name: Scell_log
name: Scell-log
type: u8
doc: cell size for idle damping
-
@@ -248,7 +248,7 @@ definitions:
name: DPs
type: u32
-
name: def_DP
name: def-DP
type: u32
-
name: grio


@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 12
SUBLEVEL = 35
SUBLEVEL = 36
EXTRAVERSION =
NAME = Baby Opossum Posse


@@ -227,6 +227,16 @@
vin-supply = <&vcc12v_dcin>;
};
vcca_0v9: vcca-0v9 {
compatible = "regulator-fixed";
regulator-name = "vcca_0v9";
regulator-always-on;
regulator-boot-on;
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <900000>;
vin-supply = <&vcc3v3_sys>;
};
vdd_log: vdd-log {
compatible = "pwm-regulator";
pwms = <&pwm2 0 25000 1>;
@@ -312,6 +322,8 @@
};
&hdmi {
avdd-0v9-supply = <&vcca_0v9>;
avdd-1v8-supply = <&vcc1v8_dvp>;
ddc-i2c-bus = <&i2c3>;
pinctrl-names = "default";
pinctrl-0 = <&hdmi_cec>;


@@ -169,7 +169,7 @@
break; \
case 4: \
__arch_cmpxchg(".w", ".w" sc_sfx, prepend, append, \
__ret, __ptr, (long), __old, __new); \
__ret, __ptr, (long)(int)(long), __old, __new); \
break; \
case 8: \
__arch_cmpxchg(".d", ".d" sc_sfx, prepend, append, \


@@ -916,7 +916,6 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
*/
#ifdef CONFIG_64BIT
#define TASK_SIZE_64 (PGDIR_SIZE * PTRS_PER_PGD / 2)
#define TASK_SIZE_MAX LONG_MAX
#ifdef CONFIG_COMPAT
#define TASK_SIZE_32 (_AC(0x80000000, UL) - PAGE_SIZE)


@@ -429,7 +429,7 @@ int handle_misaligned_load(struct pt_regs *regs)
val.data_u64 = 0;
if (user_mode(regs)) {
if (copy_from_user_nofault(&val, (u8 __user *)addr, len))
if (copy_from_user(&val, (u8 __user *)addr, len))
return -1;
} else {
memcpy(&val, (u8 *)addr, len);
@@ -530,7 +530,7 @@ int handle_misaligned_store(struct pt_regs *regs)
return -EOPNOTSUPP;
if (user_mode(regs)) {
if (copy_to_user_nofault((u8 __user *)addr, &val, len))
if (copy_to_user((u8 __user *)addr, &val, len))
return -1;
} else {
memcpy((u8 *)addr, &val, len);


@@ -24,7 +24,20 @@ void flush_icache_all(void)
if (num_online_cpus() < 2)
return;
else if (riscv_use_sbi_for_rfence())
/*
* Make sure all previous writes to the D$ are ordered before making
* the IPI. The RISC-V spec states that a hart must execute a data fence
* before triggering a remote fence.i in order to make the modification
* visible for remote harts.
*
* IPIs on RISC-V are triggered by MMIO writes to either CLINT or
* S-IMSIC, so the fence ensures previous data writes "happen before"
* the MMIO.
*/
RISCV_FENCE(w, o);
if (riscv_use_sbi_for_rfence())
sbi_remote_fence_i(NULL);
else
on_each_cpu(ipi_remote_fence_i, NULL, 1);
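
A schematic sketch of the ordering contract described in the comment above (RISC-V only; the doorbell pointer is a hypothetical stand-in for the CLINT/IMSIC register, not a kernel API):

#include <stdint.h>

static inline void publish_code_then_ipi(volatile uint32_t *doorbell)
{
	/* Earlier stores have written the modified instructions to memory. */

	/* Order those data writes before the MMIO store (device output)... */
	__asm__ volatile("fence w, o" ::: "memory");

	/* ...so the IPI "happens after" them; the remote hart then executes
	 * fence.i in its handler and observes the new code. */
	*doorbell = 1;
}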


@@ -41,7 +41,7 @@ int start_io_thread(unsigned long sp, int *fd_out)
*fd_out = fds[1];
err = os_set_fd_block(*fd_out, 0);
err = os_set_fd_block(kernel_fd, 0);
err |= os_set_fd_block(kernel_fd, 0);
if (err) {
printk("start_io_thread - failed to set nonblocking I/O.\n");
goto out_close;


@@ -1 +1,6 @@
#include <asm-generic/asm-prototypes.h>
#include <asm/checksum.h>
#ifdef CONFIG_UML_X86
extern void cmpxchg8b_emu(void);
#endif


@@ -17,6 +17,122 @@
#include <os.h>
#include <skas.h>
/*
* NOTE: UML does not have exception tables. As such, this is almost a copy
* of the code in mm/memory.c, only adjusting the logic to simply check whether
* we are coming from the kernel instead of doing an additional lookup in the
* exception table.
* We can do this simplification because we never get here if the exception was
* fixable.
*/
static inline bool get_mmap_lock_carefully(struct mm_struct *mm, bool is_user)
{
if (likely(mmap_read_trylock(mm)))
return true;
if (!is_user)
return false;
return !mmap_read_lock_killable(mm);
}
static inline bool mmap_upgrade_trylock(struct mm_struct *mm)
{
/*
* We don't have this operation yet.
*
* It should be easy enough to do: it's basically a
* atomic_long_try_cmpxchg_acquire()
* from RWSEM_READER_BIAS -> RWSEM_WRITER_LOCKED, but
* it also needs the proper lockdep magic etc.
*/
return false;
}
static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, bool is_user)
{
mmap_read_unlock(mm);
if (!is_user)
return false;
return !mmap_write_lock_killable(mm);
}
/*
* Helper for page fault handling.
*
* This is kind of equivalent to "mmap_read_lock()" followed
* by "find_extend_vma()", except it's a lot more careful about
* the locking (and will drop the lock on failure).
*
* For example, if we have a kernel bug that causes a page
* fault, we don't want to just use mmap_read_lock() to get
* the mm lock, because that would deadlock if the bug were
* to happen while we're holding the mm lock for writing.
*
* So this checks the exception tables on kernel faults in
* order to only do this all for instructions that are actually
* expected to fault.
*
* We can also actually take the mm lock for writing if we
* need to extend the vma, which helps the VM layer a lot.
*/
static struct vm_area_struct *
um_lock_mm_and_find_vma(struct mm_struct *mm,
unsigned long addr, bool is_user)
{
struct vm_area_struct *vma;
if (!get_mmap_lock_carefully(mm, is_user))
return NULL;
vma = find_vma(mm, addr);
if (likely(vma && (vma->vm_start <= addr)))
return vma;
/*
* Well, dang. We might still be successful, but only
* if we can extend a vma to do so.
*/
if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) {
mmap_read_unlock(mm);
return NULL;
}
/*
* We can try to upgrade the mmap lock atomically,
* in which case we can continue to use the vma
* we already looked up.
*
* Otherwise we'll have to drop the mmap lock and
* re-take it, and also look up the vma again,
* re-checking it.
*/
if (!mmap_upgrade_trylock(mm)) {
if (!upgrade_mmap_lock_carefully(mm, is_user))
return NULL;
vma = find_vma(mm, addr);
if (!vma)
goto fail;
if (vma->vm_start <= addr)
goto success;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto fail;
}
if (expand_stack_locked(vma, addr))
goto fail;
success:
mmap_write_downgrade(mm);
return vma;
fail:
mmap_write_unlock(mm);
return NULL;
}
/*
* Note this is constrained to return 0, -EFAULT, -EACCES, -ENOMEM by
* segv().
@@ -43,21 +159,10 @@ int handle_page_fault(unsigned long address, unsigned long ip,
if (is_user)
flags |= FAULT_FLAG_USER;
retry:
mmap_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
goto out;
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto out;
if (is_user && !ARCH_IS_STACKGROW(address))
goto out;
vma = expand_stack(mm, address);
vma = um_lock_mm_and_find_vma(mm, address, is_user);
if (!vma)
goto out_nosemaphore;
good_area:
*code_out = SEGV_ACCERR;
if (is_write) {
if (!(vma->vm_flags & VM_WRITE))


@@ -15,7 +15,26 @@
which debugging register was responsible for the trap. The other bits
are either reserved or not of interest to us. */
/* Define reserved bits in DR6 which are always set to 1 */
/*
* Define bits in DR6 which are set to 1 by default.
*
* This is also the DR6 architectural value following Power-up, Reset or INIT.
*
* Note, with the introduction of Bus Lock Detection (BLD) and Restricted
* Transactional Memory (RTM), the DR6 register has been modified:
*
* 1) BLD flag (bit 11) is no longer reserved to 1 if the CPU supports
* Bus Lock Detection. The assertion of a bus lock could clear it.
*
* 2) RTM flag (bit 16) is no longer reserved to 1 if the CPU supports
* restricted transactional memory. #DB occurred inside an RTM region
* could clear it.
*
* Apparently, DR6.BLD and DR6.RTM are active low bits.
*
* As a result, DR6_RESERVED is an incorrect name now, but it is kept for
* compatibility.
*/
#define DR6_RESERVED (0xFFFF0FF0)
#define DR_TRAP0 (0x1) /* db0 */


@@ -2145,20 +2145,16 @@ EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
#endif /* CONFIG_X86_64 */
/*
* Clear all 6 debug registers:
*/
static void clear_all_debug_regs(void)
static void initialize_debug_regs(void)
{
int i;
for (i = 0; i < 8; i++) {
/* Ignore db4, db5 */
if ((i == 4) || (i == 5))
continue;
set_debugreg(0, i);
}
/* Control register first -- to make sure everything is disabled. */
set_debugreg(0, 7);
set_debugreg(DR6_RESERVED, 6);
/* dr5 and dr4 don't exist */
set_debugreg(0, 3);
set_debugreg(0, 2);
set_debugreg(0, 1);
set_debugreg(0, 0);
}
#ifdef CONFIG_KGDB
@@ -2319,7 +2315,7 @@ void cpu_init(void)
load_mm_ldt(&init_mm);
clear_all_debug_regs();
initialize_debug_regs();
dbg_restore_debug_regs();
doublefault_init_cpu_tss();


@@ -119,7 +119,6 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame,
{
struct xregs_state __user *x = buf;
struct _fpx_sw_bytes sw_bytes = {};
u32 xfeatures;
int err;
/* Setup the bytes not touched by the [f]xsave and reserved for SW. */
@@ -132,12 +131,6 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame,
err |= __put_user(FP_XSTATE_MAGIC2,
(__u32 __user *)(buf + fpstate->user_size));
/*
* Read the xfeatures which we copied (directly from the cpu or
* from the state in task struct) to the user buffers.
*/
err |= __get_user(xfeatures, (__u32 __user *)&x->header.xfeatures);
/*
* For legacy compatible, we always set FP/SSE bits in the bit
* vector while saving the state to the user context. This will
@@ -149,9 +142,7 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame,
* header as well as change any contents in the memory layout.
* xrestore as part of sigreturn will capture all the changes.
*/
xfeatures |= XFEATURE_MASK_FPSSE;
err |= __put_user(xfeatures, (__u32 __user *)&x->header.xfeatures);
err |= set_xfeature_in_sigframe(x, XFEATURE_MASK_FPSSE);
return !err;
}


@@ -69,21 +69,31 @@ static inline u64 xfeatures_mask_independent(void)
return fpu_kernel_cfg.independent_features;
}
static inline int set_xfeature_in_sigframe(struct xregs_state __user *xbuf, u64 mask)
{
u64 xfeatures;
int err;
/* Read the xfeatures value already saved in the user buffer */
err = __get_user(xfeatures, &xbuf->header.xfeatures);
xfeatures |= mask;
err |= __put_user(xfeatures, &xbuf->header.xfeatures);
return err;
}
/*
* Update the value of PKRU register that was already pushed onto the signal frame.
*/
static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u64 mask, u32 pkru)
static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u32 pkru)
{
u64 xstate_bv;
int err;
if (unlikely(!cpu_feature_enabled(X86_FEATURE_OSPKE)))
return 0;
/* Mark PKRU as in-use so that it is restored correctly. */
xstate_bv = (mask & xfeatures_in_use()) | XFEATURE_MASK_PKRU;
err = __put_user(xstate_bv, &buf->header.xfeatures);
err = set_xfeature_in_sigframe(buf, XFEATURE_MASK_PKRU);
if (err)
return err;
@@ -304,7 +314,7 @@ static inline int xsave_to_user_sigframe(struct xregs_state __user *buf, u32 pkr
clac();
if (!err)
err = update_pkru_in_sigframe(buf, mask, pkru);
err = update_pkru_in_sigframe(buf, pkru);
return err;
}


@@ -977,24 +977,32 @@ static bool is_sysenter_singlestep(struct pt_regs *regs)
#endif
}
static __always_inline unsigned long debug_read_clear_dr6(void)
static __always_inline unsigned long debug_read_reset_dr6(void)
{
unsigned long dr6;
get_debugreg(dr6, 6);
dr6 ^= DR6_RESERVED; /* Flip to positive polarity */
/*
* The Intel SDM says:
*
* Certain debug exceptions may clear bits 0-3. The remaining
* contents of the DR6 register are never cleared by the
* processor. To avoid confusion in identifying debug
* exceptions, debug handlers should clear the register before
* returning to the interrupted task.
* Certain debug exceptions may clear bits 0-3 of DR6.
*
* Keep it simple: clear DR6 immediately.
* BLD induced #DB clears DR6.BLD and any other debug
* exception doesn't modify DR6.BLD.
*
* RTM induced #DB clears DR6.RTM and any other debug
* exception sets DR6.RTM.
*
* To avoid confusion in identifying debug exceptions,
* debug handlers should set DR6.BLD and DR6.RTM, and
* clear other DR6 bits before returning.
*
* Keep it simple: write DR6 with its architectural reset
* value 0xFFFF0FF0, defined as DR6_RESERVED, immediately.
*/
get_debugreg(dr6, 6);
set_debugreg(DR6_RESERVED, 6);
dr6 ^= DR6_RESERVED; /* Flip to positive polarity */
return dr6;
}
@@ -1194,13 +1202,13 @@ out:
/* IST stack entry */
DEFINE_IDTENTRY_DEBUG(exc_debug)
{
exc_debug_kernel(regs, debug_read_clear_dr6());
exc_debug_kernel(regs, debug_read_reset_dr6());
}
/* User entry, runs on regular task stack */
DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
{
exc_debug_user(regs, debug_read_clear_dr6());
exc_debug_user(regs, debug_read_reset_dr6());
}
#ifdef CONFIG_X86_FRED
@@ -1219,7 +1227,7 @@ DEFINE_FREDENTRY_DEBUG(exc_debug)
{
/*
* FRED #DB stores DR6 on the stack in the format which
* debug_read_clear_dr6() returns for the IDT entry points.
* debug_read_reset_dr6() returns for the IDT entry points.
*/
unsigned long dr6 = fred_event_data(regs);
@@ -1234,7 +1242,7 @@ DEFINE_FREDENTRY_DEBUG(exc_debug)
/* 32 bit does not have separate entry points. */
DEFINE_IDTENTRY_RAW(exc_debug)
{
unsigned long dr6 = debug_read_clear_dr6();
unsigned long dr6 = debug_read_reset_dr6();
if (user_mode(regs))
exc_debug_user(regs, dr6);
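
A small stand-alone illustration of the polarity flip retained above (the raw DR6 value is made up): XOR-ing with DR6_RESERVED turns the active-low BLD/RTM bits into ordinary positive-polarity flags.

#include <stdio.h>

#define DR6_RESERVED 0xFFFF0FF0ul  /* architectural reset value */
#define DR6_BLD      (1ul << 11)   /* active low when BLD is supported */

int main(void)
{
	/* Hypothetical raw DR6 after a bus-lock #DB: BLD cleared (active
	 * low), the remaining default-1 bits still set. */
	unsigned long raw = DR6_RESERVED & ~DR6_BLD;
	unsigned long dr6 = raw ^ DR6_RESERVED; /* flip to positive polarity */

	printf("raw=%#lx dr6=%#lx bld=%d\n", raw, dr6, !!(dr6 & DR6_BLD));
	return 0;
}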


@@ -20,6 +20,9 @@
*/
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
/* Do not call this directly. Declared for export type visibility. */
extern __visible __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
/**
* csum_fold - Fold and invert a 32bit checksum.
* sum: 32bit unfolded sum


@@ -423,6 +423,88 @@ static int dct_active_set(void *data, u64 active_percent)
DEFINE_DEBUGFS_ATTRIBUTE(ivpu_dct_fops, dct_active_get, dct_active_set, "%llu\n");
static int priority_bands_show(struct seq_file *s, void *v)
{
struct ivpu_device *vdev = s->private;
struct ivpu_hw_info *hw = vdev->hw;
for (int band = VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE;
band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++) {
switch (band) {
case VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE:
seq_puts(s, "Idle: ");
break;
case VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL:
seq_puts(s, "Normal: ");
break;
case VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS:
seq_puts(s, "Focus: ");
break;
case VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME:
seq_puts(s, "Realtime: ");
break;
}
seq_printf(s, "grace_period %9u process_grace_period %9u process_quantum %9u\n",
hw->hws.grace_period[band], hw->hws.process_grace_period[band],
hw->hws.process_quantum[band]);
}
return 0;
}
static int priority_bands_fops_open(struct inode *inode, struct file *file)
{
return single_open(file, priority_bands_show, inode->i_private);
}
static ssize_t
priority_bands_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
{
struct seq_file *s = file->private_data;
struct ivpu_device *vdev = s->private;
char buf[64];
u32 grace_period;
u32 process_grace_period;
u32 process_quantum;
u32 band;
int ret;
if (size >= sizeof(buf))
return -EINVAL;
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, pos, user_buf, size);
if (ret < 0)
return ret;
buf[size] = '\0';
ret = sscanf(buf, "%u %u %u %u", &band, &grace_period, &process_grace_period,
&process_quantum);
if (ret != 4)
return -EINVAL;
if (band >= VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT)
return -EINVAL;
vdev->hw->hws.grace_period[band] = grace_period;
vdev->hw->hws.process_grace_period[band] = process_grace_period;
vdev->hw->hws.process_quantum[band] = process_quantum;
return size;
}
static const struct file_operations ivpu_hws_priority_bands_fops = {
.owner = THIS_MODULE,
.open = priority_bands_fops_open,
.write = priority_bands_fops_write,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
void ivpu_debugfs_init(struct ivpu_device *vdev)
{
struct dentry *debugfs_root = vdev->drm.debugfs_root;
@@ -445,6 +527,8 @@ void ivpu_debugfs_init(struct ivpu_device *vdev)
&fw_trace_hw_comp_mask_fops);
debugfs_create_file("fw_trace_level", 0200, debugfs_root, vdev,
&fw_trace_level_fops);
debugfs_create_file("hws_priority_bands", 0200, debugfs_root, vdev,
&ivpu_hws_priority_bands_fops);
debugfs_create_file("reset_engine", 0200, debugfs_root, vdev,
&ivpu_reset_engine_fops);


@@ -102,6 +102,8 @@ static void file_priv_release(struct kref *ref)
pm_runtime_get_sync(vdev->drm.dev);
mutex_lock(&vdev->context_list_lock);
file_priv_unbind(vdev, file_priv);
drm_WARN_ON(&vdev->drm, !xa_empty(&file_priv->cmdq_xa));
xa_destroy(&file_priv->cmdq_xa);
mutex_unlock(&vdev->context_list_lock);
pm_runtime_put_autosuspend(vdev->drm.dev);
@@ -261,6 +263,10 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
file_priv->job_limit.min = FIELD_PREP(IVPU_JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
file_priv->job_limit.max = file_priv->job_limit.min | IVPU_JOB_ID_JOB_MASK;
xa_init_flags(&file_priv->cmdq_xa, XA_FLAGS_ALLOC1);
file_priv->cmdq_limit.min = IVPU_CMDQ_MIN_ID;
file_priv->cmdq_limit.max = IVPU_CMDQ_MAX_ID;
mutex_unlock(&vdev->context_list_lock);
drm_dev_exit(idx);


@@ -50,11 +50,11 @@
#define IVPU_JOB_ID_JOB_MASK GENMASK(7, 0)
#define IVPU_JOB_ID_CONTEXT_MASK GENMASK(31, 8)
#define IVPU_NUM_ENGINES 2
#define IVPU_NUM_PRIORITIES 4
#define IVPU_NUM_CMDQS_PER_CTX (IVPU_NUM_ENGINES * IVPU_NUM_PRIORITIES)
#define IVPU_NUM_CMDQS_PER_CTX (IVPU_NUM_PRIORITIES)
#define IVPU_CMDQ_INDEX(engine, priority) ((engine) * IVPU_NUM_PRIORITIES + (priority))
#define IVPU_CMDQ_MIN_ID 1
#define IVPU_CMDQ_MAX_ID 255
#define IVPU_PLATFORM_SILICON 0
#define IVPU_PLATFORM_SIMICS 2
@@ -174,13 +174,15 @@ struct ivpu_file_priv {
struct kref ref;
struct ivpu_device *vdev;
struct mutex lock; /* Protects cmdq */
struct ivpu_cmdq *cmdq[IVPU_NUM_CMDQS_PER_CTX];
struct xarray cmdq_xa;
struct ivpu_mmu_context ctx;
struct mutex ms_lock; /* Protects ms_instance_list, ms_info_bo */
struct list_head ms_instance_list;
struct ivpu_bo *ms_info_bo;
struct xa_limit job_limit;
u32 job_id_next;
struct xa_limit cmdq_limit;
u32 cmdq_id_next;
bool has_mmu_faults;
bool bound;
bool aborted;


@@ -110,6 +110,26 @@ static void timeouts_init(struct ivpu_device *vdev)
}
}
static void priority_bands_init(struct ivpu_device *vdev)
{
/* Idle */
vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 0;
vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 50000;
vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 160000;
/* Normal */
vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 50000;
vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 50000;
vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 300000;
/* Focus */
vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 50000;
vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 50000;
vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 200000;
/* Realtime */
vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 0;
vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 50000;
vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 200000;
}
static void memory_ranges_init(struct ivpu_device *vdev)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
@@ -248,6 +268,7 @@ int ivpu_hw_init(struct ivpu_device *vdev)
{
ivpu_hw_btrs_info_init(vdev);
ivpu_hw_btrs_freq_ratios_init(vdev);
priority_bands_init(vdev);
memory_ranges_init(vdev);
platform_init(vdev);
wa_init(vdev);


@@ -45,6 +45,11 @@ struct ivpu_hw_info {
u8 pn_ratio;
u32 profiling_freq;
} pll;
struct {
u32 grace_period[VPU_HWS_NUM_PRIORITY_BANDS];
u32 process_quantum[VPU_HWS_NUM_PRIORITY_BANDS];
u32 process_grace_period[VPU_HWS_NUM_PRIORITY_BANDS];
} hws;
u32 tile_fuse;
u32 sku;
u16 config;


@@ -60,6 +60,7 @@ static int ivpu_preemption_buffers_create(struct ivpu_device *vdev,
err_free_primary:
ivpu_bo_free(cmdq->primary_preempt_buf);
cmdq->primary_preempt_buf = NULL;
return -ENOMEM;
}
@@ -69,10 +70,10 @@ static void ivpu_preemption_buffers_free(struct ivpu_device *vdev,
if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
return;
drm_WARN_ON(&vdev->drm, !cmdq->primary_preempt_buf);
drm_WARN_ON(&vdev->drm, !cmdq->secondary_preempt_buf);
ivpu_bo_free(cmdq->primary_preempt_buf);
ivpu_bo_free(cmdq->secondary_preempt_buf);
if (cmdq->primary_preempt_buf)
ivpu_bo_free(cmdq->primary_preempt_buf);
if (cmdq->secondary_preempt_buf)
ivpu_bo_free(cmdq->secondary_preempt_buf);
}
static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
@@ -85,27 +86,16 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
if (!cmdq)
return NULL;
ret = xa_alloc_cyclic(&vdev->db_xa, &cmdq->db_id, NULL, vdev->db_limit, &vdev->db_next,
GFP_KERNEL);
if (ret < 0) {
ivpu_err(vdev, "Failed to allocate doorbell id: %d\n", ret);
goto err_free_cmdq;
}
cmdq->mem = ivpu_bo_create_global(vdev, SZ_4K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
if (!cmdq->mem)
goto err_erase_xa;
goto err_free_cmdq;
ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
if (ret)
goto err_free_cmdq_mem;
ivpu_warn(vdev, "Failed to allocate preemption buffers, preemption limited\n");
return cmdq;
err_free_cmdq_mem:
ivpu_bo_free(cmdq->mem);
err_erase_xa:
xa_erase(&vdev->db_xa, cmdq->db_id);
err_free_cmdq:
kfree(cmdq);
return NULL;
@@ -128,13 +118,13 @@ static int ivpu_hws_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq
struct ivpu_device *vdev = file_priv->vdev;
int ret;
ret = ivpu_jsm_hws_create_cmdq(vdev, file_priv->ctx.id, file_priv->ctx.id, cmdq->db_id,
ret = ivpu_jsm_hws_create_cmdq(vdev, file_priv->ctx.id, file_priv->ctx.id, cmdq->id,
task_pid_nr(current), engine,
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
if (ret)
return ret;
ret = ivpu_jsm_hws_set_context_sched_properties(vdev, file_priv->ctx.id, cmdq->db_id,
ret = ivpu_jsm_hws_set_context_sched_properties(vdev, file_priv->ctx.id, cmdq->id,
priority);
if (ret)
return ret;
@@ -148,20 +138,21 @@ static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *
int ret;
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
ret = ivpu_jsm_hws_register_db(vdev, file_priv->ctx.id, cmdq->db_id, cmdq->db_id,
ret = ivpu_jsm_hws_register_db(vdev, file_priv->ctx.id, cmdq->id, cmdq->db_id,
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
else
ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
if (!ret)
ivpu_dbg(vdev, JOB, "DB %d registered to ctx %d\n", cmdq->db_id, file_priv->ctx.id);
ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d\n",
cmdq->db_id, cmdq->id, file_priv->ctx.id);
return ret;
}
static int
ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 engine, u8 priority)
ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u8 priority)
{
struct ivpu_device *vdev = file_priv->vdev;
struct vpu_job_queue_header *jobq_header;
@@ -177,13 +168,13 @@ ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 eng
cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem);
jobq_header = &cmdq->jobq->header;
jobq_header->engine_idx = engine;
jobq_header->engine_idx = VPU_ENGINE_COMPUTE;
jobq_header->head = 0;
jobq_header->tail = 0;
wmb(); /* Flush WC buffer for jobq->header */
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
ret = ivpu_hws_cmdq_init(file_priv, cmdq, engine, priority);
ret = ivpu_hws_cmdq_init(file_priv, cmdq, VPU_ENGINE_COMPUTE, priority);
if (ret)
return ret;
}
@@ -210,9 +201,9 @@ static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cm
cmdq->db_registered = false;
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->db_id);
ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->id);
if (!ret)
ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->db_id);
ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->id);
}
ret = ivpu_jsm_unregister_db(vdev, cmdq->db_id);
@@ -222,53 +213,102 @@ static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cm
return 0;
}
static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u16 engine,
u8 priority)
static int ivpu_db_id_alloc(struct ivpu_device *vdev, u32 *db_id)
{
int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx];
int ret;
u32 id;
ret = xa_alloc_cyclic(&vdev->db_xa, &id, NULL, vdev->db_limit, &vdev->db_next, GFP_KERNEL);
if (ret < 0)
return ret;
*db_id = id;
return 0;
}
static int ivpu_cmdq_id_alloc(struct ivpu_file_priv *file_priv, u32 *cmdq_id)
{
int ret;
u32 id;
ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &id, NULL, file_priv->cmdq_limit,
&file_priv->cmdq_id_next, GFP_KERNEL);
if (ret < 0)
return ret;
*cmdq_id = id;
return 0;
}
static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u8 priority)
{
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_cmdq *cmdq;
unsigned long id;
int ret;
lockdep_assert_held(&file_priv->lock);
xa_for_each(&file_priv->cmdq_xa, id, cmdq)
if (cmdq->priority == priority)
break;
if (!cmdq) {
cmdq = ivpu_cmdq_alloc(file_priv);
if (!cmdq)
if (!cmdq) {
ivpu_err(vdev, "Failed to allocate command queue\n");
return NULL;
file_priv->cmdq[cmdq_idx] = cmdq;
}
ret = ivpu_db_id_alloc(vdev, &cmdq->db_id);
if (ret) {
ivpu_err(file_priv->vdev, "Failed to allocate doorbell ID: %d\n", ret);
goto err_free_cmdq;
}
ret = ivpu_cmdq_id_alloc(file_priv, &cmdq->id);
if (ret) {
ivpu_err(vdev, "Failed to allocate command queue ID: %d\n", ret);
goto err_erase_db_id;
}
cmdq->priority = priority;
ret = xa_err(xa_store(&file_priv->cmdq_xa, cmdq->id, cmdq, GFP_KERNEL));
if (ret) {
ivpu_err(vdev, "Failed to store command queue in cmdq_xa: %d\n", ret);
goto err_erase_cmdq_id;
}
}
ret = ivpu_cmdq_init(file_priv, cmdq, engine, priority);
if (ret)
return NULL;
ret = ivpu_cmdq_init(file_priv, cmdq, priority);
if (ret) {
ivpu_err(vdev, "Failed to initialize command queue: %d\n", ret);
goto err_free_cmdq;
}
return cmdq;
}
static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u16 engine, u8 priority)
{
int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx];
lockdep_assert_held(&file_priv->lock);
if (cmdq) {
file_priv->cmdq[cmdq_idx] = NULL;
ivpu_cmdq_fini(file_priv, cmdq);
ivpu_cmdq_free(file_priv, cmdq);
}
err_erase_cmdq_id:
xa_erase(&file_priv->cmdq_xa, cmdq->id);
err_erase_db_id:
xa_erase(&vdev->db_xa, cmdq->db_id);
err_free_cmdq:
ivpu_cmdq_free(file_priv, cmdq);
return NULL;
}
void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
{
u16 engine;
u8 priority;
struct ivpu_cmdq *cmdq;
unsigned long cmdq_id;
lockdep_assert_held(&file_priv->lock);
for (engine = 0; engine < IVPU_NUM_ENGINES; engine++)
for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++)
ivpu_cmdq_release_locked(file_priv, engine, priority);
xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq) {
xa_erase(&file_priv->cmdq_xa, cmdq_id);
ivpu_cmdq_fini(file_priv, cmdq);
ivpu_cmdq_free(file_priv, cmdq);
}
}
/*
@@ -279,20 +319,13 @@ void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
*/
static void ivpu_cmdq_reset(struct ivpu_file_priv *file_priv)
{
u16 engine;
u8 priority;
struct ivpu_cmdq *cmdq;
unsigned long cmdq_id;
mutex_lock(&file_priv->lock);
for (engine = 0; engine < IVPU_NUM_ENGINES; engine++) {
for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++) {
int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx];
if (cmdq)
cmdq->db_registered = false;
}
}
xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
cmdq->db_registered = false;
mutex_unlock(&file_priv->lock);
}
@@ -312,17 +345,11 @@ void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
static void ivpu_cmdq_fini_all(struct ivpu_file_priv *file_priv)
{
u16 engine;
u8 priority;
struct ivpu_cmdq *cmdq;
unsigned long cmdq_id;
for (engine = 0; engine < IVPU_NUM_ENGINES; engine++) {
for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++) {
int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
if (file_priv->cmdq[cmdq_idx])
ivpu_cmdq_fini(file_priv, file_priv->cmdq[cmdq_idx]);
}
}
xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
ivpu_cmdq_fini(file_priv, cmdq);
}
void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv)
@@ -349,8 +376,8 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
/* Check if there is space left in job queue */
if (next_entry == header->head) {
ivpu_dbg(vdev, JOB, "Job queue full: ctx %d engine %d db %d head %d tail %d\n",
job->file_priv->ctx.id, job->engine_idx, cmdq->db_id, header->head, tail);
ivpu_dbg(vdev, JOB, "Job queue full: ctx %d cmdq %d db %d head %d tail %d\n",
job->file_priv->ctx.id, cmdq->id, cmdq->db_id, header->head, tail);
return -EBUSY;
}
@@ -363,10 +390,16 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW &&
(unlikely(!(ivpu_test_mode & IVPU_TEST_MODE_PREEMPTION_DISABLE)))) {
entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr;
entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf);
entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr;
entry->secondary_preempt_buf_size = ivpu_bo_size(cmdq->secondary_preempt_buf);
if (cmdq->primary_preempt_buf) {
entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr;
entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf);
}
if (cmdq->secondary_preempt_buf) {
entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr;
entry->secondary_preempt_buf_size =
ivpu_bo_size(cmdq->secondary_preempt_buf);
}
}
wmb(); /* Ensure that tail is updated after filling entry */
@@ -558,7 +591,7 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
mutex_lock(&vdev->submitted_jobs_lock);
mutex_lock(&file_priv->lock);
cmdq = ivpu_cmdq_acquire(file_priv, job->engine_idx, priority);
cmdq = ivpu_cmdq_acquire(file_priv, priority);
if (!cmdq) {
ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d engine %d prio %d\n",
file_priv->ctx.id, job->engine_idx, priority);
@@ -698,7 +731,7 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
int idx, ret;
u8 priority;
if (params->engine > DRM_IVPU_ENGINE_COPY)
if (params->engine != DRM_IVPU_ENGINE_COMPUTE)
return -EINVAL;
if (params->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
@@ -816,7 +849,8 @@ void ivpu_context_abort_thread_handler(struct work_struct *work)
unsigned long id;
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
ivpu_jsm_reset_engine(vdev, 0);
if (ivpu_jsm_reset_engine(vdev, 0))
return;
mutex_lock(&vdev->context_list_lock);
xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
@@ -832,7 +866,8 @@ void ivpu_context_abort_thread_handler(struct work_struct *work)
if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
return;
ivpu_jsm_hws_resume_engine(vdev, 0);
if (ivpu_jsm_hws_resume_engine(vdev, 0))
return;
/*
* In hardware scheduling mode NPU already has stopped processing jobs
* and won't send us any further notifications, thus we have to free job related resources


@@ -28,8 +28,10 @@ struct ivpu_cmdq {
struct ivpu_bo *secondary_preempt_buf;
struct ivpu_bo *mem;
u32 entry_count;
u32 id;
u32 db_id;
bool db_registered;
u8 priority;
};
/**


@@ -7,6 +7,8 @@
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_pm.h"
#include "vpu_jsm_api.h"
const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
{
@@ -132,7 +134,7 @@ int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat)
struct vpu_jsm_msg resp;
int ret;
if (engine > VPU_ENGINE_COPY)
if (engine != VPU_ENGINE_COMPUTE)
return -EINVAL;
req.payload.query_engine_hb.engine_idx = engine;
@@ -155,15 +157,17 @@ int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine)
struct vpu_jsm_msg resp;
int ret;
if (engine > VPU_ENGINE_COPY)
if (engine != VPU_ENGINE_COMPUTE)
return -EINVAL;
req.payload.engine_reset.engine_idx = engine;
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_RESET_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret)
if (ret) {
ivpu_err_ratelimited(vdev, "Failed to reset engine %d: %d\n", engine, ret);
ivpu_pm_trigger_recovery(vdev, "Engine reset failed");
}
return ret;
}
@@ -174,7 +178,7 @@ int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id
struct vpu_jsm_msg resp;
int ret;
if (engine > VPU_ENGINE_COPY)
if (engine != VPU_ENGINE_COMPUTE)
return -EINVAL;
req.payload.engine_preempt.engine_idx = engine;
@@ -346,15 +350,17 @@ int ivpu_jsm_hws_resume_engine(struct ivpu_device *vdev, u32 engine)
struct vpu_jsm_msg resp;
int ret;
if (engine >= VPU_ENGINE_NB)
if (engine != VPU_ENGINE_COMPUTE)
return -EINVAL;
req.payload.hws_resume_engine.engine_idx = engine;
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret)
if (ret) {
ivpu_err_ratelimited(vdev, "Failed to resume engine %d: %d\n", engine, ret);
ivpu_pm_trigger_recovery(vdev, "Engine resume failed");
}
return ret;
}
@@ -409,26 +415,18 @@ int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev)
{
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP };
struct vpu_jsm_msg resp;
struct ivpu_hw_info *hw = vdev->hw;
struct vpu_ipc_msg_payload_hws_priority_band_setup *setup =
&req.payload.hws_priority_band_setup;
int ret;
/* Idle */
req.payload.hws_priority_band_setup.grace_period[0] = 0;
req.payload.hws_priority_band_setup.process_grace_period[0] = 50000;
req.payload.hws_priority_band_setup.process_quantum[0] = 160000;
/* Normal */
req.payload.hws_priority_band_setup.grace_period[1] = 50000;
req.payload.hws_priority_band_setup.process_grace_period[1] = 50000;
req.payload.hws_priority_band_setup.process_quantum[1] = 300000;
/* Focus */
req.payload.hws_priority_band_setup.grace_period[2] = 50000;
req.payload.hws_priority_band_setup.process_grace_period[2] = 50000;
req.payload.hws_priority_band_setup.process_quantum[2] = 200000;
/* Realtime */
req.payload.hws_priority_band_setup.grace_period[3] = 0;
req.payload.hws_priority_band_setup.process_grace_period[3] = 50000;
req.payload.hws_priority_band_setup.process_quantum[3] = 200000;
req.payload.hws_priority_band_setup.normal_band_percentage = 10;
for (int band = VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE;
band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++) {
setup->grace_period[band] = hw->hws.grace_period[band];
setup->process_grace_period[band] = hw->hws.process_grace_period[band];
setup->process_quantum[band] = hw->hws.process_quantum[band];
}
setup->normal_band_percentage = 10;
ret = ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP,
&resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);


@@ -1456,7 +1456,7 @@ static bool ahci_broken_lpm(struct pci_dev *pdev)
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_VERSION, "ASUSPRO D840MB_M840SA"),
DMI_MATCH(DMI_PRODUCT_NAME, "ASUSPRO D840MB_M840SA"),
},
/* 320 is broken, there is no known good version. */
},


@@ -1781,6 +1781,13 @@ static int find_pos_and_ways(struct cxl_port *port, struct range *range,
}
put_device(dev);
if (rc)
dev_err(port->uport_dev,
"failed to find %s:%s in target list of %s\n",
dev_name(&port->dev),
dev_name(port->parent_dport->dport_dev),
dev_name(&cxlsd->cxld.dev));
return rc;
}


@@ -354,7 +354,9 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
set_bit(h, evl->bmap);
h = (h + 1) % size;
}
drain_workqueue(wq->wq);
if (wq->wq)
drain_workqueue(wq->wq);
mutex_unlock(&evl->lock);
}


@@ -2906,6 +2906,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
return -EINVAL;
}
xdev->common.directions |= chan->direction;
/* Request the interrupt */
chan->irq = of_irq_get(node, chan->tdest);
if (chan->irq < 0)


@@ -1208,7 +1208,9 @@ static int umc_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
cs_mode |= CS_ODD_PRIMARY;
/* Asymmetric dual-rank DIMM support. */
if (csrow_sec_enabled(2 * dimm, ctrl, pvt))
cs_mode |= CS_EVEN_SECONDARY;
if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
cs_mode |= CS_ODD_SECONDARY;
@@ -1229,12 +1231,13 @@ static int umc_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
return cs_mode;
}
static int __addr_mask_to_cs_size(u32 addr_mask_orig, unsigned int cs_mode,
int csrow_nr, int dimm)
static int calculate_cs_size(u32 mask, unsigned int cs_mode)
{
u32 msb, weight, num_zero_bits;
u32 addr_mask_deinterleaved;
int size = 0;
int msb, weight, num_zero_bits;
u32 deinterleaved_mask;
if (!mask)
return 0;
/*
* The number of zero bits in the mask is equal to the number of bits
@@ -1247,19 +1250,30 @@ static int __addr_mask_to_cs_size(u32 addr_mask_orig, unsigned int cs_mode,
* without swapping with the most significant bit. This can be handled
* by keeping the MSB where it is and ignoring the single zero bit.
*/
msb = fls(addr_mask_orig) - 1;
weight = hweight_long(addr_mask_orig);
msb = fls(mask) - 1;
weight = hweight_long(mask);
num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE);
/* Take the number of zero bits off from the top of the mask. */
addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
deinterleaved_mask = GENMASK(msb - num_zero_bits, 1);
edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", deinterleaved_mask);
return (deinterleaved_mask >> 2) + 1;
}
static int __addr_mask_to_cs_size(u32 addr_mask, u32 addr_mask_sec,
unsigned int cs_mode, int csrow_nr, int dimm)
{
int size;
edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
edac_dbg(1, " Original AddrMask: 0x%x\n", addr_mask_orig);
edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
edac_dbg(1, " Primary AddrMask: 0x%x\n", addr_mask);
/* Register [31:1] = Address [39:9]. Size is in kBs here. */
size = (addr_mask_deinterleaved >> 2) + 1;
size = calculate_cs_size(addr_mask, cs_mode);
edac_dbg(1, " Secondary AddrMask: 0x%x\n", addr_mask_sec);
size += calculate_cs_size(addr_mask_sec, cs_mode);
/* Return size in MBs. */
return size >> 10;
@@ -1268,8 +1282,8 @@ static int __addr_mask_to_cs_size(u32 addr_mask_orig, unsigned int cs_mode,
static int umc_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
unsigned int cs_mode, int csrow_nr)
{
u32 addr_mask = 0, addr_mask_sec = 0;
int cs_mask_nr = csrow_nr;
u32 addr_mask_orig;
int dimm, size = 0;
/* No Chip Selects are enabled. */
@@ -1307,13 +1321,13 @@ static int umc_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
if (!pvt->flags.zn_regs_v2)
cs_mask_nr >>= 1;
/* Asymmetric dual-rank DIMM support. */
if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
addr_mask_orig = pvt->csels[umc].csmasks_sec[cs_mask_nr];
else
addr_mask_orig = pvt->csels[umc].csmasks[cs_mask_nr];
if (cs_mode & (CS_EVEN_PRIMARY | CS_ODD_PRIMARY))
addr_mask = pvt->csels[umc].csmasks[cs_mask_nr];
return __addr_mask_to_cs_size(addr_mask_orig, cs_mode, csrow_nr, dimm);
if (cs_mode & (CS_EVEN_SECONDARY | CS_ODD_SECONDARY))
addr_mask_sec = pvt->csels[umc].csmasks_sec[cs_mask_nr];
return __addr_mask_to_cs_size(addr_mask, addr_mask_sec, cs_mode, csrow_nr, dimm);
}
static void umc_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
@@ -3515,9 +3529,10 @@ static void gpu_get_err_info(struct mce *m, struct err_info *err)
static int gpu_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
unsigned int cs_mode, int csrow_nr)
{
u32 addr_mask_orig = pvt->csels[umc].csmasks[csrow_nr];
u32 addr_mask = pvt->csels[umc].csmasks[csrow_nr];
u32 addr_mask_sec = pvt->csels[umc].csmasks_sec[csrow_nr];
return __addr_mask_to_cs_size(addr_mask_orig, cs_mode, csrow_nr, csrow_nr >> 1);
return __addr_mask_to_cs_size(addr_mask, addr_mask_sec, cs_mode, csrow_nr, csrow_nr >> 1);
}
static void gpu_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
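
A stand-alone rendition of the calculate_cs_size() arithmetic above, with made-up mask values: drop the interleave zero bits from the top of the mask, rebuild GENMASK(msb - zeros, 1), and convert the register view (bits [31:1] = address [39:9]) into kB.

#include <stdio.h>

#define GENMASK32(h, l) ((~0u >> (31 - (h))) & (~0u << (l)))

static unsigned int cs_size_kb(unsigned int mask, int interleaved_3r)
{
	int msb    = 31 - __builtin_clz(mask);   /* fls(mask) - 1 */
	int weight = __builtin_popcount(mask);   /* hweight_long(mask) */
	int zeros  = msb - weight - !!interleaved_3r;
	unsigned int deint = GENMASK32(msb - zeros, 1);

	return (deint >> 2) + 1;                 /* register [31:1] = addr [39:9] */
}

int main(void)
{
	/* Hypothetical masks: bits 1..15 set, then the same mask with one
	 * interleave zero bit (bit 1 clear, so the weight drops by one). */
	printf("%u kB\n", cs_size_kb(0xFFFE, 0)); /* 16384 kB */
	printf("%u kB\n", cs_size_kb(0xFFFC, 0)); /* 8192 kB  */
	return 0;
}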


@@ -1902,7 +1902,7 @@ no_preempt:
continue;
}
job = to_amdgpu_job(s_job);
if (preempted && (&job->hw_fence) == fence)
if (preempted && (&job->hw_fence.base) == fence)
/* mark the job as preempted */
job->preemption_status |= AMDGPU_IB_PREEMPTED;
}

View File

@ -5861,7 +5861,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
*
* job->base holds a reference to parent fence
*/
if (job && dma_fence_is_signaled(&job->hw_fence)) {
if (job && dma_fence_is_signaled(&job->hw_fence.base)) {
job_signaled = true;
dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
goto skip_hw_reset;

View File

@ -301,10 +301,12 @@ static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev,
const struct firmware *fw;
int r;
r = request_firmware(&fw, fw_name, adev->dev);
r = firmware_request_nowarn(&fw, fw_name, adev->dev);
if (r) {
dev_err(adev->dev, "can't load firmware \"%s\"\n",
fw_name);
if (amdgpu_discovery == 2)
dev_err(adev->dev, "can't load firmware \"%s\"\n", fw_name);
else
drm_info(&adev->ddev, "Optional firmware \"%s\" was not found\n", fw_name);
return r;
}
@ -419,16 +421,12 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
/* Read from file if it is the preferred option */
fw_name = amdgpu_discovery_get_fw_name(adev);
if (fw_name != NULL) {
dev_info(adev->dev, "use ip discovery information from file");
drm_dbg(&adev->ddev, "use ip discovery information from file");
r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin, fw_name);
if (r) {
dev_err(adev->dev, "failed to read ip discovery binary from file\n");
r = -EINVAL;
if (r)
goto out;
}
} else {
drm_dbg(&adev->ddev, "use ip discovery information from memory");
r = amdgpu_discovery_read_binary_from_mem(
adev, adev->mman.discovery_bin);
if (r)
@ -1286,10 +1284,8 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
int r;
r = amdgpu_discovery_init(adev);
if (r) {
DRM_ERROR("amdgpu_discovery_init failed\n");
if (r)
return r;
}
adev->gfx.xcc_mask = 0;
adev->sdma.sdma_mask = 0;
@ -2429,6 +2425,40 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
{
int r;
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_RAVEN:
case CHIP_VEGA20:
case CHIP_ARCTURUS:
case CHIP_ALDEBARAN:
/* this is not fatal. We have a fallback below
* if the new firmware files are not present. Some of
* this will be overridden below to keep things
* consistent with the current behavior.
*/
r = amdgpu_discovery_reg_base_init(adev);
if (!r) {
amdgpu_discovery_harvest_ip(adev);
amdgpu_discovery_get_gfx_info(adev);
amdgpu_discovery_get_mall_info(adev);
amdgpu_discovery_get_vcn_info(adev);
}
break;
default:
r = amdgpu_discovery_reg_base_init(adev);
if (r) {
drm_err(&adev->ddev, "discovery failed: %d\n", r);
return r;
}
amdgpu_discovery_harvest_ip(adev);
amdgpu_discovery_get_gfx_info(adev);
amdgpu_discovery_get_mall_info(adev);
amdgpu_discovery_get_vcn_info(adev);
break;
}
switch (adev->asic_type) {
case CHIP_VEGA10:
vega10_reg_base_init(adev);
@ -2591,14 +2621,6 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
break;
default:
r = amdgpu_discovery_reg_base_init(adev);
if (r)
return -EINVAL;
amdgpu_discovery_harvest_ip(adev);
amdgpu_discovery_get_gfx_info(adev);
amdgpu_discovery_get_mall_info(adev);
amdgpu_discovery_get_vcn_info(adev);
break;
}

View File

@ -41,22 +41,6 @@
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"
/*
* Fences mark an event in the GPUs pipeline and are used
* for GPU/CPU synchronization. When the fence is written,
* it is expected that all buffers associated with that fence
* are no longer in use by the associated ring on the GPU and
* that the relevant GPU caches have been flushed.
*/
struct amdgpu_fence {
struct dma_fence base;
/* RB, DMA, etc. */
struct amdgpu_ring *ring;
ktime_t start_timestamp;
};
static struct kmem_cache *amdgpu_fence_slab;
int amdgpu_fence_slab_init(void)
@ -151,12 +135,12 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
if (am_fence == NULL)
return -ENOMEM;
fence = &am_fence->base;
am_fence->ring = ring;
} else {
/* take use of job-embedded fence */
fence = &job->hw_fence;
am_fence = &job->hw_fence;
}
fence = &am_fence->base;
am_fence->ring = ring;
seq = ++ring->fence_drv.sync_seq;
if (job && job->job_run_counter) {
@ -718,7 +702,7 @@ void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
* it right here or we won't be able to track them in fence_drv
* and they will remain unsignaled during sa_bo free.
*/
job = container_of(old, struct amdgpu_job, hw_fence);
job = container_of(old, struct amdgpu_job, hw_fence.base);
if (!job->base.s_fence && !dma_fence_is_signaled(old))
dma_fence_signal(old);
RCU_INIT_POINTER(*ptr, NULL);
@ -780,7 +764,7 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
{
struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base);
return (const char *)to_amdgpu_ring(job->base.sched)->name;
}
@ -810,7 +794,7 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
*/
static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
{
struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base);
if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));
@ -845,7 +829,7 @@ static void amdgpu_job_fence_free(struct rcu_head *rcu)
struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
/* free job if fence has a parent job */
kfree(container_of(f, struct amdgpu_job, hw_fence));
kfree(container_of(f, struct amdgpu_job, hw_fence.base));
}
/**
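The mechanical change running through this file (hw_fence becoming hw_fence.base) follows from embedding the dma_fence one level deeper. A userspace sketch of why the container_of() member path must now name the nested field, using reduced stand-in struct layouts:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dma_fence { unsigned long seqno; };

struct amdgpu_fence {			/* wrapper type, as in the hunks above */
	struct dma_fence base;
	void *ring;
};

struct amdgpu_job_model {		/* reduced stand-in for amdgpu_job */
	int preemption_status;
	struct amdgpu_fence hw_fence;	/* was: struct dma_fence hw_fence */
};

int main(void)
{
	struct amdgpu_job_model job = { .preemption_status = 42 };
	struct dma_fence *f = &job.hw_fence.base;
	struct amdgpu_job_model *back =
		container_of(f, struct amdgpu_job_model, hw_fence.base);

	/* The byte offset happens to be unchanged while `base` is the first
	 * member, but the kernel's container_of() type-checks the member
	 * expression against `f`, so plain `hw_fence` no longer compiles. */
	printf("%d\n", back->preemption_status);	/* 42 */
	return 0;
}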

View File

@ -259,8 +259,8 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
/* Check if any fences were initialized */
if (job->base.s_fence && job->base.s_fence->finished.ops)
f = &job->base.s_fence->finished;
else if (job->hw_fence.ops)
f = &job->hw_fence;
else if (job->hw_fence.base.ops)
f = &job->hw_fence.base;
else
f = NULL;
@ -277,10 +277,10 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
amdgpu_sync_free(&job->explicit_sync);
/* only put the hw fence if has embedded fence */
if (!job->hw_fence.ops)
if (!job->hw_fence.base.ops)
kfree(job);
else
dma_fence_put(&job->hw_fence);
dma_fence_put(&job->hw_fence.base);
}
void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
@ -309,10 +309,10 @@ void amdgpu_job_free(struct amdgpu_job *job)
if (job->gang_submit != &job->base.s_fence->scheduled)
dma_fence_put(job->gang_submit);
if (!job->hw_fence.ops)
if (!job->hw_fence.base.ops)
kfree(job);
else
dma_fence_put(&job->hw_fence);
dma_fence_put(&job->hw_fence.base);
}
struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)

View File

@ -48,7 +48,7 @@ struct amdgpu_job {
struct drm_sched_job base;
struct amdgpu_vm *vm;
struct amdgpu_sync explicit_sync;
struct dma_fence hw_fence;
struct amdgpu_fence hw_fence;
struct dma_fence *gang_submit;
uint32_t preamble_status;
uint32_t preemption_status;

View File

@ -126,6 +126,22 @@ struct amdgpu_fence_driver {
struct dma_fence **fences;
};
/*
* Fences mark an event in the GPUs pipeline and are used
* for GPU/CPU synchronization. When the fence is written,
* it is expected that all buffers associated with that fence
* are no longer in use by the associated ring on the GPU and
* that the relevant GPU caches have been flushed.
*/
struct amdgpu_fence {
struct dma_fence base;
/* RB, DMA, etc. */
struct amdgpu_ring *ring;
ktime_t start_timestamp;
};
extern const struct drm_sched_backend_ops amdgpu_sched_ops;
void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);

View File

@ -133,7 +133,7 @@ void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv)
vm = &fpriv->vm;
drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
r = amdgpu_vm_lock_pd(vm, &exec, 0);
if (likely(!r))

View File

@ -30,6 +30,10 @@
#define AMDGPU_UCODE_NAME_MAX (128)
static const struct kicker_device kicker_device_list[] = {
{0x744B, 0x00},
};
static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header *hdr)
{
DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes));
@ -1383,6 +1387,19 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl
return NULL;
}
bool amdgpu_is_kicker_fw(struct amdgpu_device *adev)
{
int i;
for (i = 0; i < ARRAY_SIZE(kicker_device_list); i++) {
if (adev->pdev->device == kicker_device_list[i].device &&
adev->pdev->revision == kicker_device_list[i].revision)
return true;
}
return false;
}
void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len)
{
int maj, min, rev;
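amdgpu_is_kicker_fw() is a plain table lookup over PCI device/revision pairs. A hypothetical caller would use it to select an alternate firmware image; this fragment is a sketch only, and the file names, fw_name, and chip_name locals are illustrative, not real binaries or driver variables:

/* Hypothetical use: pick a "kicker" image when the PCI device/revision
 * matches the table (0x744B rev 0x00 above). */
if (amdgpu_is_kicker_fw(adev))
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_kicker.bin", chip_name);
else
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", chip_name);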

View File

@ -595,6 +595,11 @@ struct amdgpu_firmware {
uint64_t fw_buf_mc;
};
struct kicker_device{
unsigned short device;
u8 revision;
};
void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_imu_hdr(const struct common_firmware_header *hdr);
@ -622,5 +627,6 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type);
const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id);
void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len);
bool amdgpu_is_kicker_fw(struct amdgpu_device *adev);
#endif

View File

@ -463,7 +463,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
int r;
lpfn = (u64)place->lpfn << PAGE_SHIFT;
if (!lpfn)
if (!lpfn || lpfn > man->size)
lpfn = man->size;
fpfn = (u64)place->fpfn << PAGE_SHIFT;

View File

@ -485,7 +485,7 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
{
struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
u32 doorbell_offset, doorbell;
u32 rb_cntl, ib_cntl;
u32 rb_cntl, ib_cntl, sdma_cntl;
int i;
for_each_inst(i, inst_mask) {
@ -497,6 +497,9 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL);
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl);
sdma_cntl = RREG32_SDMA(i, regSDMA_CNTL);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, UTC_L1_ENABLE, 0);
WREG32_SDMA(i, regSDMA_CNTL, sdma_cntl);
if (sdma[i]->use_doorbell) {
doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL);
@ -953,6 +956,7 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
/* set utc l1 enable flag always to 1 */
temp = RREG32_SDMA(i, regSDMA_CNTL);
temp = REG_SET_FIELD(temp, SDMA_CNTL, UTC_L1_ENABLE, 1);
WREG32_SDMA(i, regSDMA_CNTL, temp);
if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) < IP_VERSION(4, 4, 5)) {
/* enable context empty interrupt during initialization */
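The UTC_L1_ENABLE toggling above is the driver's usual read-modify-write of a single register field. A standalone model of the REG_SET_FIELD() pattern, with an invented mask and shift (the real values come from the generated register headers):

#include <stdio.h>
#include <stdint.h>

#define SDMA_CNTL__UTC_L1_ENABLE_SHIFT 1	/* invented for the model */
#define SDMA_CNTL__UTC_L1_ENABLE_MASK  (1u << SDMA_CNTL__UTC_L1_ENABLE_SHIFT)

static uint32_t reg_set_field(uint32_t reg, uint32_t mask, unsigned int shift,
			      uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t sdma_cntl = 0xffffffffu;	/* stands in for RREG32_SDMA() */

	/* inst_gfx_stop: clear UTC_L1_ENABLE while the engine is halted */
	sdma_cntl = reg_set_field(sdma_cntl, SDMA_CNTL__UTC_L1_ENABLE_MASK,
				  SDMA_CNTL__UTC_L1_ENABLE_SHIFT, 0);
	printf("0x%08x\n", sdma_cntl);		/* 0xfffffffd */

	/* inst_start: set it back to 1 before the queue runs again */
	sdma_cntl = reg_set_field(sdma_cntl, SDMA_CNTL__UTC_L1_ENABLE_MASK,
				  SDMA_CNTL__UTC_L1_ENABLE_SHIFT, 1);
	printf("0x%08x\n", sdma_cntl);		/* 0xffffffff */
	return 0;
}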

View File

@ -3640,7 +3640,7 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
};
static const uint32_t cwsr_trap_gfx12_hex[] = {
0xbfa00001, 0xbfa0024b,
0xbfa00001, 0xbfa002a2,
0xb0804009, 0xb8f8f804,
0x9178ff78, 0x00008c00,
0xb8fbf811, 0x8b6eff78,
@ -3714,7 +3714,15 @@ static const uint32_t cwsr_trap_gfx12_hex[] = {
0x00011677, 0xd7610000,
0x00011a79, 0xd7610000,
0x00011c7e, 0xd7610000,
0x00011e7f, 0xbefe00ff,
0x00011e7f, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xbefe00ff,
0x00003fff, 0xbeff0080,
0xee0a407a, 0x000c0000,
0x00004000, 0xd760007a,
@ -3751,38 +3759,46 @@ static const uint32_t cwsr_trap_gfx12_hex[] = {
0x00000200, 0xbef600ff,
0x01000000, 0x7e000280,
0x7e020280, 0x7e040280,
0xbefd0080, 0xbe804ec2,
0xbf94fffe, 0xb8faf804,
0x8b7a847a, 0x91788478,
0x8c787a78, 0xd7610002,
0x0000fa71, 0x807d817d,
0xd7610002, 0x0000fa6c,
0x807d817d, 0x917aff6d,
0x80000000, 0xd7610002,
0x0000fa7a, 0x807d817d,
0xd7610002, 0x0000fa6e,
0x807d817d, 0xd7610002,
0x0000fa6f, 0x807d817d,
0xd7610002, 0x0000fa78,
0x807d817d, 0xb8faf811,
0xd7610002, 0x0000fa7a,
0x807d817d, 0xd7610002,
0x0000fa7b, 0x807d817d,
0xb8f1f801, 0xd7610002,
0x0000fa71, 0x807d817d,
0xb8f1f814, 0xd7610002,
0x0000fa71, 0x807d817d,
0xb8f1f815, 0xd7610002,
0x0000fa71, 0x807d817d,
0xb8f1f812, 0xd7610002,
0x0000fa71, 0x807d817d,
0xb8f1f813, 0xd7610002,
0x0000fa71, 0x807d817d,
0xbe804ec2, 0xbf94fffe,
0xb8faf804, 0x8b7a847a,
0x91788478, 0x8c787a78,
0x917aff6d, 0x80000000,
0xd7610002, 0x00010071,
0xd7610002, 0x0001026c,
0xd7610002, 0x0001047a,
0xd7610002, 0x0001066e,
0xd7610002, 0x0001086f,
0xd7610002, 0x00010a78,
0xd7610002, 0x00010e7b,
0xd8500000, 0x00000000,
0xd8500000, 0x00000000,
0xd8500000, 0x00000000,
0xd8500000, 0x00000000,
0xd8500000, 0x00000000,
0xd8500000, 0x00000000,
0xd8500000, 0x00000000,
0xd8500000, 0x00000000,
0xb8faf811, 0xd7610002,
0x00010c7a, 0xb8faf801,
0xd7610002, 0x0001107a,
0xb8faf814, 0xd7610002,
0x0001127a, 0xb8faf815,
0xd7610002, 0x0001147a,
0xb8faf812, 0xd7610002,
0x0001167a, 0xb8faf813,
0xd7610002, 0x0001187a,
0xb8faf802, 0xd7610002,
0x0000fa7a, 0x807d817d,
0xbefa50c1, 0xbfc70000,
0xd7610002, 0x0000fa7a,
0x807d817d, 0xbefe00ff,
0x00011a7a, 0xbefa50c1,
0xbfc70000, 0xd7610002,
0x00011c7a, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xbefe00ff,
0x0000ffff, 0xbeff0080,
0xc4068070, 0x008ce802,
0x00000000, 0xbefe00c1,
@ -3797,329 +3813,356 @@ static const uint32_t cwsr_trap_gfx12_hex[] = {
0xbe824102, 0xbe844104,
0xbe864106, 0xbe884108,
0xbe8a410a, 0xbe8c410c,
0xbe8e410e, 0xd7610002,
0x0000f200, 0x80798179,
0xd7610002, 0x0000f201,
0x80798179, 0xd7610002,
0x0000f202, 0x80798179,
0xd7610002, 0x0000f203,
0x80798179, 0xd7610002,
0x0000f204, 0x80798179,
0xd7610002, 0x0000f205,
0x80798179, 0xd7610002,
0x0000f206, 0x80798179,
0xd7610002, 0x0000f207,
0x80798179, 0xd7610002,
0x0000f208, 0x80798179,
0xd7610002, 0x0000f209,
0x80798179, 0xd7610002,
0x0000f20a, 0x80798179,
0xd7610002, 0x0000f20b,
0x80798179, 0xd7610002,
0x0000f20c, 0x80798179,
0xd7610002, 0x0000f20d,
0x80798179, 0xd7610002,
0x0000f20e, 0x80798179,
0xd7610002, 0x0000f20f,
0x80798179, 0xbf06a079,
0xbfa10007, 0xc4068070,
0x008ce802, 0x00000000,
0x8070ff70, 0x00000080,
0xbef90080, 0x7e040280,
0x807d907d, 0xbf0aff7d,
0x00000060, 0xbfa2ffbb,
0xbe804100, 0xbe824102,
0xbe844104, 0xbe864106,
0xbe884108, 0xbe8a410a,
0xd7610002, 0x0000f200,
0x80798179, 0xd7610002,
0x0000f201, 0x80798179,
0xd7610002, 0x0000f202,
0x80798179, 0xd7610002,
0x0000f203, 0x80798179,
0xd7610002, 0x0000f204,
0x80798179, 0xd7610002,
0x0000f205, 0x80798179,
0xd7610002, 0x0000f206,
0x80798179, 0xd7610002,
0x0000f207, 0x80798179,
0xd7610002, 0x0000f208,
0x80798179, 0xd7610002,
0x0000f209, 0x80798179,
0xd7610002, 0x0000f20a,
0x80798179, 0xd7610002,
0x0000f20b, 0x80798179,
0xbe8e410e, 0xbf068079,
0xbfa10032, 0xd7610002,
0x00010000, 0xd7610002,
0x00010201, 0xd7610002,
0x00010402, 0xd7610002,
0x00010603, 0xd7610002,
0x00010804, 0xd7610002,
0x00010a05, 0xd7610002,
0x00010c06, 0xd7610002,
0x00010e07, 0xd7610002,
0x00011008, 0xd7610002,
0x00011209, 0xd7610002,
0x0001140a, 0xd7610002,
0x0001160b, 0xd7610002,
0x0001180c, 0xd7610002,
0x00011a0d, 0xd7610002,
0x00011c0e, 0xd7610002,
0x00011e0f, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0x80799079,
0xbfa00038, 0xd7610002,
0x00012000, 0xd7610002,
0x00012201, 0xd7610002,
0x00012402, 0xd7610002,
0x00012603, 0xd7610002,
0x00012804, 0xd7610002,
0x00012a05, 0xd7610002,
0x00012c06, 0xd7610002,
0x00012e07, 0xd7610002,
0x00013008, 0xd7610002,
0x00013209, 0xd7610002,
0x0001340a, 0xd7610002,
0x0001360b, 0xd7610002,
0x0001380c, 0xd7610002,
0x00013a0d, 0xd7610002,
0x00013c0e, 0xd7610002,
0x00013e0f, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0x80799079,
0xc4068070, 0x008ce802,
0x00000000, 0xbefe00c1,
0x857d9973, 0x8b7d817d,
0xbf06817d, 0xbfa20002,
0xbeff0080, 0xbfa00001,
0xbeff00c1, 0xb8fb4306,
0x8b7bc17b, 0xbfa10044,
0x8b7aff6d, 0x80000000,
0xbfa10041, 0x847b897b,
0xbef6007b, 0xb8f03b05,
0x80708170, 0xbf0d9973,
0xbfa20002, 0x84708970,
0xbfa00001, 0x84708a70,
0xb8fa1e06, 0x847a8a7a,
0x80707a70, 0x8070ff70,
0x00000200, 0x8070ff70,
0x00000080, 0xbef600ff,
0x01000000, 0xd71f0000,
0x000100c1, 0xd7200000,
0x000200c1, 0x16000084,
0x857d9973, 0x8b7d817d,
0xbf06817d, 0xbefd0080,
0xbfa20013, 0xbe8300ff,
0x00000080, 0xbf800000,
0xbf800000, 0xbf800000,
0xd8d80000, 0x01000000,
0xbf8a0000, 0xc4068070,
0x008ce801, 0x00000000,
0x807d037d, 0x80700370,
0xd5250000, 0x0001ff00,
0x00000080, 0xbf0a7b7d,
0xbfa2fff3, 0xbfa00012,
0xbe8300ff, 0x00000100,
0x00000000, 0x8070ff70,
0x00000080, 0xbef90080,
0x7e040280, 0x807d907d,
0xbf0aff7d, 0x00000060,
0xbfa2ff88, 0xbe804100,
0xbe824102, 0xbe844104,
0xbe864106, 0xbe884108,
0xbe8a410a, 0xd7610002,
0x00010000, 0xd7610002,
0x00010201, 0xd7610002,
0x00010402, 0xd7610002,
0x00010603, 0xd7610002,
0x00010804, 0xd7610002,
0x00010a05, 0xd7610002,
0x00010c06, 0xd7610002,
0x00010e07, 0xd7610002,
0x00011008, 0xd7610002,
0x00011209, 0xd7610002,
0x0001140a, 0xd7610002,
0x0001160b, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xd8500000,
0x00000000, 0xc4068070,
0x008ce802, 0x00000000,
0xbefe00c1, 0x857d9973,
0x8b7d817d, 0xbf06817d,
0xbfa20002, 0xbeff0080,
0xbfa00001, 0xbeff00c1,
0xb8fb4306, 0x8b7bc17b,
0xbfa10044, 0x8b7aff6d,
0x80000000, 0xbfa10041,
0x847b897b, 0xbef6007b,
0xb8f03b05, 0x80708170,
0xbf0d9973, 0xbfa20002,
0x84708970, 0xbfa00001,
0x84708a70, 0xb8fa1e06,
0x847a8a7a, 0x80707a70,
0x8070ff70, 0x00000200,
0x8070ff70, 0x00000080,
0xbef600ff, 0x01000000,
0xd71f0000, 0x000100c1,
0xd7200000, 0x000200c1,
0x16000084, 0x857d9973,
0x8b7d817d, 0xbf06817d,
0xbefd0080, 0xbfa20013,
0xbe8300ff, 0x00000080,
0xbf800000, 0xbf800000,
0xbf800000, 0xd8d80000,
0x01000000, 0xbf8a0000,
0xc4068070, 0x008ce801,
0x00000000, 0x807d037d,
0x80700370, 0xd5250000,
0x0001ff00, 0x00000100,
0x0001ff00, 0x00000080,
0xbf0a7b7d, 0xbfa2fff3,
0xbefe00c1, 0x857d9973,
0x8b7d817d, 0xbf06817d,
0xbfa20004, 0xbef000ff,
0x00000200, 0xbeff0080,
0xbfa00003, 0xbef000ff,
0x00000400, 0xbeff00c1,
0xb8fb3b05, 0x807b817b,
0x847b827b, 0x857d9973,
0x8b7d817d, 0xbf06817d,
0xbfa2001b, 0xbef600ff,
0x01000000, 0xbefd0084,
0xbf0a7b7d, 0xbfa10040,
0x7e008700, 0x7e028701,
0x7e048702, 0x7e068703,
0xc4068070, 0x008ce800,
0x00000000, 0xc4068070,
0x008ce801, 0x00008000,
0xc4068070, 0x008ce802,
0x00010000, 0xc4068070,
0x008ce803, 0x00018000,
0x807d847d, 0x8070ff70,
0x00000200, 0xbf0a7b7d,
0xbfa2ffeb, 0xbfa0002a,
0xbfa00012, 0xbe8300ff,
0x00000100, 0xbf800000,
0xbf800000, 0xbf800000,
0xd8d80000, 0x01000000,
0xbf8a0000, 0xc4068070,
0x008ce801, 0x00000000,
0x807d037d, 0x80700370,
0xd5250000, 0x0001ff00,
0x00000100, 0xbf0a7b7d,
0xbfa2fff3, 0xbefe00c1,
0x857d9973, 0x8b7d817d,
0xbf06817d, 0xbfa20004,
0xbef000ff, 0x00000200,
0xbeff0080, 0xbfa00003,
0xbef000ff, 0x00000400,
0xbeff00c1, 0xb8fb3b05,
0x807b817b, 0x847b827b,
0x857d9973, 0x8b7d817d,
0xbf06817d, 0xbfa2001b,
0xbef600ff, 0x01000000,
0xbefd0084, 0xbf0a7b7d,
0xbfa10015, 0x7e008700,
0xbfa10040, 0x7e008700,
0x7e028701, 0x7e048702,
0x7e068703, 0xc4068070,
0x008ce800, 0x00000000,
0xc4068070, 0x008ce801,
0x00010000, 0xc4068070,
0x008ce802, 0x00020000,
0x00008000, 0xc4068070,
0x008ce802, 0x00010000,
0xc4068070, 0x008ce803,
0x00030000, 0x807d847d,
0x8070ff70, 0x00000400,
0x00018000, 0x807d847d,
0x8070ff70, 0x00000200,
0xbf0a7b7d, 0xbfa2ffeb,
0xb8fb1e06, 0x8b7bc17b,
0xbfa1000d, 0x847b837b,
0x807b7d7b, 0xbefe00c1,
0xbeff0080, 0x7e008700,
0xbfa0002a, 0xbef600ff,
0x01000000, 0xbefd0084,
0xbf0a7b7d, 0xbfa10015,
0x7e008700, 0x7e028701,
0x7e048702, 0x7e068703,
0xc4068070, 0x008ce800,
0x00000000, 0x807d817d,
0x8070ff70, 0x00000080,
0xbf0a7b7d, 0xbfa2fff7,
0xbfa0016e, 0xbef4007e,
0x8b75ff7f, 0x0000ffff,
0x8c75ff75, 0x00040000,
0xbef60080, 0xbef700ff,
0x10807fac, 0xbef1007f,
0xb8f20742, 0x84729972,
0x8b6eff7f, 0x04000000,
0xbfa1003b, 0xbefe00c1,
0x857d9972, 0x8b7d817d,
0xbf06817d, 0xbfa20002,
0xbeff0080, 0xbfa00001,
0xbeff00c1, 0xb8ef4306,
0x8b6fc16f, 0xbfa10030,
0x846f896f, 0xbef6006f,
0x00000000, 0xc4068070,
0x008ce801, 0x00010000,
0xc4068070, 0x008ce802,
0x00020000, 0xc4068070,
0x008ce803, 0x00030000,
0x807d847d, 0x8070ff70,
0x00000400, 0xbf0a7b7d,
0xbfa2ffeb, 0xb8fb1e06,
0x8b7bc17b, 0xbfa1000d,
0x847b837b, 0x807b7d7b,
0xbefe00c1, 0xbeff0080,
0x7e008700, 0xc4068070,
0x008ce800, 0x00000000,
0x807d817d, 0x8070ff70,
0x00000080, 0xbf0a7b7d,
0xbfa2fff7, 0xbfa0016e,
0xbef4007e, 0x8b75ff7f,
0x0000ffff, 0x8c75ff75,
0x00040000, 0xbef60080,
0xbef700ff, 0x10807fac,
0xbef1007f, 0xb8f20742,
0x84729972, 0x8b6eff7f,
0x04000000, 0xbfa1003b,
0xbefe00c1, 0x857d9972,
0x8b7d817d, 0xbf06817d,
0xbfa20002, 0xbeff0080,
0xbfa00001, 0xbeff00c1,
0xb8ef4306, 0x8b6fc16f,
0xbfa10030, 0x846f896f,
0xbef6006f, 0xb8f83b05,
0x80788178, 0xbf0d9972,
0xbfa20002, 0x84788978,
0xbfa00001, 0x84788a78,
0xb8ee1e06, 0x846e8a6e,
0x80786e78, 0x8078ff78,
0x00000200, 0x8078ff78,
0x00000080, 0xbef600ff,
0x01000000, 0x857d9972,
0x8b7d817d, 0xbf06817d,
0xbefd0080, 0xbfa2000d,
0xc4050078, 0x0080e800,
0x00000000, 0xbf8a0000,
0xdac00000, 0x00000000,
0x807dff7d, 0x00000080,
0x8078ff78, 0x00000080,
0xbf0a6f7d, 0xbfa2fff4,
0xbfa0000c, 0xc4050078,
0x0080e800, 0x00000000,
0xbf8a0000, 0xdac00000,
0x00000000, 0x807dff7d,
0x00000100, 0x8078ff78,
0x00000100, 0xbf0a6f7d,
0xbfa2fff4, 0xbef80080,
0xbefe00c1, 0x857d9972,
0x8b7d817d, 0xbf06817d,
0xbfa20002, 0xbeff0080,
0xbfa00001, 0xbeff00c1,
0xb8ef3b05, 0x806f816f,
0x846f826f, 0x857d9972,
0x8b7d817d, 0xbf06817d,
0xbfa2002c, 0xbef600ff,
0x01000000, 0xbeee0078,
0x8078ff78, 0x00000200,
0xbefd0084, 0xbf0a6f7d,
0xbfa10061, 0xc4050078,
0x008ce800, 0x00000000,
0xc4050078, 0x008ce801,
0x00008000, 0xc4050078,
0x008ce802, 0x00010000,
0xc4050078, 0x008ce803,
0x00018000, 0xbf8a0000,
0x7e008500, 0x7e028501,
0x7e048502, 0x7e068503,
0x807d847d, 0x8078ff78,
0x00000200, 0xbf0a6f7d,
0xbfa2ffea, 0xc405006e,
0x008ce800, 0x00000000,
0xc405006e, 0x008ce801,
0x00008000, 0xc405006e,
0x008ce802, 0x00010000,
0xc405006e, 0x008ce803,
0x00018000, 0xbf8a0000,
0xbfa0003d, 0xbef600ff,
0x01000000, 0xbeee0078,
0x8078ff78, 0x00000400,
0xbefd0084, 0xbf0a6f7d,
0xbfa10016, 0xc4050078,
0x008ce800, 0x00000000,
0xc4050078, 0x008ce801,
0x00010000, 0xc4050078,
0x008ce802, 0x00020000,
0xc4050078, 0x008ce803,
0x00030000, 0xbf8a0000,
0x7e008500, 0x7e028501,
0x7e048502, 0x7e068503,
0x807d847d, 0x8078ff78,
0x00000400, 0xbf0a6f7d,
0xbfa2ffea, 0xb8ef1e06,
0x8b6fc16f, 0xbfa1000f,
0x846f836f, 0x806f7d6f,
0xbefe00c1, 0xbeff0080,
0xc4050078, 0x008ce800,
0x00000000, 0xbf8a0000,
0x7e008500, 0x807d817d,
0x8078ff78, 0x00000080,
0xbf0a6f7d, 0xbfa2fff6,
0xbeff00c1, 0xc405006e,
0x008ce800, 0x00000000,
0xc405006e, 0x008ce801,
0x00010000, 0xc405006e,
0x008ce802, 0x00020000,
0xc405006e, 0x008ce803,
0x00030000, 0xbf8a0000,
0xb8f83b05, 0x80788178,
0xbf0d9972, 0xbfa20002,
0x84788978, 0xbfa00001,
0x84788a78, 0xb8ee1e06,
0x846e8a6e, 0x80786e78,
0x8078ff78, 0x00000200,
0x8078ff78, 0x00000080,
0x80f8ff78, 0x00000050,
0xbef600ff, 0x01000000,
0x857d9972, 0x8b7d817d,
0xbf06817d, 0xbefd0080,
0xbfa2000d, 0xc4050078,
0x0080e800, 0x00000000,
0xbf8a0000, 0xdac00000,
0x00000000, 0x807dff7d,
0x00000080, 0x8078ff78,
0x00000080, 0xbf0a6f7d,
0xbfa2fff4, 0xbfa0000c,
0xc4050078, 0x0080e800,
0x00000000, 0xbf8a0000,
0xdac00000, 0x00000000,
0x807dff7d, 0x00000100,
0x8078ff78, 0x00000100,
0xbf0a6f7d, 0xbfa2fff4,
0xbef80080, 0xbefe00c1,
0x857d9972, 0x8b7d817d,
0xbf06817d, 0xbfa20002,
0xbeff0080, 0xbfa00001,
0xbeff00c1, 0xb8ef3b05,
0x806f816f, 0x846f826f,
0x857d9972, 0x8b7d817d,
0xbf06817d, 0xbfa2002c,
0xbef600ff, 0x01000000,
0xbeee0078, 0x8078ff78,
0x00000200, 0xbefd0084,
0xbf0a6f7d, 0xbfa10061,
0xc4050078, 0x008ce800,
0x00000000, 0xc4050078,
0x008ce801, 0x00008000,
0xc4050078, 0x008ce802,
0x00010000, 0xc4050078,
0x008ce803, 0x00018000,
0xbf8a0000, 0x7e008500,
0x7e028501, 0x7e048502,
0x7e068503, 0x807d847d,
0x8078ff78, 0x00000200,
0xbf0a6f7d, 0xbfa2ffea,
0xc405006e, 0x008ce800,
0x00000000, 0xc405006e,
0x008ce801, 0x00008000,
0xc405006e, 0x008ce802,
0x00010000, 0xc405006e,
0x008ce803, 0x00018000,
0xbf8a0000, 0xbfa0003d,
0xbef600ff, 0x01000000,
0xbeee0078, 0x8078ff78,
0x00000400, 0xbefd0084,
0xbf0a6f7d, 0xbfa10016,
0xc4050078, 0x008ce800,
0x00000000, 0xc4050078,
0x008ce801, 0x00010000,
0xc4050078, 0x008ce802,
0x00020000, 0xc4050078,
0x008ce803, 0x00030000,
0xbf8a0000, 0x7e008500,
0x7e028501, 0x7e048502,
0x7e068503, 0x807d847d,
0x8078ff78, 0x00000400,
0xbf0a6f7d, 0xbfa2ffea,
0xb8ef1e06, 0x8b6fc16f,
0xbfa1000f, 0x846f836f,
0x806f7d6f, 0xbefe00c1,
0xbeff0080, 0xc4050078,
0x008ce800, 0x00000000,
0xbf8a0000, 0x7e008500,
0x807d817d, 0x8078ff78,
0x00000080, 0xbf0a6f7d,
0xbfa2fff6, 0xbeff00c1,
0xc405006e, 0x008ce800,
0x00000000, 0xc405006e,
0x008ce801, 0x00010000,
0xc405006e, 0x008ce802,
0x00020000, 0xc405006e,
0x008ce803, 0x00030000,
0xbf8a0000, 0xb8f83b05,
0x80788178, 0xbf0d9972,
0xbfa20002, 0x84788978,
0xbfa00001, 0x84788a78,
0xb8ee1e06, 0x846e8a6e,
0x80786e78, 0x8078ff78,
0x00000200, 0x80f8ff78,
0x00000050, 0xbef600ff,
0x01000000, 0xbefd00ff,
0x0000006c, 0x80f89078,
0xf462403a, 0xf0000000,
0xbf8a0000, 0x80fd847d,
0xbf800000, 0xbe804300,
0xbe824302, 0x80f8a078,
0xf462603a, 0xf0000000,
0xbf8a0000, 0x80fd887d,
0xbf800000, 0xbe804300,
0xbe824302, 0xbe844304,
0xbe864306, 0x80f8c078,
0xf462803a, 0xf0000000,
0xbf8a0000, 0x80fd907d,
0xbf800000, 0xbe804300,
0xbe824302, 0xbe844304,
0xbe864306, 0xbe884308,
0xbe8a430a, 0xbe8c430c,
0xbe8e430e, 0xbf06807d,
0xbfa1fff0, 0xb980f801,
0x00000000, 0xb8f83b05,
0x80788178, 0xbf0d9972,
0xbfa20002, 0x84788978,
0xbfa00001, 0x84788a78,
0xb8ee1e06, 0x846e8a6e,
0x80786e78, 0x8078ff78,
0x00000200, 0xbef600ff,
0x01000000, 0xbeff0071,
0xf4621bfa, 0xf0000000,
0x80788478, 0xf4621b3a,
0xf0000000, 0x80788478,
0xf4621b7a, 0xf0000000,
0x80788478, 0xf4621c3a,
0xf0000000, 0x80788478,
0xf4621c7a, 0xf0000000,
0x80788478, 0xf4621eba,
0xf0000000, 0x80788478,
0xf4621efa, 0xf0000000,
0x80788478, 0xf4621e7a,
0xf0000000, 0x80788478,
0xf4621cfa, 0xf0000000,
0x80788478, 0xf4621bba,
0xf0000000, 0x80788478,
0xbf8a0000, 0xb96ef814,
0xf4621bba, 0xf0000000,
0x80788478, 0xbf8a0000,
0xb96ef815, 0xf4621bba,
0xf0000000, 0x80788478,
0xbf8a0000, 0xb96ef812,
0xf4621bba, 0xf0000000,
0x80788478, 0xbf8a0000,
0xb96ef813, 0x8b6eff7f,
0x04000000, 0xbfa1000d,
0x80788478, 0xf4621bba,
0xf0000000, 0x80788478,
0xbf8a0000, 0xbf0d806e,
0xbfa10006, 0x856e906e,
0x8b6e6e6e, 0xbfa10003,
0xbe804ec1, 0x816ec16e,
0xbfa0fffb, 0xbefd006f,
0xbefe0070, 0xbeff0071,
0xb97b2011, 0x857b867b,
0xb97b0191, 0x857b827b,
0xb97bba11, 0xb973f801,
0xb8ee3b05, 0x806e816e,
0xbefd00ff, 0x0000006c,
0x80f89078, 0xf462403a,
0xf0000000, 0xbf8a0000,
0x80fd847d, 0xbf800000,
0xbe804300, 0xbe824302,
0x80f8a078, 0xf462603a,
0xf0000000, 0xbf8a0000,
0x80fd887d, 0xbf800000,
0xbe804300, 0xbe824302,
0xbe844304, 0xbe864306,
0x80f8c078, 0xf462803a,
0xf0000000, 0xbf8a0000,
0x80fd907d, 0xbf800000,
0xbe804300, 0xbe824302,
0xbe844304, 0xbe864306,
0xbe884308, 0xbe8a430a,
0xbe8c430c, 0xbe8e430e,
0xbf06807d, 0xbfa1fff0,
0xb980f801, 0x00000000,
0xb8f83b05, 0x80788178,
0xbf0d9972, 0xbfa20002,
0x846e896e, 0xbfa00001,
0x846e8a6e, 0xb8ef1e06,
0x846f8a6f, 0x806e6f6e,
0x806eff6e, 0x00000200,
0x806e746e, 0x826f8075,
0x8b6fff6f, 0x0000ffff,
0xf4605c37, 0xf8000050,
0xf4605d37, 0xf8000060,
0xf4601e77, 0xf8000074,
0xbf8a0000, 0x8b6dff6d,
0x0000ffff, 0x8bfe7e7e,
0x8bea6a6a, 0xb97af804,
0x84788978, 0xbfa00001,
0x84788a78, 0xb8ee1e06,
0x846e8a6e, 0x80786e78,
0x8078ff78, 0x00000200,
0xbef600ff, 0x01000000,
0xbeff0071, 0xf4621bfa,
0xf0000000, 0x80788478,
0xf4621b3a, 0xf0000000,
0x80788478, 0xf4621b7a,
0xf0000000, 0x80788478,
0xf4621c3a, 0xf0000000,
0x80788478, 0xf4621c7a,
0xf0000000, 0x80788478,
0xf4621eba, 0xf0000000,
0x80788478, 0xf4621efa,
0xf0000000, 0x80788478,
0xf4621e7a, 0xf0000000,
0x80788478, 0xf4621cfa,
0xf0000000, 0x80788478,
0xf4621bba, 0xf0000000,
0x80788478, 0xbf8a0000,
0xb96ef814, 0xf4621bba,
0xf0000000, 0x80788478,
0xbf8a0000, 0xb96ef815,
0xf4621bba, 0xf0000000,
0x80788478, 0xbf8a0000,
0xb96ef812, 0xf4621bba,
0xf0000000, 0x80788478,
0xbf8a0000, 0xb96ef813,
0x8b6eff7f, 0x04000000,
0xbfa1000d, 0x80788478,
0xf4621bba, 0xf0000000,
0x80788478, 0xbf8a0000,
0xbf0d806e, 0xbfa10006,
0x856e906e, 0x8b6e6e6e,
0xbfa10003, 0xbe804ec1,
0x816ec16e, 0xbfa0fffb,
0xbefd006f, 0xbefe0070,
0xbeff0071, 0xb97b2011,
0x857b867b, 0xb97b0191,
0x857b827b, 0xb97bba11,
0xb973f801, 0xb8ee3b05,
0x806e816e, 0xbf0d9972,
0xbfa20002, 0x846e896e,
0xbfa00001, 0x846e8a6e,
0xb8ef1e06, 0x846f8a6f,
0x806e6f6e, 0x806eff6e,
0x00000200, 0x806e746e,
0x826f8075, 0x8b6fff6f,
0x0000ffff, 0xf4605c37,
0xf8000050, 0xf4605d37,
0xf8000060, 0xf4601e77,
0xf8000074, 0xbf8a0000,
0x8b6dff6d, 0x0000ffff,
0x8bfe7e7e, 0x8bea6a6a,
0xb97af804, 0xbe804ec2,
0xbf94fffe, 0xbe804a6c,
0xbe804ec2, 0xbf94fffe,
0xbe804a6c, 0xbe804ec2,
0xbf94fffe, 0xbfb10000,
0xbfb10000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0x00000000,
};

View File

@ -30,6 +30,7 @@
#define CHIP_GFX12 37
#define SINGLE_STEP_MISSED_WORKAROUND 1 //workaround for lost TRAP_AFTER_INST exception when SAVECTX raised
#define HAVE_VALU_SGPR_HAZARD (ASIC_FAMILY == CHIP_GFX12)
var SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK = 0x4
var SQ_WAVE_STATE_PRIV_SCC_SHIFT = 9
@ -351,6 +352,7 @@ L_HAVE_VGPRS:
v_writelane_b32 v0, ttmp13, 0xD
v_writelane_b32 v0, exec_lo, 0xE
v_writelane_b32 v0, exec_hi, 0xF
valu_sgpr_hazard()
s_mov_b32 exec_lo, 0x3FFF
s_mov_b32 exec_hi, 0x0
@ -417,7 +419,6 @@ L_SAVE_HWREG:
v_mov_b32 v0, 0x0 //Offset[31:0] from buffer resource
v_mov_b32 v1, 0x0 //Offset[63:32] from buffer resource
v_mov_b32 v2, 0x0 //Set of SGPRs for TCP store
s_mov_b32 m0, 0x0 //Next lane of v2 to write to
// Ensure no further changes to barrier or LDS state.
// STATE_PRIV.BARRIER_COMPLETE may change up to this point.
@ -430,40 +431,41 @@ L_SAVE_HWREG:
s_andn2_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK
s_or_b32 s_save_state_priv, s_save_state_priv, s_save_tmp
write_hwreg_to_v2(s_save_m0)
write_hwreg_to_v2(s_save_pc_lo)
s_andn2_b32 s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
write_hwreg_to_v2(s_save_tmp)
write_hwreg_to_v2(s_save_exec_lo)
write_hwreg_to_v2(s_save_exec_hi)
write_hwreg_to_v2(s_save_state_priv)
v_writelane_b32 v2, s_save_m0, 0x0
v_writelane_b32 v2, s_save_pc_lo, 0x1
v_writelane_b32 v2, s_save_tmp, 0x2
v_writelane_b32 v2, s_save_exec_lo, 0x3
v_writelane_b32 v2, s_save_exec_hi, 0x4
v_writelane_b32 v2, s_save_state_priv, 0x5
v_writelane_b32 v2, s_save_xnack_mask, 0x7
valu_sgpr_hazard()
s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
write_hwreg_to_v2(s_save_tmp)
v_writelane_b32 v2, s_save_tmp, 0x6
write_hwreg_to_v2(s_save_xnack_mask)
s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_MODE)
v_writelane_b32 v2, s_save_tmp, 0x8
s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_MODE)
write_hwreg_to_v2(s_save_m0)
s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_SCRATCH_BASE_LO)
v_writelane_b32 v2, s_save_tmp, 0x9
s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_SCRATCH_BASE_LO)
write_hwreg_to_v2(s_save_m0)
s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_SCRATCH_BASE_HI)
v_writelane_b32 v2, s_save_tmp, 0xA
s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_SCRATCH_BASE_HI)
write_hwreg_to_v2(s_save_m0)
s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
v_writelane_b32 v2, s_save_tmp, 0xB
s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
write_hwreg_to_v2(s_save_m0)
s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_TRAP_CTRL)
write_hwreg_to_v2(s_save_m0)
s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_TRAP_CTRL)
v_writelane_b32 v2, s_save_tmp, 0xC
s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_STATUS)
write_hwreg_to_v2(s_save_tmp)
v_writelane_b32 v2, s_save_tmp, 0xD
s_get_barrier_state s_save_tmp, -1
s_wait_kmcnt (0)
write_hwreg_to_v2(s_save_tmp)
v_writelane_b32 v2, s_save_tmp, 0xE
valu_sgpr_hazard()
// Write HWREGs with 16 VGPR lanes. TTMPs occupy space after this.
s_mov_b32 exec_lo, 0xFFFF
@ -497,10 +499,12 @@ L_SAVE_SGPR_LOOP:
s_movrels_b64 s12, s12 //s12 = s[12+m0], s13 = s[13+m0]
s_movrels_b64 s14, s14 //s14 = s[14+m0], s15 = s[15+m0]
write_16sgpr_to_v2(s0)
s_cmp_eq_u32 ttmp13, 0x20 //have 32 VGPR lanes filled?
s_cbranch_scc0 L_SAVE_SGPR_SKIP_TCP_STORE
s_cmp_eq_u32 ttmp13, 0x0
s_cbranch_scc0 L_WRITE_V2_SECOND_HALF
write_16sgpr_to_v2(s0, 0x0)
s_branch L_SAVE_SGPR_SKIP_TCP_STORE
L_WRITE_V2_SECOND_HALF:
write_16sgpr_to_v2(s0, 0x10)
buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
s_add_u32 s_save_mem_offset, s_save_mem_offset, 0x80
@ -1056,27 +1060,21 @@ L_END_PGM:
s_endpgm_saved
end
function write_hwreg_to_v2(s)
// Copy into VGPR for later TCP store.
v_writelane_b32 v2, s, m0
s_add_u32 m0, m0, 0x1
end
function write_16sgpr_to_v2(s)
function write_16sgpr_to_v2(s, lane_offset)
// Copy into VGPR for later TCP store.
for var sgpr_idx = 0; sgpr_idx < 16; sgpr_idx ++
v_writelane_b32 v2, s[sgpr_idx], ttmp13
s_add_u32 ttmp13, ttmp13, 0x1
v_writelane_b32 v2, s[sgpr_idx], sgpr_idx + lane_offset
end
valu_sgpr_hazard()
s_add_u32 ttmp13, ttmp13, 0x10
end
function write_12sgpr_to_v2(s)
// Copy into VGPR for later TCP store.
for var sgpr_idx = 0; sgpr_idx < 12; sgpr_idx ++
v_writelane_b32 v2, s[sgpr_idx], ttmp13
s_add_u32 ttmp13, ttmp13, 0x1
v_writelane_b32 v2, s[sgpr_idx], sgpr_idx
end
valu_sgpr_hazard()
end
function read_hwreg_from_mem(s, s_rsrc, s_mem_offset)
@ -1128,3 +1126,11 @@ function get_wave_size2(s_reg)
s_getreg_b32 s_reg, hwreg(HW_REG_WAVE_STATUS,SQ_WAVE_STATUS_WAVE64_SHIFT,SQ_WAVE_STATUS_WAVE64_SIZE)
s_lshl_b32 s_reg, s_reg, S_WAVE_SIZE
end
function valu_sgpr_hazard
#if HAVE_VALU_SGPR_HAZARD
for var rep = 0; rep < 8; rep ++
ds_nop
end
#endif
end

View File

@ -537,7 +537,8 @@ static void kfd_cwsr_init(struct kfd_dev *kfd)
kfd->cwsr_isa = cwsr_trap_gfx11_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx11_hex);
} else {
BUILD_BUG_ON(sizeof(cwsr_trap_gfx12_hex) > PAGE_SIZE);
BUILD_BUG_ON(sizeof(cwsr_trap_gfx12_hex)
> KFD_CWSR_TMA_OFFSET);
kfd->cwsr_isa = cwsr_trap_gfx12_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx12_hex);
}
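The point of tightening the limit from PAGE_SIZE to KFD_CWSR_TMA_OFFSET is that the TMA area lives in the same reserved region after the trap-handler blob, so the blob must stop short of it, not merely fit in the page. A userspace model of the compile-time check, with illustrative sizes (the real KFD_CWSR_TMA_OFFSET differs):

#include <stdio.h>

#define KFD_CWSR_TMA_OFFSET 3072	/* assumed value, for illustration */

static const unsigned int cwsr_blob_model[700];	/* 2800 bytes */

/* Equivalent of BUILD_BUG_ON(sizeof(blob) > KFD_CWSR_TMA_OFFSET) */
_Static_assert(sizeof(cwsr_blob_model) <= KFD_CWSR_TMA_OFFSET,
	       "CWSR ISA would overlap the TMA region");

int main(void)
{
	printf("%zu bytes of headroom\n",
	       (size_t)KFD_CWSR_TMA_OFFSET - sizeof(cwsr_blob_model));
	return 0;
}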

View File

@ -1315,6 +1315,7 @@ void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid)
user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
if (unlikely(user_gpu_id == -EINVAL)) {
WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id);
kfd_unref_process(p);
return;
}
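The one-line fix adds the missing reference drop on the error return. The bug shape, modeled in plain C with a stand-in refcount (kfd_lookup_process_by_pasid() takes a reference that every exit path must put):

#include <stdio.h>

struct process_model { int refcount; };

static void proc_get(struct process_model *p) { p->refcount++; }
static void proc_put(struct process_model *p) { p->refcount--; }

static void signal_event(struct process_model *p, int user_gpu_id)
{
	proc_get(p);			/* lookup-by-pasid takes a reference */

	if (user_gpu_id == -1) {	/* stands in for the -EINVAL check */
		proc_put(p);		/* the fix: balance it on this path too */
		return;
	}

	/* ... deliver the event ... */
	proc_put(p);
}

int main(void)
{
	struct process_model p = { 0 };

	signal_event(&p, -1);
	printf("refcount = %d\n", p.refcount);	/* 0: no leak */
	return 0;
}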

View File

@ -237,7 +237,7 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
packet->bitfields2.engine_sel =
engine_sel__mes_map_queues__compute_vi;
packet->bitfields2.gws_control_queue = q->gws ? 1 : 0;
packet->bitfields2.gws_control_queue = q->properties.is_gws ? 1 : 0;
packet->bitfields2.extended_engine_sel =
extended_engine_sel__mes_map_queues__legacy_engine_sel;
packet->bitfields2.queue_type =

View File

@ -762,6 +762,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
plane->pixel_format = dml2_420_10;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
plane->pixel_format = dml2_444_64;

View File

@ -4651,7 +4651,10 @@ static void calculate_tdlut_setting(
//the tdlut is fetched during the 2 row times of prefetch.
if (p->setup_for_tdlut) {
*p->tdlut_groups_per_2row_ub = (unsigned int)math_ceil2((double) *p->tdlut_bytes_per_frame / *p->tdlut_bytes_per_group, 1);
*p->tdlut_opt_time = (*p->tdlut_bytes_per_frame - p->cursor_buffer_size * 1024) / tdlut_drain_rate;
if (*p->tdlut_bytes_per_frame > p->cursor_buffer_size * 1024)
*p->tdlut_opt_time = (*p->tdlut_bytes_per_frame - p->cursor_buffer_size * 1024) / tdlut_drain_rate;
else
*p->tdlut_opt_time = 0;
*p->tdlut_drain_time = p->cursor_buffer_size * 1024 / tdlut_drain_rate;
}
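Before this change, *p->tdlut_opt_time went negative whenever the per-frame 3D-LUT payload was smaller than the cursor buffer. A worked model of the clamp, with invented numbers:

#include <stdio.h>

int main(void)
{
	double tdlut_bytes_per_frame = 2048.0;	/* invented: small LUT payload */
	double cursor_buffer_size = 16.0;	/* KiB, invented */
	double tdlut_drain_rate = 1.0e6;	/* bytes/s, invented */
	double cursor_bytes = cursor_buffer_size * 1024.0;
	double tdlut_opt_time;

	if (tdlut_bytes_per_frame > cursor_bytes)
		tdlut_opt_time = (tdlut_bytes_per_frame - cursor_bytes) / tdlut_drain_rate;
	else
		tdlut_opt_time = 0.0;	/* the old expression went negative here */

	printf("tdlut_opt_time = %f s\n", tdlut_opt_time);
	return 0;
}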

View File

@ -909,6 +909,7 @@ static void populate_dml_surface_cfg_from_plane_state(enum dml_project_id dml2_p
out->SourcePixelFormat[location] = dml_420_10;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
out->SourcePixelFormat[location] = dml_444_64;

View File

@ -951,8 +951,8 @@ void dce110_edp_backlight_control(
struct dc_context *ctx = link->ctx;
struct bp_transmitter_control cntl = { 0 };
uint8_t pwrseq_instance = 0;
unsigned int pre_T11_delay = OLED_PRE_T11_DELAY;
unsigned int post_T7_delay = OLED_POST_T7_DELAY;
unsigned int pre_T11_delay = (link->dpcd_sink_ext_caps.bits.oled ? OLED_PRE_T11_DELAY : 0);
unsigned int post_T7_delay = (link->dpcd_sink_ext_caps.bits.oled ? OLED_POST_T7_DELAY : 0);
if (dal_graphics_object_id_get_connector_id(link->link_enc->connector)
!= CONNECTOR_ID_EDP) {
@ -1067,7 +1067,8 @@ void dce110_edp_backlight_control(
if (!enable) {
/*follow oem panel config's requirement*/
pre_T11_delay += link->panel_config.pps.extra_pre_t11_ms;
msleep(pre_T11_delay);
if (pre_T11_delay)
msleep(pre_T11_delay);
}
}
@ -1216,7 +1217,7 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
struct dce_hwseq *hws = link->dc->hwseq;
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
if (!link->skip_implict_edp_power_control)
if (!link->skip_implict_edp_power_control && hws)
hws->funcs.edp_backlight_control(link, false);
link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
}

View File

@ -368,6 +368,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
struct mod_hdcp_display *display = get_first_active_display(hdcp);
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (!display)
return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));

View File

@ -1319,9 +1319,9 @@ static void ast_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *s
/*
* Concurrent operations could possibly trigger a call to
* drm_connector_helper_funcs.get_modes by trying to read the
* display modes. Protect access to I/O registers by acquiring
* the I/O-register lock. Released in atomic_flush().
* drm_connector_helper_funcs.get_modes by reading the display
* modes. Protect access to registers by acquiring the modeset
* lock.
*/
mutex_lock(&ast->modeset_lock);
drm_atomic_helper_commit_tail(state);

View File

@ -568,15 +568,18 @@ static int cdns_dsi_check_conf(struct cdns_dsi *dsi,
struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy;
unsigned long dsi_hss_hsa_hse_hbp;
unsigned int nlanes = output->dev->lanes;
int mode_clock = (mode_valid_check ? mode->clock : mode->crtc_clock);
int ret;
ret = cdns_dsi_mode2cfg(dsi, mode, dsi_cfg, mode_valid_check);
if (ret)
return ret;
phy_mipi_dphy_get_default_config(mode->crtc_clock * 1000,
mipi_dsi_pixel_format_to_bpp(output->dev->format),
nlanes, phy_cfg);
ret = phy_mipi_dphy_get_default_config(mode_clock * 1000,
mipi_dsi_pixel_format_to_bpp(output->dev->format),
nlanes, phy_cfg);
if (ret)
return ret;
ret = cdns_dsi_adjust_phy_config(dsi, dsi_cfg, phy_cfg, mode, mode_valid_check);
if (ret)
@ -680,6 +683,11 @@ static void cdns_dsi_bridge_post_disable(struct drm_bridge *bridge)
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
dsi->phy_initialized = false;
dsi->link_initialized = false;
phy_power_off(dsi->dphy);
phy_exit(dsi->dphy);
pm_runtime_put(dsi->base.dev);
}
@ -761,7 +769,7 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy;
unsigned long tx_byte_period;
struct cdns_dsi_cfg dsi_cfg;
u32 tmp, reg_wakeup, div;
u32 tmp, reg_wakeup, div, status;
int nlanes;
if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
@ -778,6 +786,19 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
cdns_dsi_hs_init(dsi);
cdns_dsi_init_link(dsi);
/*
* Now that the DSI Link and DSI Phy are initialized,
* wait for the CLK and Data Lanes to be ready.
*/
tmp = CLK_LANE_RDY;
for (int i = 0; i < nlanes; i++)
tmp |= DATA_LANE_RDY(i);
if (readl_poll_timeout(dsi->regs + MCTL_MAIN_STS, status,
(tmp == (status & tmp)), 100, 500000))
dev_err(dsi->base.dev,
"Timed Out: DSI-DPhy Clock and Data Lanes not ready.\n");
writel(HBP_LEN(dsi_cfg.hbp) | HSA_LEN(dsi_cfg.hsa),
dsi->regs + VID_HSIZE1);
writel(HFP_LEN(dsi_cfg.hfp) | HACT_LEN(dsi_cfg.hact),
@ -952,7 +973,7 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
bridge = drm_panel_bridge_add_typed(panel,
DRM_MODE_CONNECTOR_DSI);
} else {
bridge = of_drm_find_bridge(dev->dev.of_node);
bridge = of_drm_find_bridge(np);
if (!bridge)
bridge = ERR_PTR(-EINVAL);
}
@ -1152,7 +1173,6 @@ static int __maybe_unused cdns_dsi_suspend(struct device *dev)
clk_disable_unprepare(dsi->dsi_sys_clk);
clk_disable_unprepare(dsi->dsi_p_clk);
reset_control_assert(dsi->dsi_p_rst);
dsi->link_initialized = false;
return 0;
}

View File

@ -331,12 +331,18 @@ static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata)
* 200 ms. We'll assume that the panel driver will have the hardcoded
* delay in its prepare and always disable HPD.
*
* If HPD somehow makes sense on some future panel we'll have to
* change this to be conditional on someone specifying that HPD should
* be used.
* For DisplayPort bridge type, we need HPD. So we use the bridge type
* to conditionally disable HPD.
* NOTE: The bridge type is set in ti_sn_bridge_probe() but enable_comms()
* can be called before. So for DisplayPort, HPD will be enabled once
* bridge type is set. We are using bridge type instead of "no-hpd"
* property because it is not used properly in devicetree description
* and hence is unreliable.
*/
regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE,
HPD_DISABLE);
if (pdata->bridge.type != DRM_MODE_CONNECTOR_DisplayPort)
regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE,
HPD_DISABLE);
pdata->comms_enabled = true;
@ -424,36 +430,8 @@ static int status_show(struct seq_file *s, void *data)
return 0;
}
DEFINE_SHOW_ATTRIBUTE(status);
static void ti_sn65dsi86_debugfs_remove(void *data)
{
debugfs_remove_recursive(data);
}
static void ti_sn65dsi86_debugfs_init(struct ti_sn65dsi86 *pdata)
{
struct device *dev = pdata->dev;
struct dentry *debugfs;
int ret;
debugfs = debugfs_create_dir(dev_name(dev), NULL);
/*
* We might get an error back if debugfs wasn't enabled in the kernel
* so let's just silently return upon failure.
*/
if (IS_ERR_OR_NULL(debugfs))
return;
ret = devm_add_action_or_reset(dev, ti_sn65dsi86_debugfs_remove, debugfs);
if (ret)
return;
debugfs_create_file("status", 0600, debugfs, pdata, &status_fops);
}
/* -----------------------------------------------------------------------------
* Auxiliary Devices (*not* AUX)
*/
@ -1201,9 +1179,14 @@ static enum drm_connector_status ti_sn_bridge_detect(struct drm_bridge *bridge)
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
int val = 0;
pm_runtime_get_sync(pdata->dev);
/*
* Runtime reference is grabbed in ti_sn_bridge_hpd_enable()
* as the chip won't report HPD just after being powered on.
* HPD_DEBOUNCED_STATE reflects correct state only after the
* debounce time (~100-400 ms).
*/
regmap_read(pdata->regmap, SN_HPD_DISABLE_REG, &val);
pm_runtime_put_autosuspend(pdata->dev);
return val & HPD_DEBOUNCED_STATE ? connector_status_connected
: connector_status_disconnected;
@ -1217,6 +1200,35 @@ static const struct drm_edid *ti_sn_bridge_edid_read(struct drm_bridge *bridge,
return drm_edid_read_ddc(connector, &pdata->aux.ddc);
}
static void ti_sn65dsi86_debugfs_init(struct drm_bridge *bridge, struct dentry *root)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
struct dentry *debugfs;
debugfs = debugfs_create_dir(dev_name(pdata->dev), root);
debugfs_create_file("status", 0600, debugfs, pdata, &status_fops);
}
static void ti_sn_bridge_hpd_enable(struct drm_bridge *bridge)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
/*
* Device needs to be powered on before reading the HPD state
* for reliable hpd detection in ti_sn_bridge_detect() due to
* the high debounce time.
*/
pm_runtime_get_sync(pdata->dev);
}
static void ti_sn_bridge_hpd_disable(struct drm_bridge *bridge)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
pm_runtime_put_autosuspend(pdata->dev);
}
static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
.attach = ti_sn_bridge_attach,
.detach = ti_sn_bridge_detach,
@ -1230,6 +1242,9 @@ static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.debugfs_init = ti_sn65dsi86_debugfs_init,
.hpd_enable = ti_sn_bridge_hpd_enable,
.hpd_disable = ti_sn_bridge_hpd_disable,
};
static void ti_sn_bridge_parse_lanes(struct ti_sn65dsi86 *pdata,
@ -1318,8 +1333,26 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
pdata->bridge.type = pdata->next_bridge->type == DRM_MODE_CONNECTOR_DisplayPort
? DRM_MODE_CONNECTOR_DisplayPort : DRM_MODE_CONNECTOR_eDP;
if (pdata->bridge.type == DRM_MODE_CONNECTOR_DisplayPort)
pdata->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT;
if (pdata->bridge.type == DRM_MODE_CONNECTOR_DisplayPort) {
pdata->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_HPD;
/*
* If comms were already enabled they would have been enabled
* with the wrong value of HPD_DISABLE. Update it now. Comms
* could be enabled if anyone is holding a pm_runtime reference
* (like if a GPIO is in use). Note that in most cases nobody
* is doing AUX channel xfers before the bridge is added so
* HPD doesn't _really_ matter then. The only exception is in
* the eDP case where the panel wants to read the EDID before
* the bridge is added. We always consistently have HPD disabled
* for eDP.
*/
mutex_lock(&pdata->comms_mutex);
if (pdata->comms_enabled)
regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG,
HPD_DISABLE, 0);
mutex_unlock(&pdata->comms_mutex);
}
drm_bridge_add(&pdata->bridge);
@ -1938,8 +1971,6 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
if (ret)
return ret;
ti_sn65dsi86_debugfs_init(pdata);
/*
* Break ourselves up into a collection of aux devices. The only real
* motivation here is to solve the chicken-and-egg problem of probe

View File

@ -1,6 +1,7 @@
// SPDX-License-Identifier: MIT
#include <linux/fb.h>
#include <linux/vmalloc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
@ -72,43 +73,108 @@ static const struct fb_ops drm_fbdev_dma_fb_ops = {
.fb_destroy = drm_fbdev_dma_fb_destroy,
};
FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma_shadowed,
drm_fb_helper_damage_range,
drm_fb_helper_damage_area);
static int drm_fbdev_dma_deferred_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
static void drm_fbdev_dma_shadowed_fb_destroy(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_framebuffer *fb = fb_helper->fb;
struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
void *shadow = info->screen_buffer;
if (!dma->map_noncoherent)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
if (!fb_helper->dev)
return;
return fb_deferred_io_mmap(info, vma);
if (info->fbdefio)
fb_deferred_io_cleanup(info);
drm_fb_helper_fini(fb_helper);
vfree(shadow);
drm_client_buffer_vunmap(fb_helper->buffer);
drm_client_framebuffer_delete(fb_helper->buffer);
drm_client_release(&fb_helper->client);
drm_fb_helper_unprepare(fb_helper);
kfree(fb_helper);
}
static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = {
static const struct fb_ops drm_fbdev_dma_shadowed_fb_ops = {
.owner = THIS_MODULE,
.fb_open = drm_fbdev_dma_fb_open,
.fb_release = drm_fbdev_dma_fb_release,
__FB_DEFAULT_DEFERRED_OPS_RDWR(drm_fbdev_dma),
FB_DEFAULT_DEFERRED_OPS(drm_fbdev_dma_shadowed),
DRM_FB_HELPER_DEFAULT_OPS,
__FB_DEFAULT_DEFERRED_OPS_DRAW(drm_fbdev_dma),
.fb_mmap = drm_fbdev_dma_deferred_fb_mmap,
.fb_destroy = drm_fbdev_dma_fb_destroy,
.fb_destroy = drm_fbdev_dma_shadowed_fb_destroy,
};
/*
* struct drm_fb_helper
*/
static void drm_fbdev_dma_damage_blit_real(struct drm_fb_helper *fb_helper,
struct drm_clip_rect *clip,
struct iosys_map *dst)
{
struct drm_framebuffer *fb = fb_helper->fb;
size_t offset = clip->y1 * fb->pitches[0];
size_t len = clip->x2 - clip->x1;
unsigned int y;
void *src;
switch (drm_format_info_bpp(fb->format, 0)) {
case 1:
offset += clip->x1 / 8;
len = DIV_ROUND_UP(len + clip->x1 % 8, 8);
break;
case 2:
offset += clip->x1 / 4;
len = DIV_ROUND_UP(len + clip->x1 % 4, 4);
break;
case 4:
offset += clip->x1 / 2;
len = DIV_ROUND_UP(len + clip->x1 % 2, 2);
break;
default:
offset += clip->x1 * fb->format->cpp[0];
len *= fb->format->cpp[0];
break;
}
src = fb_helper->info->screen_buffer + offset;
iosys_map_incr(dst, offset); /* go to first pixel within clip rect */
for (y = clip->y1; y < clip->y2; y++) {
iosys_map_memcpy_to(dst, 0, src, len);
iosys_map_incr(dst, fb->pitches[0]);
src += fb->pitches[0];
}
}
static int drm_fbdev_dma_damage_blit(struct drm_fb_helper *fb_helper,
struct drm_clip_rect *clip)
{
struct drm_client_buffer *buffer = fb_helper->buffer;
struct iosys_map dst;
/*
* For fbdev emulation, we only have to protect against fbdev modeset
* operations. Nothing else will involve the client buffer's BO. So it
* is sufficient to acquire struct drm_fb_helper.lock here.
*/
mutex_lock(&fb_helper->lock);
dst = buffer->map;
drm_fbdev_dma_damage_blit_real(fb_helper, clip, &dst);
mutex_unlock(&fb_helper->lock);
return 0;
}
static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
return drm_fbdev_dma_driver_fbdev_probe(fb_helper, sizes);
}
static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
struct drm_clip_rect *clip)
{
@ -120,6 +186,10 @@ static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
return 0;
if (helper->fb->funcs->dirty) {
ret = drm_fbdev_dma_damage_blit(helper, clip);
if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
return ret;
ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
return ret;
@ -137,14 +207,80 @@ static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
* struct drm_fb_helper
*/
static int drm_fbdev_dma_driver_fbdev_probe_tail(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_device *dev = fb_helper->dev;
struct drm_client_buffer *buffer = fb_helper->buffer;
struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(buffer->gem);
struct drm_framebuffer *fb = fb_helper->fb;
struct fb_info *info = fb_helper->info;
struct iosys_map map = buffer->map;
info->fbops = &drm_fbdev_dma_fb_ops;
/* screen */
info->flags |= FBINFO_VIRTFB; /* system memory */
if (dma_obj->map_noncoherent)
info->flags |= FBINFO_READS_FAST; /* signal caching */
info->screen_size = sizes->surface_height * fb->pitches[0];
info->screen_buffer = map.vaddr;
if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
}
info->fix.smem_len = info->screen_size;
return 0;
}
static int drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_client_buffer *buffer = fb_helper->buffer;
struct fb_info *info = fb_helper->info;
size_t screen_size = buffer->gem->size;
void *screen_buffer;
int ret;
/*
* Deferred I/O requires struct page for framebuffer memory,
* which is not guaranteed for all DMA ranges. We thus create
* a shadow buffer in system memory.
*/
screen_buffer = vzalloc(screen_size);
if (!screen_buffer)
return -ENOMEM;
info->fbops = &drm_fbdev_dma_shadowed_fb_ops;
/* screen */
info->flags |= FBINFO_VIRTFB; /* system memory */
info->flags |= FBINFO_READS_FAST; /* signal caching */
info->screen_buffer = screen_buffer;
info->fix.smem_len = screen_size;
fb_helper->fbdefio.delay = HZ / 20;
fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
info->fbdefio = &fb_helper->fbdefio;
ret = fb_deferred_io_init(info);
if (ret)
goto err_vfree;
return 0;
err_vfree:
vfree(screen_buffer);
return ret;
}
int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
bool use_deferred_io = false;
struct drm_client_buffer *buffer;
struct drm_gem_dma_object *dma_obj;
struct drm_framebuffer *fb;
struct fb_info *info;
u32 format;
@ -161,19 +297,9 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
sizes->surface_height, format);
if (IS_ERR(buffer))
return PTR_ERR(buffer);
dma_obj = to_drm_gem_dma_obj(buffer->gem);
fb = buffer->fb;
/*
* Deferred I/O requires struct page for framebuffer memory,
* which is not guaranteed for all DMA ranges. We thus only
* install deferred I/O if we have a framebuffer that requires
* it.
*/
if (fb->funcs->dirty)
use_deferred_io = true;
ret = drm_client_buffer_vmap(buffer, &map);
if (ret) {
goto err_drm_client_buffer_delete;
@ -194,45 +320,12 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
drm_fb_helper_fill_info(info, fb_helper, sizes);
if (use_deferred_io)
info->fbops = &drm_fbdev_dma_deferred_fb_ops;
if (fb->funcs->dirty)
ret = drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(fb_helper, sizes);
else
info->fbops = &drm_fbdev_dma_fb_ops;
/* screen */
info->flags |= FBINFO_VIRTFB; /* system memory */
if (dma_obj->map_noncoherent)
info->flags |= FBINFO_READS_FAST; /* signal caching */
info->screen_size = sizes->surface_height * fb->pitches[0];
info->screen_buffer = map.vaddr;
if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
}
info->fix.smem_len = info->screen_size;
/*
* Only set up deferred I/O if the screen buffer supports
* it. If this disagrees with the previous test for ->dirty,
* mmap on the /dev/fb file might not work correctly.
*/
if (!is_vmalloc_addr(info->screen_buffer) && info->fix.smem_start) {
unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT;
if (drm_WARN_ON(dev, !pfn_to_page(pfn)))
use_deferred_io = false;
}
/* deferred I/O */
if (use_deferred_io) {
fb_helper->fbdefio.delay = HZ / 20;
fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
info->fbdefio = &fb_helper->fbdefio;
ret = fb_deferred_io_init(info);
if (ret)
goto err_drm_fb_helper_release_info;
}
ret = drm_fbdev_dma_driver_fbdev_probe_tail(fb_helper, sizes);
if (ret)
goto err_drm_fb_helper_release_info;
return 0;
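The blitter's switch maps a pixel clip onto byte offsets for packed sub-byte formats. A standalone model of that math, plus one worked case:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static void clip_to_bytes(unsigned int bpp, unsigned int x1, unsigned int x2,
			  unsigned int *off, unsigned int *len)
{
	*len = x2 - x1;
	switch (bpp) {
	case 1:	/* 8 pixels per byte */
		*off = x1 / 8;
		*len = DIV_ROUND_UP(*len + x1 % 8, 8);
		break;
	case 2:	/* 4 pixels per byte */
		*off = x1 / 4;
		*len = DIV_ROUND_UP(*len + x1 % 4, 4);
		break;
	case 4:	/* 2 pixels per byte */
		*off = x1 / 2;
		*len = DIV_ROUND_UP(*len + x1 % 2, 2);
		break;
	default:	/* whole bytes per pixel */
		*off = x1 * (bpp / 8);
		*len *= bpp / 8;
		break;
	}
}

int main(void)
{
	unsigned int off, len;

	/* 1 bpp, pixels 13..20: the clip starts mid-byte, so bytes 1..2 copy */
	clip_to_bytes(1, 13, 21, &off, &len);
	printf("offset=%u len=%u\n", off, len);	/* offset=1 len=2 */
	return 0;
}

The y loop then adds clip->y1 * fb->pitches[0] and advances one pitch per row, so only the damaged rows are copied from the shadow into the DMA buffer.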

View File

@ -34,6 +34,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
*sched_job)
{
struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
struct drm_gpu_scheduler *sched = sched_job->sched;
struct etnaviv_gpu *gpu = submit->gpu;
u32 dma_addr;
int change;
@ -76,7 +77,9 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
return DRM_GPU_SCHED_STAT_NOMINAL;
out_no_timeout:
list_add(&sched_job->list, &sched_job->sched->pending_list);
spin_lock(&sched->job_list_lock);
list_add(&sched_job->list, &sched->pending_list);
spin_unlock(&sched->job_list_lock);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
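The fix is purely about taking the scheduler's job_list_lock before touching pending_list, since the scheduler thread walks and modifies that list concurrently. A minimal pthread model of the same rule, with a counter standing in for the list:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t job_list_lock = PTHREAD_MUTEX_INITIALIZER;
static int pending_jobs;

static void readd_job(void)
{
	pthread_mutex_lock(&job_list_lock);
	pending_jobs++;			/* stands in for list_add() */
	pthread_mutex_unlock(&job_list_lock);
}

static void *scheduler_thread(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++)
		readd_job();
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, scheduler_thread, NULL);
	for (int i = 0; i < 100000; i++)
		readd_job();
	pthread_join(t, NULL);
	printf("%d\n", pending_jobs);	/* 200000, deterministic under the lock */
	return 0;
}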

View File

@ -1059,7 +1059,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
BXT_MIPI_TRANS_VACTIVE(port));
adjusted_mode->crtc_vtotal =
intel_de_read(display,
BXT_MIPI_TRANS_VTOTAL(port));
BXT_MIPI_TRANS_VTOTAL(port)) + 1;
hactive = adjusted_mode->crtc_hdisplay;
hfp = intel_de_read(display, MIPI_HFP_COUNT(display, port));
@ -1264,7 +1264,7 @@ static void set_dsi_timings(struct intel_encoder *encoder,
intel_de_write(display, BXT_MIPI_TRANS_VACTIVE(port),
adjusted_mode->crtc_vdisplay);
intel_de_write(display, BXT_MIPI_TRANS_VTOTAL(port),
adjusted_mode->crtc_vtotal);
adjusted_mode->crtc_vtotal - 1);
}
intel_de_write(display, MIPI_HACTIVE_AREA_COUNT(display, port),

View File

@ -111,7 +111,7 @@ static unsigned int config_bit(const u64 config)
return other_bit(config);
}
static u32 config_mask(const u64 config)
static __always_inline u32 config_mask(const u64 config)
{
unsigned int bit = config_bit(config);

View File

@ -928,16 +928,17 @@ enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge,
return -EINVAL;
}
if (mode->clock > DP_MAX_PIXEL_CLK_KHZ)
return MODE_CLOCK_HIGH;
dp_display = container_of(dp, struct dp_display_private, dp_display);
link_info = &dp_display->panel->link_info;
if (drm_mode_is_420_only(&dp->connector->display_info, mode) &&
dp_display->panel->vsc_sdp_supported)
if ((drm_mode_is_420_only(&dp->connector->display_info, mode) &&
dp_display->panel->vsc_sdp_supported) ||
msm_dp_wide_bus_available(dp))
mode_pclk_khz /= 2;
if (mode_pclk_khz > DP_MAX_PIXEL_CLK_KHZ)
return MODE_CLOCK_HIGH;
mode_bpp = dp->connector->display_info.bpc * num_components;
if (!mode_bpp)
mode_bpp = default_bpp;
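
The check above (and the matching eDP one in the next hunk) encodes one rule: with wide bus enabled the controller moves two pixels per clock, so the rate it must sustain is half the mode's pixel clock, the same halving already applied to YUV420-only modes. Minimal model of the comparison (675 MHz mirrors the driver's DP_MAX_PIXEL_CLK_KHZ; treat the value as an assumption):

#include <stdbool.h>
#include <stdio.h>

#define DP_MAX_PIXEL_CLK_KHZ 675000

static bool mode_ok(unsigned int mode_pclk_khz, bool wide_bus)
{
	if (wide_bus)
		mode_pclk_khz /= 2;	/* two pixels per link clock */
	return mode_pclk_khz <= DP_MAX_PIXEL_CLK_KHZ;
}

int main(void)
{
	/* a ~1.19 GHz mode: rejected on a narrow bus, accepted with wide bus */
	printf("narrow: %d, wide: %d\n",
	       mode_ok(1188000, false), mode_ok(1188000, true));
	return 0;
}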


@@ -257,7 +257,10 @@ static enum drm_mode_status edp_bridge_mode_valid(struct drm_bridge *bridge,
return -EINVAL;
}
if (mode->clock > DP_MAX_PIXEL_CLK_KHZ)
if (msm_dp_wide_bus_available(dp))
mode_pclk_khz /= 2;
if (mode_pclk_khz > DP_MAX_PIXEL_CLK_KHZ)
return MODE_CLOCK_HIGH;
/*


@@ -156,6 +156,7 @@ void msm_devfreq_init(struct msm_gpu *gpu)
priv->gpu_devfreq_config.downdifferential = 10;
mutex_init(&df->lock);
df->suspended = true;
ret = dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq,
DEV_PM_QOS_MIN_FREQUENCY, 0);


@@ -189,6 +189,7 @@ static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{
struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
drm_sched_fence_scheduled(job->s_fence, NULL);
drm_sched_fence_finished(job->s_fence, -ESRCH);
WARN_ON(job->s_fence->parent);
job->sched->ops->free_job(job);


@@ -1320,10 +1320,16 @@ static struct drm_plane *tegra_dc_add_shared_planes(struct drm_device *drm,
if (wgrp->dc == dc->pipe) {
for (j = 0; j < wgrp->num_windows; j++) {
unsigned int index = wgrp->windows[j];
enum drm_plane_type type;
if (primary)
type = DRM_PLANE_TYPE_OVERLAY;
else
type = DRM_PLANE_TYPE_PRIMARY;
plane = tegra_shared_plane_create(drm, dc,
wgrp->index,
index);
index, type);
if (IS_ERR(plane))
return plane;
@@ -1331,10 +1337,8 @@ static struct drm_plane *tegra_dc_add_shared_planes(struct drm_device *drm,
* Choose the first shared plane owned by this
* head as the primary plane.
*/
if (!primary) {
plane->type = DRM_PLANE_TYPE_PRIMARY;
if (!primary)
primary = plane;
}
}
}
}
@@ -1388,7 +1392,10 @@ static void tegra_crtc_reset(struct drm_crtc *crtc)
if (crtc->state)
tegra_crtc_atomic_destroy_state(crtc, crtc->state);
__drm_atomic_helper_crtc_reset(crtc, &state->base);
if (state)
__drm_atomic_helper_crtc_reset(crtc, &state->base);
else
__drm_atomic_helper_crtc_reset(crtc, NULL);
}
static struct drm_crtc_state *


@@ -755,9 +755,9 @@ static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
struct tegra_dc *dc,
unsigned int wgrp,
unsigned int index)
unsigned int index,
enum drm_plane_type type)
{
enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
struct tegra_drm *tegra = drm->dev_private;
struct tegra_display_hub *hub = tegra->hub;
struct tegra_shared_plane *plane;


@@ -80,7 +80,8 @@ void tegra_display_hub_cleanup(struct tegra_display_hub *hub);
struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
struct tegra_dc *dc,
unsigned int wgrp,
unsigned int index);
unsigned int index,
enum drm_plane_type type);
int tegra_display_hub_atomic_check(struct drm_device *drm,
struct drm_atomic_state *state);


@@ -318,7 +318,6 @@ static void cirrus_pitch_set(struct cirrus_device *cirrus, unsigned int pitch)
/* Enable extended blanking and pitch bits, and enable full memory */
cr1b = 0x22;
cr1b |= (pitch >> 7) & 0x10;
cr1b |= (pitch >> 6) & 0x40;
wreg_crt(cirrus, 0x1b, cr1b);
cirrus_set_start_address(cirrus, 0);


@@ -126,9 +126,9 @@ static void udl_usb_disconnect(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
drm_dev_unplug(dev);
drm_kms_helper_poll_fini(dev);
udl_drop_usb(dev);
drm_dev_unplug(dev);
}
/*


@@ -96,6 +96,8 @@ int xe_display_create(struct xe_device *xe)
spin_lock_init(&xe->display.fb_tracking.lock);
xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
if (!xe->display.hotplug.dp_wq)
return -ENOMEM;
return drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
}


@@ -198,6 +198,13 @@ static const struct xe_ggtt_pt_ops xelpg_pt_wa_ops = {
.ggtt_set_pte = xe_ggtt_set_pte_and_flush,
};
static void dev_fini_ggtt(void *arg)
{
struct xe_ggtt *ggtt = arg;
drain_workqueue(ggtt->wq);
}
/**
* xe_ggtt_init_early - Early GGTT initialization
* @ggtt: the &xe_ggtt to be initialized
@@ -254,6 +261,10 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
if (err)
return err;
err = devm_add_action_or_reset(xe->drm.dev, dev_fini_ggtt, ggtt);
if (err)
return err;
if (IS_SRIOV_VF(xe)) {
err = xe_gt_sriov_vf_prepare_ggtt(xe_tile_get_gt(ggtt->tile, 0));
if (err)


@@ -51,7 +51,15 @@ static inline void xe_sched_tdr_queue_imm(struct xe_gpu_scheduler *sched)
static inline void xe_sched_resubmit_jobs(struct xe_gpu_scheduler *sched)
{
drm_sched_resubmit_jobs(&sched->base);
struct drm_sched_job *s_job;
list_for_each_entry(s_job, &sched->base.pending_list, list) {
struct drm_sched_fence *s_fence = s_job->s_fence;
struct dma_fence *hw_fence = s_fence->parent;
if (hw_fence && !dma_fence_is_signaled(hw_fence))
sched->base.ops->run_job(s_job);
}
}
static inline bool


@@ -137,6 +137,14 @@ void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
struct xe_gt_tlb_invalidation_fence *fence, *next;
int pending_seqno;
/*
* we can get here before the CTs are even initialized if we're wedging
* very early, in which case there are not going to be any pending
* fences so we can bail immediately.
*/
if (!xe_guc_ct_initialized(&gt->uc.guc.ct))
return;
/*
* CT channel is already disabled at this point. No new TLB requests can
* appear.


@@ -454,6 +454,9 @@ void xe_guc_ct_disable(struct xe_guc_ct *ct)
*/
void xe_guc_ct_stop(struct xe_guc_ct *ct)
{
if (!xe_guc_ct_initialized(ct))
return;
xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_STOPPED);
stop_g2h_handler(ct);
}
@@ -638,7 +641,7 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
u16 seqno;
int ret;
xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
xe_gt_assert(gt, xe_guc_ct_initialized(ct));
xe_gt_assert(gt, !g2h_len || !g2h_fence);
xe_gt_assert(gt, !num_g2h || !g2h_fence);
xe_gt_assert(gt, !g2h_len || num_g2h);
@@ -1209,7 +1212,7 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
u32 action;
u32 *hxg;
xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
xe_gt_assert(gt, xe_guc_ct_initialized(ct));
lockdep_assert_held(&ct->fast_lock);
if (ct->state == XE_GUC_CT_STATE_DISABLED)


@@ -23,6 +23,11 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot);
void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic);
static inline bool xe_guc_ct_initialized(struct xe_guc_ct *ct)
{
return ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED;
}
static inline bool xe_guc_ct_enabled(struct xe_guc_ct *ct)
{
return ct->state == XE_GUC_CT_STATE_ENABLED;


@@ -975,7 +975,7 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
goto out;
}
memset(pc->bo->vmap.vaddr, 0, size);
xe_map_memset(xe, &pc->bo->vmap, 0, 0, size);
slpc_shared_data_write(pc, header.size, size);
ret = pc_action_reset(pc);


@@ -227,6 +227,17 @@ static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q)
static void guc_submit_fini(struct drm_device *drm, void *arg)
{
struct xe_guc *guc = arg;
struct xe_device *xe = guc_to_xe(guc);
struct xe_gt *gt = guc_to_gt(guc);
int ret;
ret = wait_event_timeout(guc->submission_state.fini_wq,
xa_empty(&guc->submission_state.exec_queue_lookup),
HZ * 5);
drain_workqueue(xe->destroy_wq);
xe_gt_assert(gt, ret);
xa_destroy(&guc->submission_state.exec_queue_lookup);
}
@@ -298,6 +309,8 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
primelockdep(guc);
guc->submission_state.initialized = true;
return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
}
@@ -826,6 +839,13 @@ void xe_guc_submit_wedge(struct xe_guc *guc)
xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode);
/*
* If device is being wedged even before submission_state is
* initialized, there's nothing to do here.
*/
if (!guc->submission_state.initialized)
return;
err = devm_add_action_or_reset(guc_to_xe(guc)->drm.dev,
guc_submit_wedged_fini, guc);
if (err) {
@@ -1702,6 +1722,9 @@ int xe_guc_submit_reset_prepare(struct xe_guc *guc)
{
int ret;
if (!guc->submission_state.initialized)
return 0;
/*
* Using an atomic here rather than submission_state.lock as this
* function can be called while holding the CT lock (engine reset


@@ -74,6 +74,11 @@ struct xe_guc {
struct mutex lock;
/** @submission_state.enabled: submission is enabled */
bool enabled;
/**
* @submission_state.initialized: mark when submission state is
* even initialized - before that not even the lock is valid
*/
bool initialized;
/** @submission_state.fini_wq: submit fini wait queue */
wait_queue_head_t fini_wq;
} submission_state;


@@ -57,38 +57,6 @@ bool xe_ttm_stolen_cpu_access_needs_ggtt(struct xe_device *xe)
return GRAPHICS_VERx100(xe) < 1270 && !IS_DGFX(xe);
}
static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
{
struct xe_tile *tile = xe_device_get_root_tile(xe);
struct xe_gt *mmio = xe_root_mmio_gt(xe);
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
u64 stolen_size;
u64 tile_offset;
u64 tile_size;
tile_offset = tile->mem.vram.io_start - xe->mem.vram.io_start;
tile_size = tile->mem.vram.actual_physical_size;
/* Use DSM base address instead for stolen memory */
mgr->stolen_base = (xe_mmio_read64_2x32(mmio, DSMBASE) & BDSM_MASK) - tile_offset;
if (drm_WARN_ON(&xe->drm, tile_size < mgr->stolen_base))
return 0;
stolen_size = tile_size - mgr->stolen_base;
/* Verify usage fits in the actual resource available */
if (mgr->stolen_base + stolen_size <= pci_resource_len(pdev, LMEM_BAR))
mgr->io_base = tile->mem.vram.io_start + mgr->stolen_base;
/*
* There may be few KB of platform dependent reserved memory at the end
* of vram which is not part of the DSM. Such reserved memory portion is
* always less then DSM granularity so align down the stolen_size to DSM
* granularity to accommodate such reserve vram portion.
*/
return ALIGN_DOWN(stolen_size, SZ_1M);
}
static u32 get_wopcm_size(struct xe_device *xe)
{
u32 wopcm_size;
@@ -112,6 +80,44 @@ static u32 get_wopcm_size(struct xe_device *xe)
return wopcm_size;
}
static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
{
struct xe_tile *tile = xe_device_get_root_tile(xe);
struct xe_gt *mmio = xe_root_mmio_gt(xe);
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
u64 stolen_size, wopcm_size;
u64 tile_offset;
u64 tile_size;
tile_offset = tile->mem.vram.io_start - xe->mem.vram.io_start;
tile_size = tile->mem.vram.actual_physical_size;
/* Use DSM base address instead for stolen memory */
mgr->stolen_base = (xe_mmio_read64_2x32(mmio, DSMBASE) & BDSM_MASK) - tile_offset;
if (drm_WARN_ON(&xe->drm, tile_size < mgr->stolen_base))
return 0;
/* Carve out the top of DSM as it contains the reserved WOPCM region */
wopcm_size = get_wopcm_size(xe);
if (drm_WARN_ON(&xe->drm, !wopcm_size))
return 0;
stolen_size = tile_size - mgr->stolen_base;
stolen_size -= wopcm_size;
/* Verify usage fits in the actual resource available */
if (mgr->stolen_base + stolen_size <= pci_resource_len(pdev, LMEM_BAR))
mgr->io_base = tile->mem.vram.io_start + mgr->stolen_base;
/*
* There may be few KB of platform dependent reserved memory at the end
* of vram which is not part of the DSM. Such reserved memory portion is
* always less then DSM granularity so align down the stolen_size to DSM
* granularity to accommodate such reserve vram portion.
*/
return ALIGN_DOWN(stolen_size, SZ_1M);
}
static u32 detect_bar2_integrated(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
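
The reshuffled detect_bar2_dgfx() now subtracts the reserved WOPCM region from the top of the DSM before exposing stolen memory. The arithmetic with illustrative sizes (real values come from DSMBASE and get_wopcm_size()):

#include <stdio.h>

int main(void)
{
	unsigned long long tile_size   = 8ULL << 30;			/* 8 GiB tile */
	unsigned long long stolen_base = tile_size - (64ULL << 20);	/* DSM base */
	unsigned long long wopcm_size  = 2ULL << 20;			/* platform-dependent */

	unsigned long long stolen = tile_size - stolen_base;	/* 64 MiB of DSM */
	stolen -= wopcm_size;					/* carve out WOPCM */
	stolen &= ~((1ULL << 20) - 1);				/* ALIGN_DOWN(..., SZ_1M) */

	printf("usable stolen: %llu MiB\n", stolen >> 20);
	return 0;
}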


@@ -1477,8 +1477,10 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
* scheduler drops all the references of it, hence protecting the VM
* for this case is necessary.
*/
if (flags & XE_VM_FLAG_LR_MODE)
if (flags & XE_VM_FLAG_LR_MODE) {
INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
xe_pm_runtime_get_noresume(xe);
}
vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
if (!vm_resv_obj) {
@@ -1523,10 +1525,8 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
vm->batch_invalidate_tlb = true;
}
if (vm->flags & XE_VM_FLAG_LR_MODE) {
INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
if (vm->flags & XE_VM_FLAG_LR_MODE)
vm->batch_invalidate_tlb = false;
}
/* Fill pt_root after allocating scratch tables */
for_each_tile(tile, xe, id) {


@@ -529,11 +529,14 @@ static void lenovo_features_set_cptkbd(struct hid_device *hdev)
/*
* Tell the keyboard a driver understands it, and turn F7, F9, F11 into
* regular keys
* regular keys (Compact only)
*/
ret = lenovo_send_cmd_cptkbd(hdev, 0x01, 0x03);
if (ret)
hid_warn(hdev, "Failed to switch F7/9/11 mode: %d\n", ret);
if (hdev->product == USB_DEVICE_ID_LENOVO_CUSBKBD ||
hdev->product == USB_DEVICE_ID_LENOVO_CBTKBD) {
ret = lenovo_send_cmd_cptkbd(hdev, 0x01, 0x03);
if (ret)
hid_warn(hdev, "Failed to switch F7/9/11 mode: %d\n", ret);
}
/* Switch middle button to native mode */
ret = lenovo_send_cmd_cptkbd(hdev, 0x09, 0x01);


@@ -2021,14 +2021,18 @@ static int wacom_initialize_remotes(struct wacom *wacom)
remote->remote_dir = kobject_create_and_add("wacom_remote",
&wacom->hdev->dev.kobj);
if (!remote->remote_dir)
if (!remote->remote_dir) {
kfifo_free(&remote->remote_fifo);
return -ENOMEM;
}
error = sysfs_create_files(remote->remote_dir, remote_unpair_attrs);
if (error) {
hid_err(wacom->hdev,
"cannot create sysfs group err: %d\n", error);
kfifo_free(&remote->remote_fifo);
kobject_put(remote->remote_dir);
return error;
}
@@ -2874,6 +2878,7 @@ static void wacom_remove(struct hid_device *hdev)
hid_hw_stop(hdev);
cancel_delayed_work_sync(&wacom->init_work);
cancel_delayed_work_sync(&wacom->aes_battery_work);
cancel_work_sync(&wacom->wireless_work);
cancel_work_sync(&wacom->battery_work);
cancel_work_sync(&wacom->remote_work);


@@ -34,16 +34,21 @@ enum chips { max34440, max34441, max34446, max34451, max34460, max34461 };
/*
* The whole max344* family have IOUT_OC_WARN_LIMIT and IOUT_OC_FAULT_LIMIT
* swapped from the standard pmbus spec addresses.
* For max34451, version MAX34451ETNA6+ and later has this issue fixed.
*/
#define MAX34440_IOUT_OC_WARN_LIMIT 0x46
#define MAX34440_IOUT_OC_FAULT_LIMIT 0x4A
#define MAX34451ETNA6_MFR_REV 0x0012
#define MAX34451_MFR_CHANNEL_CONFIG 0xe4
#define MAX34451_MFR_CHANNEL_CONFIG_SEL_MASK 0x3f
struct max34440_data {
int id;
struct pmbus_driver_info info;
u8 iout_oc_warn_limit;
u8 iout_oc_fault_limit;
};
#define to_max34440_data(x) container_of(x, struct max34440_data, info)
@@ -60,11 +65,11 @@ static int max34440_read_word_data(struct i2c_client *client, int page,
switch (reg) {
case PMBUS_IOUT_OC_FAULT_LIMIT:
ret = pmbus_read_word_data(client, page, phase,
MAX34440_IOUT_OC_FAULT_LIMIT);
data->iout_oc_fault_limit);
break;
case PMBUS_IOUT_OC_WARN_LIMIT:
ret = pmbus_read_word_data(client, page, phase,
MAX34440_IOUT_OC_WARN_LIMIT);
data->iout_oc_warn_limit);
break;
case PMBUS_VIRT_READ_VOUT_MIN:
ret = pmbus_read_word_data(client, page, phase,
@@ -133,11 +138,11 @@ static int max34440_write_word_data(struct i2c_client *client, int page,
switch (reg) {
case PMBUS_IOUT_OC_FAULT_LIMIT:
ret = pmbus_write_word_data(client, page, MAX34440_IOUT_OC_FAULT_LIMIT,
ret = pmbus_write_word_data(client, page, data->iout_oc_fault_limit,
word);
break;
case PMBUS_IOUT_OC_WARN_LIMIT:
ret = pmbus_write_word_data(client, page, MAX34440_IOUT_OC_WARN_LIMIT,
ret = pmbus_write_word_data(client, page, data->iout_oc_warn_limit,
word);
break;
case PMBUS_VIRT_RESET_POUT_HISTORY:
@@ -235,6 +240,25 @@ static int max34451_set_supported_funcs(struct i2c_client *client,
*/
int page, rv;
bool max34451_na6 = false;
rv = i2c_smbus_read_word_data(client, PMBUS_MFR_REVISION);
if (rv < 0)
return rv;
if (rv >= MAX34451ETNA6_MFR_REV) {
max34451_na6 = true;
data->info.format[PSC_VOLTAGE_IN] = direct;
data->info.format[PSC_CURRENT_IN] = direct;
data->info.m[PSC_VOLTAGE_IN] = 1;
data->info.b[PSC_VOLTAGE_IN] = 0;
data->info.R[PSC_VOLTAGE_IN] = 3;
data->info.m[PSC_CURRENT_IN] = 1;
data->info.b[PSC_CURRENT_IN] = 0;
data->info.R[PSC_CURRENT_IN] = 2;
data->iout_oc_fault_limit = PMBUS_IOUT_OC_FAULT_LIMIT;
data->iout_oc_warn_limit = PMBUS_IOUT_OC_WARN_LIMIT;
}
for (page = 0; page < 16; page++) {
rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
@@ -251,16 +275,30 @@ static int max34451_set_supported_funcs(struct i2c_client *client,
case 0x20:
data->info.func[page] = PMBUS_HAVE_VOUT |
PMBUS_HAVE_STATUS_VOUT;
if (max34451_na6)
data->info.func[page] |= PMBUS_HAVE_VIN |
PMBUS_HAVE_STATUS_INPUT;
break;
case 0x21:
data->info.func[page] = PMBUS_HAVE_VOUT;
if (max34451_na6)
data->info.func[page] |= PMBUS_HAVE_VIN;
break;
case 0x22:
data->info.func[page] = PMBUS_HAVE_IOUT |
PMBUS_HAVE_STATUS_IOUT;
if (max34451_na6)
data->info.func[page] |= PMBUS_HAVE_IIN |
PMBUS_HAVE_STATUS_INPUT;
break;
case 0x23:
data->info.func[page] = PMBUS_HAVE_IOUT;
if (max34451_na6)
data->info.func[page] |= PMBUS_HAVE_IIN;
break;
default:
break;
@@ -494,6 +532,8 @@ static int max34440_probe(struct i2c_client *client)
return -ENOMEM;
data->id = i2c_match_id(max34440_id, client)->driver_data;
data->info = max34440_info[data->id];
data->iout_oc_fault_limit = MAX34440_IOUT_OC_FAULT_LIMIT;
data->iout_oc_warn_limit = MAX34440_IOUT_OC_WARN_LIMIT;
if (data->id == max34451) {
rv = max34451_set_supported_funcs(client, data);
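
The revision gate above picks register addresses per part: MAX34451 silicon reporting MFR revision >= 0x0012 (MAX34451ETNA6+) uses the standard PMBus IOUT_OC limit addresses, while older parts keep the family's swapped vendor addresses. Stand-alone C restating the selection (addresses taken from the diff and the PMBus spec):

#include <stdio.h>

#define PMBUS_IOUT_OC_FAULT_LIMIT	0x46	/* standard PMBus */
#define PMBUS_IOUT_OC_WARN_LIMIT	0x4A
#define MAX34440_IOUT_OC_WARN_LIMIT	0x46	/* swapped on older parts */
#define MAX34440_IOUT_OC_FAULT_LIMIT	0x4A
#define MAX34451ETNA6_MFR_REV		0x0012

int main(void)
{
	unsigned int mfr_rev = 0x0012;	/* pretend PMBUS_MFR_REVISION readback */
	unsigned int fault, warn;

	if (mfr_rev >= MAX34451ETNA6_MFR_REV) {
		fault = PMBUS_IOUT_OC_FAULT_LIMIT;
		warn  = PMBUS_IOUT_OC_WARN_LIMIT;
	} else {
		fault = MAX34440_IOUT_OC_FAULT_LIMIT;
		warn  = MAX34440_IOUT_OC_WARN_LIMIT;
	}
	printf("fault limit reg %#x, warn limit reg %#x\n", fault, warn);
	return 0;
}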


@@ -97,7 +97,8 @@ coresight_find_out_connection(struct coresight_device *src_dev,
static inline u32 coresight_read_claim_tags(struct coresight_device *csdev)
{
return csdev_access_relaxed_read32(&csdev->access, CORESIGHT_CLAIMCLR);
return FIELD_GET(CORESIGHT_CLAIM_MASK,
csdev_access_relaxed_read32(&csdev->access, CORESIGHT_CLAIMCLR));
}
static inline bool coresight_is_claimed_self_hosted(struct coresight_device *csdev)


@@ -35,6 +35,7 @@ extern const struct device_type coresight_dev_type[];
* Coresight device CLAIM protocol.
* See PSCI - ARM DEN 0022D, Section: 6.8.1 Debug and Trace save and restore.
*/
#define CORESIGHT_CLAIM_MASK GENMASK(1, 0)
#define CORESIGHT_CLAIM_SELF_HOSTED BIT(1)
#define TIMEOUT_US 100
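
The two CoreSight hunks belong together: only bits [1:0] of CLAIMCLR are architecturally defined claim tags, so the raw register value must be masked before comparing against CORESIGHT_CLAIM_SELF_HOSTED. User-space demo with GENMASK/FIELD_GET re-implemented so it builds outside the kernel (a sketch, not the kernel headers):

#include <stdio.h>

#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(m, v)	(((v) & (m)) >> __builtin_ctz(m))

#define CORESIGHT_CLAIM_MASK		GENMASK(1, 0)
#define CORESIGHT_CLAIM_SELF_HOSTED	(1u << 1)

int main(void)
{
	unsigned int claimclr = 0xf2;	/* implementation-defined high bits set */
	unsigned int tags = FIELD_GET(CORESIGHT_CLAIM_MASK, claimclr);

	/* the unmasked value (0xf2) would never equal the tag (0x2) */
	printf("raw %#x -> tags %#x, self-hosted: %d\n", claimclr, tags,
	       tags == CORESIGHT_CLAIM_SELF_HOSTED);
	return 0;
}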


@@ -111,6 +111,11 @@ static u32 osif_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
/* prevent invalid 0-length usb_control_msg */
static const struct i2c_adapter_quirks osif_quirks = {
.flags = I2C_AQ_NO_ZERO_LEN_READ,
};
static const struct i2c_algorithm osif_algorithm = {
.xfer = osif_xfer,
.functionality = osif_func,
@@ -143,6 +148,7 @@ static int osif_probe(struct usb_interface *interface,
priv->adapter.owner = THIS_MODULE;
priv->adapter.class = I2C_CLASS_HWMON;
priv->adapter.quirks = &osif_quirks;
priv->adapter.algo = &osif_algorithm;
priv->adapter.algo_data = priv;
snprintf(priv->adapter.name, sizeof(priv->adapter.name),


@@ -138,6 +138,11 @@ out:
return ret;
}
/* prevent invalid 0-length usb_control_msg */
static const struct i2c_adapter_quirks usb_quirks = {
.flags = I2C_AQ_NO_ZERO_LEN_READ,
};
/* This is the actual algorithm we define */
static const struct i2c_algorithm usb_algorithm = {
.xfer = usb_xfer,
@@ -246,6 +251,7 @@ static int i2c_tiny_usb_probe(struct usb_interface *interface,
/* setup i2c adapter description */
dev->adapter.owner = THIS_MODULE;
dev->adapter.class = I2C_CLASS_HWMON;
dev->adapter.quirks = &usb_quirks;
dev->adapter.algo = &usb_algorithm;
dev->adapter.algo_data = dev;
snprintf(dev->adapter.name, sizeof(dev->adapter.name),
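
Both adapters above fix the same hole: a zero-length SMBus read would become an invalid zero-length usb_control_msg(). Declaring the quirk lets the I2C core screen transfers and fail them with -EOPNOTSUPP before the USB driver ever sees them. Condensed user-space model of that screening (the flag's bit position is an assumption here):

#include <errno.h>
#include <stdio.h>

#define I2C_AQ_NO_ZERO_LEN_READ	(1u << 5)	/* bit position assumed */
#define I2C_M_RD		0x0001

struct i2c_msg_model { unsigned int flags; unsigned int len; };

static int check_msg(unsigned int quirks, const struct i2c_msg_model *m)
{
	if ((quirks & I2C_AQ_NO_ZERO_LEN_READ) &&
	    (m->flags & I2C_M_RD) && m->len == 0)
		return -EOPNOTSUPP;	/* rejected before reaching the adapter */
	return 0;
}

int main(void)
{
	struct i2c_msg_model zero_read = { I2C_M_RD, 0 };

	printf("zero-length read -> %d\n",
	       check_msg(I2C_AQ_NO_ZERO_LEN_READ, &zero_read));
	return 0;
}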


@@ -477,6 +477,10 @@ static irqreturn_t ad_sd_trigger_handler(int irq, void *p)
* byte set to zero. */
ad_sd_read_reg_raw(sigma_delta, data_reg, transfer_size, &data[1]);
break;
default:
dev_err_ratelimited(&indio_dev->dev, "Unsupported reg_size: %u\n", reg_size);
goto irq_handled;
}
/*


@@ -4,7 +4,7 @@
#
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_AD3552R) += ad3552r.o
obj-$(CONFIG_AD3552R) += ad3552r.o ad3552r-common.o
obj-$(CONFIG_AD5360) += ad5360.o
obj-$(CONFIG_AD5380) += ad5380.o
obj-$(CONFIG_AD5421) += ad5421.o


@@ -0,0 +1,248 @@
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2010-2024 Analog Devices Inc.
// Copyright (c) 2024 Baylibre, SAS
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include "ad3552r.h"
const s32 ad3552r_ch_ranges[AD3552R_MAX_RANGES][2] = {
[AD3552R_CH_OUTPUT_RANGE_0__2P5V] = { 0, 2500 },
[AD3552R_CH_OUTPUT_RANGE_0__5V] = { 0, 5000 },
[AD3552R_CH_OUTPUT_RANGE_0__10V] = { 0, 10000 },
[AD3552R_CH_OUTPUT_RANGE_NEG_5__5V] = { -5000, 5000 },
[AD3552R_CH_OUTPUT_RANGE_NEG_10__10V] = { -10000, 10000 }
};
EXPORT_SYMBOL_NS_GPL(ad3552r_ch_ranges, IIO_AD3552R);
const s32 ad3542r_ch_ranges[AD3542R_MAX_RANGES][2] = {
[AD3542R_CH_OUTPUT_RANGE_0__2P5V] = { 0, 2500 },
[AD3542R_CH_OUTPUT_RANGE_0__5V] = { 0, 5000 },
[AD3542R_CH_OUTPUT_RANGE_0__10V] = { 0, 10000 },
[AD3542R_CH_OUTPUT_RANGE_NEG_5__5V] = { -5000, 5000 },
[AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V] = { -2500, 7500 }
};
EXPORT_SYMBOL_NS_GPL(ad3542r_ch_ranges, IIO_AD3552R);
/* Gain * AD3552R_GAIN_SCALE */
static const s32 gains_scaling_table[] = {
[AD3552R_CH_GAIN_SCALING_1] = 1000,
[AD3552R_CH_GAIN_SCALING_0_5] = 500,
[AD3552R_CH_GAIN_SCALING_0_25] = 250,
[AD3552R_CH_GAIN_SCALING_0_125] = 125
};
u16 ad3552r_calc_custom_gain(u8 p, u8 n, s16 goffs)
{
return FIELD_PREP(AD3552R_MASK_CH_RANGE_OVERRIDE, 1) |
FIELD_PREP(AD3552R_MASK_CH_GAIN_SCALING_P, p) |
FIELD_PREP(AD3552R_MASK_CH_GAIN_SCALING_N, n) |
FIELD_PREP(AD3552R_MASK_CH_OFFSET_BIT_8, abs(goffs)) |
FIELD_PREP(AD3552R_MASK_CH_OFFSET_POLARITY, goffs < 0);
}
EXPORT_SYMBOL_NS_GPL(ad3552r_calc_custom_gain, IIO_AD3552R);
static void ad3552r_get_custom_range(struct ad3552r_ch_data *ch_data,
s32 *v_min, s32 *v_max)
{
s64 vref, tmp, common, offset, gn, gp;
/*
* From datasheet formula (In Volts):
* Vmin = 2.5 + [(GainN + Offset / 1024) * 2.5 * Rfb * 1.03]
* Vmax = 2.5 - [(GainP + Offset / 1024) * 2.5 * Rfb * 1.03]
* Calculus are converted to milivolts
*/
vref = 2500;
/* 2.5 * 1.03 * 1000 (To mV) */
common = 2575 * ch_data->rfb;
offset = ch_data->gain_offset;
gn = gains_scaling_table[ch_data->n];
tmp = (1024 * gn + AD3552R_GAIN_SCALE * offset) * common;
tmp = div_s64(tmp, 1024 * AD3552R_GAIN_SCALE);
*v_max = vref + tmp;
gp = gains_scaling_table[ch_data->p];
tmp = (1024 * gp - AD3552R_GAIN_SCALE * offset) * common;
tmp = div_s64(tmp, 1024 * AD3552R_GAIN_SCALE);
*v_min = vref - tmp;
}
void ad3552r_calc_gain_and_offset(struct ad3552r_ch_data *ch_data,
const struct ad3552r_model_data *model_data)
{
s32 idx, v_max, v_min, span, rem;
s64 tmp;
if (ch_data->range_override) {
ad3552r_get_custom_range(ch_data, &v_min, &v_max);
} else {
/* Normal range */
idx = ch_data->range;
v_min = model_data->ranges_table[idx][0];
v_max = model_data->ranges_table[idx][1];
}
/*
* From datasheet formula:
* Vout = Span * (D / 65536) + Vmin
* Converted to scale and offset:
* Scale = Span / 65536
* Offset = 65536 * Vmin / Span
*
* Reminders are in micros in order to be printed as
* IIO_VAL_INT_PLUS_MICRO
*/
span = v_max - v_min;
ch_data->scale_int = div_s64_rem(span, 65536, &rem);
/* Do operations in microvolts */
ch_data->scale_dec = DIV_ROUND_CLOSEST((s64)rem * 1000000, 65536);
ch_data->offset_int = div_s64_rem(v_min * 65536, span, &rem);
tmp = (s64)rem * 1000000;
ch_data->offset_dec = div_s64(tmp, span);
}
EXPORT_SYMBOL_NS_GPL(ad3552r_calc_gain_and_offset, IIO_AD3552R);
int ad3552r_get_ref_voltage(struct device *dev, u32 *val)
{
int voltage;
int delta = 100000;
voltage = devm_regulator_get_enable_read_voltage(dev, "vref");
if (voltage < 0 && voltage != -ENODEV)
return dev_err_probe(dev, voltage,
"Error getting vref voltage\n");
if (voltage == -ENODEV) {
if (device_property_read_bool(dev, "adi,vref-out-en"))
*val = AD3552R_INTERNAL_VREF_PIN_2P5V;
else
*val = AD3552R_INTERNAL_VREF_PIN_FLOATING;
return 0;
}
if (voltage > 2500000 + delta || voltage < 2500000 - delta) {
dev_warn(dev, "vref-supply must be 2.5V");
return -EINVAL;
}
*val = AD3552R_EXTERNAL_VREF_PIN_INPUT;
return 0;
}
EXPORT_SYMBOL_NS_GPL(ad3552r_get_ref_voltage, IIO_AD3552R);
int ad3552r_get_drive_strength(struct device *dev, u32 *val)
{
int err;
u32 drive_strength;
err = device_property_read_u32(dev, "adi,sdo-drive-strength",
&drive_strength);
if (err)
return err;
if (drive_strength > 3) {
dev_err_probe(dev, -EINVAL,
"adi,sdo-drive-strength must be less than 4\n");
return -EINVAL;
}
*val = drive_strength;
return 0;
}
EXPORT_SYMBOL_NS_GPL(ad3552r_get_drive_strength, IIO_AD3552R);
int ad3552r_get_custom_gain(struct device *dev, struct fwnode_handle *child,
u8 *gs_p, u8 *gs_n, u16 *rfb, s16 *goffs)
{
int err;
u32 val;
struct fwnode_handle *gain_child __free(fwnode_handle) =
fwnode_get_named_child_node(child,
"custom-output-range-config");
if (!gain_child)
return dev_err_probe(dev, -EINVAL,
"custom-output-range-config mandatory\n");
err = fwnode_property_read_u32(gain_child, "adi,gain-scaling-p", &val);
if (err)
return dev_err_probe(dev, err,
"adi,gain-scaling-p mandatory\n");
*gs_p = val;
err = fwnode_property_read_u32(gain_child, "adi,gain-scaling-n", &val);
if (err)
return dev_err_probe(dev, err,
"adi,gain-scaling-n property mandatory\n");
*gs_n = val;
err = fwnode_property_read_u32(gain_child, "adi,rfb-ohms", &val);
if (err)
return dev_err_probe(dev, err,
"adi,rfb-ohms mandatory\n");
*rfb = val;
err = fwnode_property_read_u32(gain_child, "adi,gain-offset", &val);
if (err)
return dev_err_probe(dev, err,
"adi,gain-offset mandatory\n");
*goffs = val;
return 0;
}
EXPORT_SYMBOL_NS_GPL(ad3552r_get_custom_gain, IIO_AD3552R);
static int ad3552r_find_range(const struct ad3552r_model_data *model_info,
s32 *vals)
{
int i;
for (i = 0; i < model_info->num_ranges; i++)
if (vals[0] == model_info->ranges_table[i][0] * 1000 &&
vals[1] == model_info->ranges_table[i][1] * 1000)
return i;
return -EINVAL;
}
int ad3552r_get_output_range(struct device *dev,
const struct ad3552r_model_data *model_info,
struct fwnode_handle *child, u32 *val)
{
int ret;
s32 vals[2];
/* This property is optional, so returning -ENOENT if missing */
if (!fwnode_property_present(child, "adi,output-range-microvolt"))
return -ENOENT;
ret = fwnode_property_read_u32_array(child,
"adi,output-range-microvolt",
vals, 2);
if (ret)
return dev_err_probe(dev, ret,
"invalid adi,output-range-microvolt\n");
ret = ad3552r_find_range(model_info, vals);
if (ret < 0)
return dev_err_probe(dev, ret,
"invalid adi,output-range-microvolt value\n");
*val = ret;
return 0;
}
EXPORT_SYMBOL_NS_GPL(ad3552r_get_output_range, IIO_AD3552R);
MODULE_DESCRIPTION("ad3552r common functions");
MODULE_LICENSE("GPL");
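
As a sanity check on ad3552r_calc_gain_and_offset() above: for the stock -5 V..+5 V range, Scale = Span / 65536 and Offset = 65536 * Vmin / Span work out to 0.152588 mV/LSB and -32768 LSB. The same integer arithmetic in plain C, runnable stand-alone:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	long long v_min = -5000, v_max = 5000;		/* millivolts */
	long long span = v_max - v_min;			/* 10000 mV */

	lldiv_t scale = lldiv(span, 65536);
	/* DIV_ROUND_CLOSEST((s64)rem * 1000000, 65536) */
	long long scale_dec = (scale.rem * 1000000 + 32768) / 65536;

	lldiv_t off = lldiv(v_min * 65536, span);
	long long off_dec = llabs(off.rem) * 1000000 / span;

	printf("scale = %lld.%06lld mV, offset = %lld.%06lld\n",
	       scale.quot, scale_dec, off.quot, off_dec);
	return 0;
}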


@@ -6,271 +6,15 @@
* Copyright 2021 Analog Devices Inc.
*/
#include <linux/unaligned.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
/* Register addresses */
/* Primary address space */
#define AD3552R_REG_ADDR_INTERFACE_CONFIG_A 0x00
#define AD3552R_MASK_SOFTWARE_RESET (BIT(7) | BIT(0))
#define AD3552R_MASK_ADDR_ASCENSION BIT(5)
#define AD3552R_MASK_SDO_ACTIVE BIT(4)
#define AD3552R_REG_ADDR_INTERFACE_CONFIG_B 0x01
#define AD3552R_MASK_SINGLE_INST BIT(7)
#define AD3552R_MASK_SHORT_INSTRUCTION BIT(3)
#define AD3552R_REG_ADDR_DEVICE_CONFIG 0x02
#define AD3552R_MASK_DEVICE_STATUS(n) BIT(4 + (n))
#define AD3552R_MASK_CUSTOM_MODES GENMASK(3, 2)
#define AD3552R_MASK_OPERATING_MODES GENMASK(1, 0)
#define AD3552R_REG_ADDR_CHIP_TYPE 0x03
#define AD3552R_MASK_CLASS GENMASK(7, 0)
#define AD3552R_REG_ADDR_PRODUCT_ID_L 0x04
#define AD3552R_REG_ADDR_PRODUCT_ID_H 0x05
#define AD3552R_REG_ADDR_CHIP_GRADE 0x06
#define AD3552R_MASK_GRADE GENMASK(7, 4)
#define AD3552R_MASK_DEVICE_REVISION GENMASK(3, 0)
#define AD3552R_REG_ADDR_SCRATCH_PAD 0x0A
#define AD3552R_REG_ADDR_SPI_REVISION 0x0B
#define AD3552R_REG_ADDR_VENDOR_L 0x0C
#define AD3552R_REG_ADDR_VENDOR_H 0x0D
#define AD3552R_REG_ADDR_STREAM_MODE 0x0E
#define AD3552R_MASK_LENGTH GENMASK(7, 0)
#define AD3552R_REG_ADDR_TRANSFER_REGISTER 0x0F
#define AD3552R_MASK_MULTI_IO_MODE GENMASK(7, 6)
#define AD3552R_MASK_STREAM_LENGTH_KEEP_VALUE BIT(2)
#define AD3552R_REG_ADDR_INTERFACE_CONFIG_C 0x10
#define AD3552R_MASK_CRC_ENABLE (GENMASK(7, 6) |\
GENMASK(1, 0))
#define AD3552R_MASK_STRICT_REGISTER_ACCESS BIT(5)
#define AD3552R_REG_ADDR_INTERFACE_STATUS_A 0x11
#define AD3552R_MASK_INTERFACE_NOT_READY BIT(7)
#define AD3552R_MASK_CLOCK_COUNTING_ERROR BIT(5)
#define AD3552R_MASK_INVALID_OR_NO_CRC BIT(3)
#define AD3552R_MASK_WRITE_TO_READ_ONLY_REGISTER BIT(2)
#define AD3552R_MASK_PARTIAL_REGISTER_ACCESS BIT(1)
#define AD3552R_MASK_REGISTER_ADDRESS_INVALID BIT(0)
#define AD3552R_REG_ADDR_INTERFACE_CONFIG_D 0x14
#define AD3552R_MASK_ALERT_ENABLE_PULLUP BIT(6)
#define AD3552R_MASK_MEM_CRC_EN BIT(4)
#define AD3552R_MASK_SDO_DRIVE_STRENGTH GENMASK(3, 2)
#define AD3552R_MASK_DUAL_SPI_SYNCHROUNOUS_EN BIT(1)
#define AD3552R_MASK_SPI_CONFIG_DDR BIT(0)
#define AD3552R_REG_ADDR_SH_REFERENCE_CONFIG 0x15
#define AD3552R_MASK_IDUMP_FAST_MODE BIT(6)
#define AD3552R_MASK_SAMPLE_HOLD_DIFFERENTIAL_USER_EN BIT(5)
#define AD3552R_MASK_SAMPLE_HOLD_USER_TRIM GENMASK(4, 3)
#define AD3552R_MASK_SAMPLE_HOLD_USER_ENABLE BIT(2)
#define AD3552R_MASK_REFERENCE_VOLTAGE_SEL GENMASK(1, 0)
#define AD3552R_REG_ADDR_ERR_ALARM_MASK 0x16
#define AD3552R_MASK_REF_RANGE_ALARM BIT(6)
#define AD3552R_MASK_CLOCK_COUNT_ERR_ALARM BIT(5)
#define AD3552R_MASK_MEM_CRC_ERR_ALARM BIT(4)
#define AD3552R_MASK_SPI_CRC_ERR_ALARM BIT(3)
#define AD3552R_MASK_WRITE_TO_READ_ONLY_ALARM BIT(2)
#define AD3552R_MASK_PARTIAL_REGISTER_ACCESS_ALARM BIT(1)
#define AD3552R_MASK_REGISTER_ADDRESS_INVALID_ALARM BIT(0)
#define AD3552R_REG_ADDR_ERR_STATUS 0x17
#define AD3552R_MASK_REF_RANGE_ERR_STATUS BIT(6)
#define AD3552R_MASK_DUAL_SPI_STREAM_EXCEEDS_DAC_ERR_STATUS BIT(5)
#define AD3552R_MASK_MEM_CRC_ERR_STATUS BIT(4)
#define AD3552R_MASK_RESET_STATUS BIT(0)
#define AD3552R_REG_ADDR_POWERDOWN_CONFIG 0x18
#define AD3552R_MASK_CH_DAC_POWERDOWN(ch) BIT(4 + (ch))
#define AD3552R_MASK_CH_AMPLIFIER_POWERDOWN(ch) BIT(ch)
#define AD3552R_REG_ADDR_CH0_CH1_OUTPUT_RANGE 0x19
#define AD3552R_MASK_CH_OUTPUT_RANGE_SEL(ch) ((ch) ? GENMASK(7, 4) :\
GENMASK(3, 0))
#define AD3552R_REG_ADDR_CH_OFFSET(ch) (0x1B + (ch) * 2)
#define AD3552R_MASK_CH_OFFSET_BITS_0_7 GENMASK(7, 0)
#define AD3552R_REG_ADDR_CH_GAIN(ch) (0x1C + (ch) * 2)
#define AD3552R_MASK_CH_RANGE_OVERRIDE BIT(7)
#define AD3552R_MASK_CH_GAIN_SCALING_N GENMASK(6, 5)
#define AD3552R_MASK_CH_GAIN_SCALING_P GENMASK(4, 3)
#define AD3552R_MASK_CH_OFFSET_POLARITY BIT(2)
#define AD3552R_MASK_CH_OFFSET_BIT_8 BIT(0)
/*
* Secondary region
* For multibyte registers specify the highest address because the access is
* done in descending order
*/
#define AD3552R_SECONDARY_REGION_START 0x28
#define AD3552R_REG_ADDR_HW_LDAC_16B 0x28
#define AD3552R_REG_ADDR_CH_DAC_16B(ch) (0x2C - (1 - ch) * 2)
#define AD3552R_REG_ADDR_DAC_PAGE_MASK_16B 0x2E
#define AD3552R_REG_ADDR_CH_SELECT_16B 0x2F
#define AD3552R_REG_ADDR_INPUT_PAGE_MASK_16B 0x31
#define AD3552R_REG_ADDR_SW_LDAC_16B 0x32
#define AD3552R_REG_ADDR_CH_INPUT_16B(ch) (0x36 - (1 - ch) * 2)
/* 3 bytes registers */
#define AD3552R_REG_START_24B 0x37
#define AD3552R_REG_ADDR_HW_LDAC_24B 0x37
#define AD3552R_REG_ADDR_CH_DAC_24B(ch) (0x3D - (1 - ch) * 3)
#define AD3552R_REG_ADDR_DAC_PAGE_MASK_24B 0x40
#define AD3552R_REG_ADDR_CH_SELECT_24B 0x41
#define AD3552R_REG_ADDR_INPUT_PAGE_MASK_24B 0x44
#define AD3552R_REG_ADDR_SW_LDAC_24B 0x45
#define AD3552R_REG_ADDR_CH_INPUT_24B(ch) (0x4B - (1 - ch) * 3)
/* Useful defines */
#define AD3552R_MAX_CH 2
#define AD3552R_MASK_CH(ch) BIT(ch)
#define AD3552R_MASK_ALL_CH GENMASK(1, 0)
#define AD3552R_MAX_REG_SIZE 3
#define AD3552R_READ_BIT BIT(7)
#define AD3552R_ADDR_MASK GENMASK(6, 0)
#define AD3552R_MASK_DAC_12B 0xFFF0
#define AD3552R_DEFAULT_CONFIG_B_VALUE 0x8
#define AD3552R_SCRATCH_PAD_TEST_VAL1 0x34
#define AD3552R_SCRATCH_PAD_TEST_VAL2 0xB2
#define AD3552R_GAIN_SCALE 1000
#define AD3552R_LDAC_PULSE_US 100
enum ad3552r_ch_vref_select {
/* Internal source with Vref I/O floating */
AD3552R_INTERNAL_VREF_PIN_FLOATING,
/* Internal source with Vref I/O at 2.5V */
AD3552R_INTERNAL_VREF_PIN_2P5V,
/* External source with Vref I/O as input */
AD3552R_EXTERNAL_VREF_PIN_INPUT
};
enum ad3552r_id {
AD3541R_ID = 0x400b,
AD3542R_ID = 0x4009,
AD3551R_ID = 0x400a,
AD3552R_ID = 0x4008,
};
enum ad3552r_ch_output_range {
/* Range from 0 V to 2.5 V. Requires Rfb1x connection */
AD3552R_CH_OUTPUT_RANGE_0__2P5V,
/* Range from 0 V to 5 V. Requires Rfb1x connection */
AD3552R_CH_OUTPUT_RANGE_0__5V,
/* Range from 0 V to 10 V. Requires Rfb2x connection */
AD3552R_CH_OUTPUT_RANGE_0__10V,
/* Range from -5 V to 5 V. Requires Rfb2x connection */
AD3552R_CH_OUTPUT_RANGE_NEG_5__5V,
/* Range from -10 V to 10 V. Requires Rfb4x connection */
AD3552R_CH_OUTPUT_RANGE_NEG_10__10V,
};
static const s32 ad3552r_ch_ranges[][2] = {
[AD3552R_CH_OUTPUT_RANGE_0__2P5V] = {0, 2500},
[AD3552R_CH_OUTPUT_RANGE_0__5V] = {0, 5000},
[AD3552R_CH_OUTPUT_RANGE_0__10V] = {0, 10000},
[AD3552R_CH_OUTPUT_RANGE_NEG_5__5V] = {-5000, 5000},
[AD3552R_CH_OUTPUT_RANGE_NEG_10__10V] = {-10000, 10000}
};
enum ad3542r_ch_output_range {
/* Range from 0 V to 2.5 V. Requires Rfb1x connection */
AD3542R_CH_OUTPUT_RANGE_0__2P5V,
/* Range from 0 V to 3 V. Requires Rfb1x connection */
AD3542R_CH_OUTPUT_RANGE_0__3V,
/* Range from 0 V to 5 V. Requires Rfb1x connection */
AD3542R_CH_OUTPUT_RANGE_0__5V,
/* Range from 0 V to 10 V. Requires Rfb2x connection */
AD3542R_CH_OUTPUT_RANGE_0__10V,
/* Range from -2.5 V to 7.5 V. Requires Rfb2x connection */
AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V,
/* Range from -5 V to 5 V. Requires Rfb2x connection */
AD3542R_CH_OUTPUT_RANGE_NEG_5__5V,
};
static const s32 ad3542r_ch_ranges[][2] = {
[AD3542R_CH_OUTPUT_RANGE_0__2P5V] = {0, 2500},
[AD3542R_CH_OUTPUT_RANGE_0__3V] = {0, 3000},
[AD3542R_CH_OUTPUT_RANGE_0__5V] = {0, 5000},
[AD3542R_CH_OUTPUT_RANGE_0__10V] = {0, 10000},
[AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V] = {-2500, 7500},
[AD3542R_CH_OUTPUT_RANGE_NEG_5__5V] = {-5000, 5000}
};
enum ad3552r_ch_gain_scaling {
/* Gain scaling of 1 */
AD3552R_CH_GAIN_SCALING_1,
/* Gain scaling of 0.5 */
AD3552R_CH_GAIN_SCALING_0_5,
/* Gain scaling of 0.25 */
AD3552R_CH_GAIN_SCALING_0_25,
/* Gain scaling of 0.125 */
AD3552R_CH_GAIN_SCALING_0_125,
};
/* Gain * AD3552R_GAIN_SCALE */
static const s32 gains_scaling_table[] = {
[AD3552R_CH_GAIN_SCALING_1] = 1000,
[AD3552R_CH_GAIN_SCALING_0_5] = 500,
[AD3552R_CH_GAIN_SCALING_0_25] = 250,
[AD3552R_CH_GAIN_SCALING_0_125] = 125
};
enum ad3552r_dev_attributes {
/* - Direct register values */
/* From 0-3 */
AD3552R_SDO_DRIVE_STRENGTH,
/*
* 0 -> Internal Vref, vref_io pin floating (default)
* 1 -> Internal Vref, vref_io driven by internal vref
* 2 or 3 -> External Vref
*/
AD3552R_VREF_SELECT,
/* Read registers in ascending order if set. Else descending */
AD3552R_ADDR_ASCENSION,
};
enum ad3552r_ch_attributes {
/* DAC powerdown */
AD3552R_CH_DAC_POWERDOWN,
/* DAC amplifier powerdown */
AD3552R_CH_AMPLIFIER_POWERDOWN,
/* Select the output range. Select from enum ad3552r_ch_output_range */
AD3552R_CH_OUTPUT_RANGE_SEL,
/*
* Over-rider the range selector in order to manually set the output
* voltage range
*/
AD3552R_CH_RANGE_OVERRIDE,
/* Manually set the offset voltage */
AD3552R_CH_GAIN_OFFSET,
/* Sets the polarity of the offset. */
AD3552R_CH_GAIN_OFFSET_POLARITY,
/* PDAC gain scaling */
AD3552R_CH_GAIN_SCALING_P,
/* NDAC gain scaling */
AD3552R_CH_GAIN_SCALING_N,
/* Rfb value */
AD3552R_CH_RFB,
/* Channel select. When set allow Input -> DAC and Mask -> DAC */
AD3552R_CH_SELECT,
};
struct ad3552r_ch_data {
s32 scale_int;
s32 scale_dec;
s32 offset_int;
s32 offset_dec;
s16 gain_offset;
u16 rfb;
u8 n;
u8 p;
u8 range;
bool range_override;
};
struct ad3552r_model_data {
const char *model_name;
enum ad3552r_id chip_id;
unsigned int num_hw_channels;
const s32 (*ranges_table)[2];
int num_ranges;
bool requires_output_range;
};
#include "ad3552r.h"
struct ad3552r_desc {
const struct ad3552r_model_data *model_data;
@@ -285,45 +29,6 @@ struct ad3552r_desc {
unsigned int num_ch;
};
static const u16 addr_mask_map[][2] = {
[AD3552R_ADDR_ASCENSION] = {
AD3552R_REG_ADDR_INTERFACE_CONFIG_A,
AD3552R_MASK_ADDR_ASCENSION
},
[AD3552R_SDO_DRIVE_STRENGTH] = {
AD3552R_REG_ADDR_INTERFACE_CONFIG_D,
AD3552R_MASK_SDO_DRIVE_STRENGTH
},
[AD3552R_VREF_SELECT] = {
AD3552R_REG_ADDR_SH_REFERENCE_CONFIG,
AD3552R_MASK_REFERENCE_VOLTAGE_SEL
},
};
/* 0 -> reg addr, 1->ch0 mask, 2->ch1 mask */
static const u16 addr_mask_map_ch[][3] = {
[AD3552R_CH_DAC_POWERDOWN] = {
AD3552R_REG_ADDR_POWERDOWN_CONFIG,
AD3552R_MASK_CH_DAC_POWERDOWN(0),
AD3552R_MASK_CH_DAC_POWERDOWN(1)
},
[AD3552R_CH_AMPLIFIER_POWERDOWN] = {
AD3552R_REG_ADDR_POWERDOWN_CONFIG,
AD3552R_MASK_CH_AMPLIFIER_POWERDOWN(0),
AD3552R_MASK_CH_AMPLIFIER_POWERDOWN(1)
},
[AD3552R_CH_OUTPUT_RANGE_SEL] = {
AD3552R_REG_ADDR_CH0_CH1_OUTPUT_RANGE,
AD3552R_MASK_CH_OUTPUT_RANGE_SEL(0),
AD3552R_MASK_CH_OUTPUT_RANGE_SEL(1)
},
[AD3552R_CH_SELECT] = {
AD3552R_REG_ADDR_CH_SELECT_16B,
AD3552R_MASK_CH(0),
AD3552R_MASK_CH(1)
}
};
static u8 _ad3552r_reg_len(u8 addr)
{
switch (addr) {
@@ -399,11 +104,6 @@ static int ad3552r_read_reg(struct ad3552r_desc *dac, u8 addr, u16 *val)
return 0;
}
static u16 ad3552r_field_prep(u16 val, u16 mask)
{
return (val << __ffs(mask)) & mask;
}
/* Update field of a register, shift val if needed */
static int ad3552r_update_reg_field(struct ad3552r_desc *dac, u8 addr, u16 mask,
u16 val)
@@ -416,21 +116,11 @@ static int ad3552r_update_reg_field(struct ad3552r_desc *dac, u8 addr, u16 mask,
return ret;
reg &= ~mask;
reg |= ad3552r_field_prep(val, mask);
reg |= val;
return ad3552r_write_reg(dac, addr, reg);
}
static int ad3552r_set_ch_value(struct ad3552r_desc *dac,
enum ad3552r_ch_attributes attr,
u8 ch,
u16 val)
{
/* Update register related to attributes in chip */
return ad3552r_update_reg_field(dac, addr_mask_map_ch[attr][0],
addr_mask_map_ch[attr][ch + 1], val);
}
#define AD3552R_CH_DAC(_idx) ((struct iio_chan_spec) { \
.type = IIO_VOLTAGE, \
.output = true, \
@@ -510,8 +200,14 @@ static int ad3552r_write_raw(struct iio_dev *indio_dev,
val);
break;
case IIO_CHAN_INFO_ENABLE:
err = ad3552r_set_ch_value(dac, AD3552R_CH_DAC_POWERDOWN,
chan->channel, !val);
if (chan->channel == 0)
val = FIELD_PREP(AD3552R_MASK_CH_DAC_POWERDOWN(0), !val);
else
val = FIELD_PREP(AD3552R_MASK_CH_DAC_POWERDOWN(1), !val);
err = ad3552r_update_reg_field(dac, AD3552R_REG_ADDR_POWERDOWN_CONFIG,
AD3552R_MASK_CH_DAC_POWERDOWN(chan->channel),
val);
break;
default:
err = -EINVAL;
@@ -721,83 +417,9 @@ static int ad3552r_reset(struct ad3552r_desc *dac)
return ret;
return ad3552r_update_reg_field(dac,
addr_mask_map[AD3552R_ADDR_ASCENSION][0],
addr_mask_map[AD3552R_ADDR_ASCENSION][1],
val);
}
static void ad3552r_get_custom_range(struct ad3552r_desc *dac, s32 i, s32 *v_min,
s32 *v_max)
{
s64 vref, tmp, common, offset, gn, gp;
/*
* From datasheet formula (In Volts):
* Vmin = 2.5 + [(GainN + Offset / 1024) * 2.5 * Rfb * 1.03]
* Vmax = 2.5 - [(GainP + Offset / 1024) * 2.5 * Rfb * 1.03]
* Calculus are converted to milivolts
*/
vref = 2500;
/* 2.5 * 1.03 * 1000 (To mV) */
common = 2575 * dac->ch_data[i].rfb;
offset = dac->ch_data[i].gain_offset;
gn = gains_scaling_table[dac->ch_data[i].n];
tmp = (1024 * gn + AD3552R_GAIN_SCALE * offset) * common;
tmp = div_s64(tmp, 1024 * AD3552R_GAIN_SCALE);
*v_max = vref + tmp;
gp = gains_scaling_table[dac->ch_data[i].p];
tmp = (1024 * gp - AD3552R_GAIN_SCALE * offset) * common;
tmp = div_s64(tmp, 1024 * AD3552R_GAIN_SCALE);
*v_min = vref - tmp;
}
static void ad3552r_calc_gain_and_offset(struct ad3552r_desc *dac, s32 ch)
{
s32 idx, v_max, v_min, span, rem;
s64 tmp;
if (dac->ch_data[ch].range_override) {
ad3552r_get_custom_range(dac, ch, &v_min, &v_max);
} else {
/* Normal range */
idx = dac->ch_data[ch].range;
v_min = dac->model_data->ranges_table[idx][0];
v_max = dac->model_data->ranges_table[idx][1];
}
/*
* From datasheet formula:
* Vout = Span * (D / 65536) + Vmin
* Converted to scale and offset:
* Scale = Span / 65536
* Offset = 65536 * Vmin / Span
*
* Reminders are in micros in order to be printed as
* IIO_VAL_INT_PLUS_MICRO
*/
span = v_max - v_min;
dac->ch_data[ch].scale_int = div_s64_rem(span, 65536, &rem);
/* Do operations in microvolts */
dac->ch_data[ch].scale_dec = DIV_ROUND_CLOSEST((s64)rem * 1000000,
65536);
dac->ch_data[ch].offset_int = div_s64_rem(v_min * 65536, span, &rem);
tmp = (s64)rem * 1000000;
dac->ch_data[ch].offset_dec = div_s64(tmp, span);
}
static int ad3552r_find_range(const struct ad3552r_model_data *model_data,
s32 *vals)
{
int i;
for (i = 0; i < model_data->num_ranges; i++)
if (vals[0] == model_data->ranges_table[i][0] * 1000 &&
vals[1] == model_data->ranges_table[i][1] * 1000)
return i;
return -EINVAL;
AD3552R_REG_ADDR_INTERFACE_CONFIG_A,
AD3552R_MASK_ADDR_ASCENSION,
FIELD_PREP(AD3552R_MASK_ADDR_ASCENSION, val));
}
static int ad3552r_configure_custom_gain(struct ad3552r_desc *dac,
@@ -805,57 +427,30 @@
u32 ch)
{
struct device *dev = &dac->spi->dev;
u32 val;
int err;
u8 addr;
u16 reg = 0, offset;
u16 reg;
struct fwnode_handle *gain_child __free(fwnode_handle)
= fwnode_get_named_child_node(child,
"custom-output-range-config");
if (!gain_child)
return dev_err_probe(dev, -EINVAL,
"mandatory custom-output-range-config property missing\n");
err = ad3552r_get_custom_gain(dev, child,
&dac->ch_data[ch].p,
&dac->ch_data[ch].n,
&dac->ch_data[ch].rfb,
&dac->ch_data[ch].gain_offset);
if (err)
return err;
dac->ch_data[ch].range_override = 1;
reg |= ad3552r_field_prep(1, AD3552R_MASK_CH_RANGE_OVERRIDE);
err = fwnode_property_read_u32(gain_child, "adi,gain-scaling-p", &val);
if (err)
return dev_err_probe(dev, err,
"mandatory adi,gain-scaling-p property missing\n");
reg |= ad3552r_field_prep(val, AD3552R_MASK_CH_GAIN_SCALING_P);
dac->ch_data[ch].p = val;
err = fwnode_property_read_u32(gain_child, "adi,gain-scaling-n", &val);
if (err)
return dev_err_probe(dev, err,
"mandatory adi,gain-scaling-n property missing\n");
reg |= ad3552r_field_prep(val, AD3552R_MASK_CH_GAIN_SCALING_N);
dac->ch_data[ch].n = val;
err = fwnode_property_read_u32(gain_child, "adi,rfb-ohms", &val);
if (err)
return dev_err_probe(dev, err,
"mandatory adi,rfb-ohms property missing\n");
dac->ch_data[ch].rfb = val;
err = fwnode_property_read_u32(gain_child, "adi,gain-offset", &val);
if (err)
return dev_err_probe(dev, err,
"mandatory adi,gain-offset property missing\n");
dac->ch_data[ch].gain_offset = val;
offset = abs((s32)val);
reg |= ad3552r_field_prep((offset >> 8), AD3552R_MASK_CH_OFFSET_BIT_8);
reg |= ad3552r_field_prep((s32)val < 0, AD3552R_MASK_CH_OFFSET_POLARITY);
addr = AD3552R_REG_ADDR_CH_GAIN(ch);
err = ad3552r_write_reg(dac, addr,
offset & AD3552R_MASK_CH_OFFSET_BITS_0_7);
abs((s32)dac->ch_data[ch].gain_offset) &
AD3552R_MASK_CH_OFFSET_BITS_0_7);
if (err)
return dev_err_probe(dev, err, "Error writing register\n");
reg = ad3552r_calc_custom_gain(dac->ch_data[ch].p, dac->ch_data[ch].n,
dac->ch_data[ch].gain_offset);
err = ad3552r_write_reg(dac, addr, reg);
if (err)
return dev_err_probe(dev, err, "Error writing register\n");
@@ -866,49 +461,31 @@ static int ad3552r_configure_device(struct ad3552r_desc *dac)
static int ad3552r_configure_device(struct ad3552r_desc *dac)
{
struct device *dev = &dac->spi->dev;
int err, cnt = 0, voltage, delta = 100000;
u32 vals[2], val, ch;
int err, cnt = 0;
u32 val, ch;
dac->gpio_ldac = devm_gpiod_get_optional(dev, "ldac", GPIOD_OUT_HIGH);
if (IS_ERR(dac->gpio_ldac))
return dev_err_probe(dev, PTR_ERR(dac->gpio_ldac),
"Error getting gpio ldac");
voltage = devm_regulator_get_enable_read_voltage(dev, "vref");
if (voltage < 0 && voltage != -ENODEV)
return dev_err_probe(dev, voltage, "Error getting vref voltage\n");
if (voltage == -ENODEV) {
if (device_property_read_bool(dev, "adi,vref-out-en"))
val = AD3552R_INTERNAL_VREF_PIN_2P5V;
else
val = AD3552R_INTERNAL_VREF_PIN_FLOATING;
} else {
if (voltage > 2500000 + delta || voltage < 2500000 - delta) {
dev_warn(dev, "vref-supply must be 2.5V");
return -EINVAL;
}
val = AD3552R_EXTERNAL_VREF_PIN_INPUT;
}
err = ad3552r_get_ref_voltage(dev, &val);
if (err < 0)
return err;
err = ad3552r_update_reg_field(dac,
addr_mask_map[AD3552R_VREF_SELECT][0],
addr_mask_map[AD3552R_VREF_SELECT][1],
val);
AD3552R_REG_ADDR_SH_REFERENCE_CONFIG,
AD3552R_MASK_REFERENCE_VOLTAGE_SEL,
FIELD_PREP(AD3552R_MASK_REFERENCE_VOLTAGE_SEL, val));
if (err)
return err;
err = device_property_read_u32(dev, "adi,sdo-drive-strength", &val);
err = ad3552r_get_drive_strength(dev, &val);
if (!err) {
if (val > 3) {
dev_err(dev, "adi,sdo-drive-strength must be less than 4\n");
return -EINVAL;
}
err = ad3552r_update_reg_field(dac,
addr_mask_map[AD3552R_SDO_DRIVE_STRENGTH][0],
addr_mask_map[AD3552R_SDO_DRIVE_STRENGTH][1],
val);
AD3552R_REG_ADDR_INTERFACE_CONFIG_D,
AD3552R_MASK_SDO_DRIVE_STRENGTH,
FIELD_PREP(AD3552R_MASK_SDO_DRIVE_STRENGTH, val));
if (err)
return err;
}
@@ -929,24 +506,21 @@ static int ad3552r_configure_device(struct ad3552r_desc *dac)
"reg must be less than %d\n",
dac->model_data->num_hw_channels);
if (fwnode_property_present(child, "adi,output-range-microvolt")) {
err = fwnode_property_read_u32_array(child,
"adi,output-range-microvolt",
vals,
2);
if (err)
return dev_err_probe(dev, err,
"adi,output-range-microvolt property could not be parsed\n");
err = ad3552r_get_output_range(dev, dac->model_data,
child, &val);
if (err && err != -ENOENT)
return err;
err = ad3552r_find_range(dac->model_data, vals);
if (err < 0)
return dev_err_probe(dev, err,
"Invalid adi,output-range-microvolt value\n");
if (!err) {
if (ch == 0)
val = FIELD_PREP(AD3552R_MASK_CH_OUTPUT_RANGE_SEL(0), val);
else
val = FIELD_PREP(AD3552R_MASK_CH_OUTPUT_RANGE_SEL(1), val);
val = err;
err = ad3552r_set_ch_value(dac,
AD3552R_CH_OUTPUT_RANGE_SEL,
ch, val);
err = ad3552r_update_reg_field(dac,
AD3552R_REG_ADDR_CH0_CH1_OUTPUT_RANGE,
AD3552R_MASK_CH_OUTPUT_RANGE_SEL(ch),
val);
if (err)
return err;
@@ -961,10 +535,17 @@ static int ad3552r_configure_device(struct ad3552r_desc *dac)
return err;
}
ad3552r_calc_gain_and_offset(dac, ch);
ad3552r_calc_gain_and_offset(&dac->ch_data[ch], dac->model_data);
dac->enabled_ch |= BIT(ch);
err = ad3552r_set_ch_value(dac, AD3552R_CH_SELECT, ch, 1);
if (ch == 0)
val = FIELD_PREP(AD3552R_MASK_CH(0), 1);
else
val = FIELD_PREP(AD3552R_MASK_CH(1), 1);
err = ad3552r_update_reg_field(dac,
AD3552R_REG_ADDR_CH_SELECT_16B,
AD3552R_MASK_CH(ch), val);
if (err < 0)
return err;
@@ -976,8 +557,15 @@ static int ad3552r_configure_device(struct ad3552r_desc *dac)
/* Disable unused channels */
for_each_clear_bit(ch, &dac->enabled_ch,
dac->model_data->num_hw_channels) {
err = ad3552r_set_ch_value(dac, AD3552R_CH_AMPLIFIER_POWERDOWN,
ch, 1);
if (ch == 0)
val = FIELD_PREP(AD3552R_MASK_CH_AMPLIFIER_POWERDOWN(0), 1);
else
val = FIELD_PREP(AD3552R_MASK_CH_AMPLIFIER_POWERDOWN(1), 1);
err = ad3552r_update_reg_field(dac,
AD3552R_REG_ADDR_POWERDOWN_CONFIG,
AD3552R_MASK_CH_AMPLIFIER_POWERDOWN(ch),
val);
if (err)
return err;
}
@@ -1146,3 +734,4 @@ module_spi_driver(ad3552r_driver);
MODULE_AUTHOR("Mihail Chindris <mihail.chindris@analog.com>");
MODULE_DESCRIPTION("Analog Device AD3552R DAC");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(IIO_AD3552R);
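
One thread running through the refactor above is the removal of the driver-local ad3552r_field_prep() in favor of the generic FIELD_PREP(). Both compute (val << ffs(mask)) & mask; the kernel macro additionally enforces a compile-time-constant mask. User-space re-implementation showing they agree on a GENMASK(3, 2)-style field such as the SDO drive strength:

#include <stdio.h>

#define FIELD_PREP(mask, val) \
	(((unsigned int)(val) << __builtin_ctz(mask)) & (mask))

static unsigned int ad3552r_field_prep(unsigned int val, unsigned int mask)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	unsigned int mask = 0xc;	/* GENMASK(3, 2) */
	unsigned int v;

	for (v = 0; v < 4; v++)
		printf("%u -> old %#x, FIELD_PREP %#x\n", v,
		       ad3552r_field_prep(v, mask), FIELD_PREP(mask, v));
	return 0;
}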

drivers/iio/dac/ad3552r.h (new file, 223 lines)

@@ -0,0 +1,223 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* AD3552R Digital <-> Analog converters common header
*
* Copyright 2021-2024 Analog Devices Inc.
* Author: Angelo Dureghello <adureghello@baylibre.com>
*/
#ifndef __DRIVERS_IIO_DAC_AD3552R_H__
#define __DRIVERS_IIO_DAC_AD3552R_H__
/* Register addresses */
/* Primary address space */
#define AD3552R_REG_ADDR_INTERFACE_CONFIG_A 0x00
#define AD3552R_MASK_SOFTWARE_RESET (BIT(7) | BIT(0))
#define AD3552R_MASK_ADDR_ASCENSION BIT(5)
#define AD3552R_MASK_SDO_ACTIVE BIT(4)
#define AD3552R_REG_ADDR_INTERFACE_CONFIG_B 0x01
#define AD3552R_MASK_SINGLE_INST BIT(7)
#define AD3552R_MASK_SHORT_INSTRUCTION BIT(3)
#define AD3552R_REG_ADDR_DEVICE_CONFIG 0x02
#define AD3552R_MASK_DEVICE_STATUS(n) BIT(4 + (n))
#define AD3552R_MASK_CUSTOM_MODES GENMASK(3, 2)
#define AD3552R_MASK_OPERATING_MODES GENMASK(1, 0)
#define AD3552R_REG_ADDR_CHIP_TYPE 0x03
#define AD3552R_MASK_CLASS GENMASK(7, 0)
#define AD3552R_REG_ADDR_PRODUCT_ID_L 0x04
#define AD3552R_REG_ADDR_PRODUCT_ID_H 0x05
#define AD3552R_REG_ADDR_CHIP_GRADE 0x06
#define AD3552R_MASK_GRADE GENMASK(7, 4)
#define AD3552R_MASK_DEVICE_REVISION GENMASK(3, 0)
#define AD3552R_REG_ADDR_SCRATCH_PAD 0x0A
#define AD3552R_REG_ADDR_SPI_REVISION 0x0B
#define AD3552R_REG_ADDR_VENDOR_L 0x0C
#define AD3552R_REG_ADDR_VENDOR_H 0x0D
#define AD3552R_REG_ADDR_STREAM_MODE 0x0E
#define AD3552R_MASK_LENGTH GENMASK(7, 0)
#define AD3552R_REG_ADDR_TRANSFER_REGISTER 0x0F
#define AD3552R_MASK_MULTI_IO_MODE GENMASK(7, 6)
#define AD3552R_MASK_STREAM_LENGTH_KEEP_VALUE BIT(2)
#define AD3552R_REG_ADDR_INTERFACE_CONFIG_C 0x10
#define AD3552R_MASK_CRC_ENABLE \
(GENMASK(7, 6) | GENMASK(1, 0))
#define AD3552R_MASK_STRICT_REGISTER_ACCESS BIT(5)
#define AD3552R_REG_ADDR_INTERFACE_STATUS_A 0x11
#define AD3552R_MASK_INTERFACE_NOT_READY BIT(7)
#define AD3552R_MASK_CLOCK_COUNTING_ERROR BIT(5)
#define AD3552R_MASK_INVALID_OR_NO_CRC BIT(3)
#define AD3552R_MASK_WRITE_TO_READ_ONLY_REGISTER BIT(2)
#define AD3552R_MASK_PARTIAL_REGISTER_ACCESS BIT(1)
#define AD3552R_MASK_REGISTER_ADDRESS_INVALID BIT(0)
#define AD3552R_REG_ADDR_INTERFACE_CONFIG_D 0x14
#define AD3552R_MASK_ALERT_ENABLE_PULLUP BIT(6)
#define AD3552R_MASK_MEM_CRC_EN BIT(4)
#define AD3552R_MASK_SDO_DRIVE_STRENGTH GENMASK(3, 2)
#define AD3552R_MASK_DUAL_SPI_SYNCHROUNOUS_EN BIT(1)
#define AD3552R_MASK_SPI_CONFIG_DDR BIT(0)
#define AD3552R_REG_ADDR_SH_REFERENCE_CONFIG 0x15
#define AD3552R_MASK_IDUMP_FAST_MODE BIT(6)
#define AD3552R_MASK_SAMPLE_HOLD_DIFF_USER_EN BIT(5)
#define AD3552R_MASK_SAMPLE_HOLD_USER_TRIM GENMASK(4, 3)
#define AD3552R_MASK_SAMPLE_HOLD_USER_ENABLE BIT(2)
#define AD3552R_MASK_REFERENCE_VOLTAGE_SEL GENMASK(1, 0)
#define AD3552R_REG_ADDR_ERR_ALARM_MASK 0x16
#define AD3552R_MASK_REF_RANGE_ALARM BIT(6)
#define AD3552R_MASK_CLOCK_COUNT_ERR_ALARM BIT(5)
#define AD3552R_MASK_MEM_CRC_ERR_ALARM BIT(4)
#define AD3552R_MASK_SPI_CRC_ERR_ALARM BIT(3)
#define AD3552R_MASK_WRITE_TO_READ_ONLY_ALARM BIT(2)
#define AD3552R_MASK_PARTIAL_REGISTER_ACCESS_ALARM BIT(1)
#define AD3552R_MASK_REGISTER_ADDRESS_INVALID_ALARM BIT(0)
#define AD3552R_REG_ADDR_ERR_STATUS 0x17
#define AD3552R_MASK_REF_RANGE_ERR_STATUS BIT(6)
#define AD3552R_MASK_STREAM_EXCEEDS_DAC_ERR_STATUS BIT(5)
#define AD3552R_MASK_MEM_CRC_ERR_STATUS BIT(4)
#define AD3552R_MASK_RESET_STATUS BIT(0)
#define AD3552R_REG_ADDR_POWERDOWN_CONFIG 0x18
#define AD3552R_MASK_CH_DAC_POWERDOWN(ch) BIT(4 + (ch))
#define AD3552R_MASK_CH_AMPLIFIER_POWERDOWN(ch) BIT(ch)
#define AD3552R_REG_ADDR_CH0_CH1_OUTPUT_RANGE 0x19
#define AD3552R_MASK_CH0_RANGE GENMASK(2, 0)
#define AD3552R_MASK_CH1_RANGE GENMASK(6, 4)
#define AD3552R_MASK_CH_OUTPUT_RANGE GENMASK(7, 0)
#define AD3552R_MASK_CH_OUTPUT_RANGE_SEL(ch) \
((ch) ? GENMASK(7, 4) : GENMASK(3, 0))
#define AD3552R_REG_ADDR_CH_OFFSET(ch) (0x1B + (ch) * 2)
#define AD3552R_MASK_CH_OFFSET_BITS_0_7 GENMASK(7, 0)
#define AD3552R_REG_ADDR_CH_GAIN(ch) (0x1C + (ch) * 2)
#define AD3552R_MASK_CH_RANGE_OVERRIDE BIT(7)
#define AD3552R_MASK_CH_GAIN_SCALING_N GENMASK(6, 5)
#define AD3552R_MASK_CH_GAIN_SCALING_P GENMASK(4, 3)
#define AD3552R_MASK_CH_OFFSET_POLARITY BIT(2)
#define AD3552R_MASK_CH_OFFSET_BIT_8 BIT(8)
/*
* Secondary region
* For multibyte registers specify the highest address because the access is
* done in descending order
*/
#define AD3552R_SECONDARY_REGION_START 0x28
#define AD3552R_REG_ADDR_HW_LDAC_16B 0x28
#define AD3552R_REG_ADDR_CH_DAC_16B(ch) (0x2C - (1 - (ch)) * 2)
#define AD3552R_REG_ADDR_DAC_PAGE_MASK_16B 0x2E
#define AD3552R_REG_ADDR_CH_SELECT_16B 0x2F
#define AD3552R_REG_ADDR_INPUT_PAGE_MASK_16B 0x31
#define AD3552R_REG_ADDR_SW_LDAC_16B 0x32
#define AD3552R_REG_ADDR_CH_INPUT_16B(ch) (0x36 - (1 - (ch)) * 2)
/* 3 bytes registers */
#define AD3552R_REG_START_24B 0x37
#define AD3552R_REG_ADDR_HW_LDAC_24B 0x37
#define AD3552R_REG_ADDR_CH_DAC_24B(ch) (0x3D - (1 - (ch)) * 3)
#define AD3552R_REG_ADDR_DAC_PAGE_MASK_24B 0x40
#define AD3552R_REG_ADDR_CH_SELECT_24B 0x41
#define AD3552R_REG_ADDR_INPUT_PAGE_MASK_24B 0x44
#define AD3552R_REG_ADDR_SW_LDAC_24B 0x45
#define AD3552R_REG_ADDR_CH_INPUT_24B(ch) (0x4B - (1 - (ch)) * 3)
#define AD3552R_MAX_CH 2
#define AD3552R_MASK_CH(ch) BIT(ch)
#define AD3552R_MASK_ALL_CH GENMASK(1, 0)
#define AD3552R_MAX_REG_SIZE 3
#define AD3552R_READ_BIT BIT(7)
#define AD3552R_ADDR_MASK GENMASK(6, 0)
#define AD3552R_MASK_DAC_12B GENMASK(15, 4)
#define AD3552R_DEFAULT_CONFIG_B_VALUE 0x8
#define AD3552R_SCRATCH_PAD_TEST_VAL1 0x34
#define AD3552R_SCRATCH_PAD_TEST_VAL2 0xB2
#define AD3552R_GAIN_SCALE 1000
#define AD3552R_LDAC_PULSE_US 100
#define AD3552R_MAX_RANGES 5
#define AD3542R_MAX_RANGES 5
#define AD3552R_QUAD_SPI 2
extern const s32 ad3552r_ch_ranges[AD3552R_MAX_RANGES][2];
extern const s32 ad3542r_ch_ranges[AD3542R_MAX_RANGES][2];
enum ad3552r_id {
AD3541R_ID = 0x400b,
AD3542R_ID = 0x4009,
AD3551R_ID = 0x400a,
AD3552R_ID = 0x4008,
};
struct ad3552r_model_data {
const char *model_name;
enum ad3552r_id chip_id;
unsigned int num_hw_channels;
const s32 (*ranges_table)[2];
int num_ranges;
bool requires_output_range;
};
struct ad3552r_ch_data {
s32 scale_int;
s32 scale_dec;
s32 offset_int;
s32 offset_dec;
s16 gain_offset;
u16 rfb;
u8 n;
u8 p;
u8 range;
bool range_override;
};
enum ad3552r_ch_gain_scaling {
/* Gain scaling of 1 */
AD3552R_CH_GAIN_SCALING_1,
/* Gain scaling of 0.5 */
AD3552R_CH_GAIN_SCALING_0_5,
/* Gain scaling of 0.25 */
AD3552R_CH_GAIN_SCALING_0_25,
/* Gain scaling of 0.125 */
AD3552R_CH_GAIN_SCALING_0_125,
};
enum ad3552r_ch_vref_select {
/* Internal source with Vref I/O floating */
AD3552R_INTERNAL_VREF_PIN_FLOATING,
/* Internal source with Vref I/O at 2.5V */
AD3552R_INTERNAL_VREF_PIN_2P5V,
/* External source with Vref I/O as input */
AD3552R_EXTERNAL_VREF_PIN_INPUT
};
enum ad3542r_ch_output_range {
/* Range from 0 V to 2.5 V. Requires Rfb1x connection */
AD3542R_CH_OUTPUT_RANGE_0__2P5V,
/* Range from 0 V to 5 V. Requires Rfb1x connection */
AD3542R_CH_OUTPUT_RANGE_0__5V,
/* Range from 0 V to 10 V. Requires Rfb2x connection */
AD3542R_CH_OUTPUT_RANGE_0__10V,
/* Range from -5 V to 5 V. Requires Rfb2x connection */
AD3542R_CH_OUTPUT_RANGE_NEG_5__5V,
/* Range from -2.5 V to 7.5 V. Requires Rfb2x connection */
AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V,
};
enum ad3552r_ch_output_range {
/* Range from 0 V to 2.5 V. Requires Rfb1x connection */
AD3552R_CH_OUTPUT_RANGE_0__2P5V,
/* Range from 0 V to 5 V. Requires Rfb1x connection */
AD3552R_CH_OUTPUT_RANGE_0__5V,
/* Range from 0 V to 10 V. Requires Rfb2x connection */
AD3552R_CH_OUTPUT_RANGE_0__10V,
/* Range from -5 V to 5 V. Requires Rfb2x connection */
AD3552R_CH_OUTPUT_RANGE_NEG_5__5V,
/* Range from -10 V to 10 V. Requires Rfb4x connection */
AD3552R_CH_OUTPUT_RANGE_NEG_10__10V,
};
int ad3552r_get_output_range(struct device *dev,
const struct ad3552r_model_data *model_info,
struct fwnode_handle *child, u32 *val);
int ad3552r_get_custom_gain(struct device *dev, struct fwnode_handle *child,
u8 *gs_p, u8 *gs_n, u16 *rfb, s16 *goffs);
u16 ad3552r_calc_custom_gain(u8 p, u8 n, s16 goffs);
int ad3552r_get_ref_voltage(struct device *dev, u32 *val);
int ad3552r_get_drive_strength(struct device *dev, u32 *val);
void ad3552r_calc_gain_and_offset(struct ad3552r_ch_data *ch_data,
const struct ad3552r_model_data *model_data);
#endif /* __DRIVERS_IIO_DAC_AD3552R_H__ */


@@ -582,7 +582,7 @@ static int zpa2326_fill_sample_buffer(struct iio_dev *indio_dev,
struct {
u32 pressure;
u16 temperature;
u64 timestamp;
aligned_s64 timestamp;
} sample;
int err;
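
The zpa2326 one-liner matters because iio_push_to_buffers_with_timestamp() needs the timestamp member on an 8-byte boundary, and a plain u64 does not guarantee that on every 32-bit ABI (x86-32 aligns 64-bit types to 4 bytes, m68k to 2). aligned_s64 forces the alignment regardless of ABI. User-space model of the guarantee:

#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sample {
	uint32_t pressure;
	uint16_t temperature;
	alignas(8) int64_t timestamp;	/* the kernel's aligned_s64 */
};

int main(void)
{
	/* guaranteed a multiple of 8 on every ABI thanks to alignas */
	printf("timestamp offset = %zu, struct size = %zu\n",
	       offsetof(struct sample, timestamp), sizeof(struct sample));
	return 0;
}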


@@ -61,7 +61,8 @@ static ssize_t multi_intensity_store(struct device *dev,
for (i = 0; i < mcled_cdev->num_colors; i++)
mcled_cdev->subled_info[i].intensity = intensity_value[i];
led_set_brightness(led_cdev, led_cdev->brightness);
if (!test_bit(LED_BLINK_SW, &led_cdev->work_flags))
led_set_brightness(led_cdev, led_cdev->brightness);
ret = size;
err_out:
mutex_unlock(&led_cdev->led_access);


@@ -490,8 +490,8 @@ void mbox_free_channel(struct mbox_chan *chan)
if (chan->txdone_method == TXDONE_BY_ACK)
chan->txdone_method = TXDONE_BY_POLL;
module_put(chan->mbox->dev->driver->owner);
spin_unlock_irqrestore(&chan->lock, flags);
module_put(chan->mbox->dev->driver->owner);
}
EXPORT_SYMBOL_GPL(mbox_free_channel);
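
The general pattern, sketched in user space with invented names (chan_put(), chan_release()): drop a reference that can trigger release work only after the spinlock is released, so the critical section covers nothing but the state the lock actually protects:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct channel {
        pthread_spinlock_t lock;
        atomic_int refcount;
        void (*release)(struct channel *);
};

static void chan_release(struct channel *ch)
{
        (void)ch;
        /* Stand-in for potentially heavyweight teardown (module unload,
         * freeing queues, ...) that should not run under a spinlock. */
        printf("released\n");
}

static void chan_put(struct channel *ch)
{
        if (atomic_fetch_sub(&ch->refcount, 1) == 1)
                ch->release(ch);
}

static void free_channel(struct channel *ch)
{
        pthread_spin_lock(&ch->lock);
        /* ... reset only the state the lock protects ... */
        pthread_spin_unlock(&ch->lock);

        chan_put(ch);   /* reference drop happens outside the lock */
}

int main(void)
{
        struct channel ch = { .refcount = 1, .release = chan_release };

        pthread_spin_init(&ch.lock, PTHREAD_PROCESS_PRIVATE);
        free_channel(&ch);
        pthread_spin_destroy(&ch.lock);
        return 0;
}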


@@ -1733,7 +1733,12 @@ static CLOSURE_CALLBACK(cache_set_flush)
 		mutex_unlock(&b->write_lock);
 	}
 
-	if (ca->alloc_thread)
+	/*
+	 * If the register_cache_set() call to bch_cache_set_alloc() failed,
+	 * ca has not been assigned a value and return error.
+	 * So we need check ca is not NULL during bch_cache_set_unregister().
+	 */
+	if (ca && ca->alloc_thread)
 		kthread_stop(ca->alloc_thread);
 
 	if (c->journal.cur) {
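
The underlying pattern: a teardown path shared with an error path must tolerate members that were never initialized. A small sketch under that assumption, with invented names (device_teardown(), struct device_state):

#include <stdio.h>

struct worker;  /* opaque stand-in for a kthread/allocator thread */

struct device_state {
        struct worker *alloc_worker;    /* stays NULL if setup failed early */
};

/* Teardown checks every member: if setup bailed out before the worker
 * was created, alloc_worker is still NULL when we get here. */
static void device_teardown(struct device_state *st)
{
        if (st->alloc_worker)
                printf("stopping worker\n");    /* stop_worker(...) */
        else
                printf("nothing to stop (partial init)\n");
}

int main(void)
{
        struct device_state st = { .alloc_worker = NULL };

        device_teardown(&st);   /* safe even though init never ran */
        return 0;
}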


@@ -2410,7 +2410,7 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
 	 */
 	sb_retrieve_failed_devices(sb, failed_devices);
 	rdev_for_each(r, mddev) {
-		if (test_bit(Journal, &rdev->flags) ||
+		if (test_bit(Journal, &r->flags) ||
 		    !r->sb_page)
 			continue;
 		sb2 = page_address(r->sb_page);
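
The bug fixed here is a classic shadowing slip: the loop iterates with r, but the test used the enclosing function's rdev parameter, so every iteration tested the same device. A compact demo with made-up data:

#include <stdbool.h>
#include <stdio.h>

struct rdev {
        bool journal;
        int id;
};

static void check_all(struct rdev *devs, int n, struct rdev *rdev)
{
        (void)rdev;     /* only the buggy variant would touch this */

        for (int i = 0; i < n; i++) {
                struct rdev *r = &devs[i];

                /* Buggy form: testing rdev->journal here skips (or keeps)
                 * every element based on the caller's rdev alone. */
                if (r->journal)
                        continue;
                printf("validating rdev %d\n", r->id);
        }
}

int main(void)
{
        struct rdev devs[] = { { true, 0 }, { false, 1 }, { false, 2 } };

        /* Pass a journal device as the outer rdev: with the buggy test
         * the loop would have validated nothing at all. */
        check_all(devs, 3, &devs[0]);
        return 0;
}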


@@ -754,10 +754,11 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request *
 				      u32 physical_page, struct cached_page **page_ptr)
 {
 	struct cached_page *page;
+	unsigned int zone_number = request->zone_number;
 
 	get_page_from_cache(&volume->page_cache, physical_page, &page);
 	if (page != NULL) {
-		if (request->zone_number == 0) {
+		if (zone_number == 0) {
 			/* Only one zone is allowed to update the LRU. */
 			make_page_most_recent(&volume->page_cache, page);
 		}
@@ -767,7 +768,7 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request *
 	}
 
 	/* Prepare to enqueue a read for the page. */
-	end_pending_search(&volume->page_cache, request->zone_number);
+	end_pending_search(&volume->page_cache, zone_number);
 	mutex_lock(&volume->read_threads_mutex);
 
 	/*
@@ -787,8 +788,7 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request *
 		 * the order does not matter for correctness as it does below.
 		 */
 		mutex_unlock(&volume->read_threads_mutex);
-		begin_pending_search(&volume->page_cache, physical_page,
-				     request->zone_number);
+		begin_pending_search(&volume->page_cache, physical_page, zone_number);
 		return UDS_QUEUED;
 	}
@@ -797,7 +797,7 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request *
 	 * "search pending" state in careful order so no other thread can mess with the data before
 	 * the caller gets to look at it.
 	 */
-	begin_pending_search(&volume->page_cache, physical_page, request->zone_number);
+	begin_pending_search(&volume->page_cache, physical_page, zone_number);
 	mutex_unlock(&volume->read_threads_mutex);
 	*page_ptr = page;
 	return UDS_SUCCESS;
@@ -849,6 +849,7 @@ static int search_cached_index_page(struct volume *volume, struct uds_request *r
 {
 	int result;
 	struct cached_page *page = NULL;
+	unsigned int zone_number = request->zone_number;
 	u32 physical_page = map_to_physical_page(volume->geometry, chapter,
 						 index_page_number);
@@ -858,18 +859,18 @@ static int search_cached_index_page(struct volume *volume, struct uds_request *r
 	 * invalidation by the reader thread, before the reader thread has noticed that the
 	 * invalidate_counter has been incremented.
 	 */
-	begin_pending_search(&volume->page_cache, physical_page, request->zone_number);
+	begin_pending_search(&volume->page_cache, physical_page, zone_number);
 
 	result = get_volume_page_protected(volume, request, physical_page, &page);
 	if (result != UDS_SUCCESS) {
-		end_pending_search(&volume->page_cache, request->zone_number);
+		end_pending_search(&volume->page_cache, zone_number);
 		return result;
 	}
 
 	result = uds_search_chapter_index_page(&page->index_page, volume->geometry,
 					       &request->record_name,
 					       record_page_number);
-	end_pending_search(&volume->page_cache, request->zone_number);
+	end_pending_search(&volume->page_cache, zone_number);
 
 	return result;
 }
@@ -882,6 +883,7 @@ int uds_search_cached_record_page(struct volume *volume, struct uds_request *req
 {
 	struct cached_page *record_page;
 	struct index_geometry *geometry = volume->geometry;
+	unsigned int zone_number = request->zone_number;
 	int result;
 	u32 physical_page, page_number;
@@ -905,11 +907,11 @@ int uds_search_cached_record_page(struct volume *volume, struct uds_request *req
 	 * invalidation by the reader thread, before the reader thread has noticed that the
 	 * invalidate_counter has been incremented.
 	 */
-	begin_pending_search(&volume->page_cache, physical_page, request->zone_number);
+	begin_pending_search(&volume->page_cache, physical_page, zone_number);
 
 	result = get_volume_page_protected(volume, request, physical_page, &record_page);
 	if (result != UDS_SUCCESS) {
-		end_pending_search(&volume->page_cache, request->zone_number);
+		end_pending_search(&volume->page_cache, zone_number);
 		return result;
 	}
@@ -917,7 +919,7 @@ int uds_search_cached_record_page(struct volume *volume, struct uds_request *req
 			  &request->record_name, geometry, &request->old_metadata))
 		*found = true;
 
-	end_pending_search(&volume->page_cache, request->zone_number);
+	end_pending_search(&volume->page_cache, zone_number);
 	return UDS_SUCCESS;
 }
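
The recurring edit in this file, copying request->zone_number into a local before the request can be handed off, matters because once a request is enqueued for another thread it may be processed and freed at any moment; touching request afterwards would be a use-after-free. A user-space sketch of the rule (queue and names invented):

#include <stdio.h>
#include <stdlib.h>

struct request {
        unsigned int zone_number;
};

/* Stand-in for enqueuing to another thread: here the "other thread"
 * frees the request immediately, which is the worst legal timing. */
static void enqueue_read(struct request *req)
{
        free(req);
}

static void submit(struct request *req)
{
        /* Copy out everything we need BEFORE the handoff... */
        unsigned int zone_number = req->zone_number;

        enqueue_read(req);      /* req may be freed from here on */

        /* ...so the post-handoff bookkeeping never touches req. */
        printf("pending search ended for zone %u\n", zone_number);
}

int main(void)
{
        struct request *req = malloc(sizeof(*req));

        if (!req)
                return 1;
        req->zone_number = 0;
        submit(req);
        return 0;
}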


@@ -787,7 +787,7 @@ static int md_bitmap_new_disk_sb(struct bitmap *bitmap)
 	 * is a good choice? We choose COUNTER_MAX / 2 arbitrarily.
 	 */
 	write_behind = bitmap->mddev->bitmap_info.max_write_behind;
-	if (write_behind > COUNTER_MAX)
+	if (write_behind > COUNTER_MAX / 2)
 		write_behind = COUNTER_MAX / 2;
 	sb->write_behind = cpu_to_le32(write_behind);
 	bitmap->mddev->bitmap_info.max_write_behind = write_behind;
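
The one-character-class bug: the old test only clamped values above COUNTER_MAX, so anything between COUNTER_MAX / 2 and COUNTER_MAX slipped through even though the comment promises a cap of COUNTER_MAX / 2. A tiny demonstration with an illustrative COUNTER_MAX (not md's real value):

#include <stdio.h>

#define COUNTER_MAX 32  /* illustrative only */

static unsigned int clamp_buggy(unsigned int wb)
{
        if (wb > COUNTER_MAX)           /* misses (MAX/2, MAX] */
                wb = COUNTER_MAX / 2;
        return wb;
}

static unsigned int clamp_fixed(unsigned int wb)
{
        if (wb > COUNTER_MAX / 2)       /* compares against the cap itself */
                wb = COUNTER_MAX / 2;
        return wb;
}

int main(void)
{
        unsigned int wb = 24;   /* between COUNTER_MAX / 2 and COUNTER_MAX */

        printf("buggy: %u, fixed: %u\n", clamp_buggy(wb), clamp_fixed(wb));
        /* prints "buggy: 24, fixed: 16" */
        return 0;
}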


@@ -2105,7 +2105,7 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
 	unsigned int processed_ctrls = 0;
 	struct uvc_control *ctrl;
 	unsigned int i;
-	int ret;
+	int ret = 0;
 
 	if (entity == NULL)
 		return 0;
@@ -2134,8 +2134,6 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
 				dev->intfnum, ctrl->info.selector,
 				uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
 				ctrl->info.size);
-		else
-			ret = 0;
 
 		if (!ret)
 			processed_ctrls++;
@@ -2147,17 +2145,24 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
 		ctrl->dirty = 0;
 
-		if (ret < 0) {
+		if (ret < 0 && !rollback) {
 			if (err_ctrl)
 				*err_ctrl = ctrl;
-			return ret;
+			/*
+			 * If we fail to set a control, we need to rollback
+			 * the next ones.
+			 */
+			rollback = 1;
 		}
 
-		if (!rollback && handle &&
+		if (!rollback && handle && !ret &&
 		    ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
 			uvc_ctrl_set_handle(handle, ctrl, handle);
 	}
 
+	if (ret)
+		return ret;
+
 	return processed_ctrls;
 }
@@ -2188,7 +2193,8 @@ int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback,
 	struct uvc_video_chain *chain = handle->chain;
 	struct uvc_control *err_ctrl;
 	struct uvc_entity *entity;
-	int ret = 0;
+	int ret_out = 0;
+	int ret;
 
 	/* Find the control. */
 	list_for_each_entry(entity, &chain->entities, chain) {
@@ -2199,17 +2205,23 @@ int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback,
 			ctrls->error_idx =
 				uvc_ctrl_find_ctrl_idx(entity, ctrls,
 						       err_ctrl);
-			goto done;
+			/*
+			 * When we fail to commit an entity, we need to
+			 * restore the UVC_CTRL_DATA_BACKUP for all the
+			 * controls in the other entities, otherwise our cache
+			 * and the hardware will be out of sync.
+			 */
+			rollback = 1;
+			ret_out = ret;
 		} else if (ret > 0 && !rollback) {
 			uvc_ctrl_send_events(handle, entity,
 					     ctrls->controls, ctrls->count);
 		}
 	}
 
-	ret = 0;
-
-done:
 	mutex_unlock(&chain->ctrl_mutex);
-	return ret;
+	return ret_out;
 }
 
 static int uvc_mapping_get_xctrl_compound(struct uvc_video_chain *chain,

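The control-flow change in both hunks above follows one pattern: on the first failure, stop returning early, remember the error, switch into rollback mode, and keep walking the remaining items so their cached state stays consistent; the saved error is returned at the end. A simplified user-space sketch of that continue-and-rollback idea (names invented, not the uvcvideo logic):

#include <stdio.h>

struct ctrl {
        int fails;      /* nonzero: applying this control fails */
        int applied;
};

static int apply(struct ctrl *c)    { return c->fails ? -5 : 0; }
static void restore(struct ctrl *c) { c->applied = 0; }

static int commit_all(struct ctrl *ctrls, int n)
{
        int first_err = 0;
        int rollback = 0;

        for (int i = 0; i < n; i++) {
                if (rollback) {
                        /* Past the failure point: undo, don't apply. */
                        restore(&ctrls[i]);
                        continue;
                }

                int ret = apply(&ctrls[i]);

                if (ret < 0) {
                        /* Remember the first error, then keep iterating
                         * so the remaining entries are rolled back. */
                        first_err = ret;
                        rollback = 1;
                } else {
                        ctrls[i].applied = 1;
                }
        }

        return first_err;
}

int main(void)
{
        struct ctrl ctrls[3] = { { 0, 0 }, { 1, 0 }, { 0, 0 } };

        printf("commit_all -> %d\n", commit_all(ctrls, 3));     /* -5 */
        return 0;
}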

@@ -456,6 +456,7 @@ static void max14577_i2c_remove(struct i2c_client *i2c)
 {
 	struct max14577 *max14577 = i2c_get_clientdata(i2c);
 
 	device_init_wakeup(max14577->dev, false);
+	mfd_remove_devices(max14577->dev);
 	regmap_del_irq_chip(max14577->irq, max14577->irq_data);
 	if (max14577->dev_type == MAXIM_DEVICE_TYPE_MAX77836)

Some files were not shown because too many files have changed in this diff.