This is the 6.6.53 stable release

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmb6tY0ACgkQONu9yGCS
aT44+A/+KxriHSrR7Gc+iHmrBQPJf82/lywxBmBDrTn5Rs5X1u81riN58WOHmxZp
TCW4V0IKvCLukcNR3hPDrRQp2drvvXAUgxnzYpoa3HNf1sgoRdUlEXUc/tF2lKmB
5ihupU/mJiSetXHyHajxBnkrRq6xb4zYnypQdDfZE7Vins2VtZ2hFnTAFmvAFrle
hNJTDf2BQenqPgQO50ecW3Urpit+2FDDTGY4k1AhbaKOmKWBajeGEHahFOQG7RUu
5nSH9aG86E2k4CGzZCc/jmgBJyPJTDgw31AmRwRjNvMIrcz3uYhLhqbSLaXFYQi6
L5Yz9cJk/UpiPa3U0wQmjl3v+B/UZZzxRiytBJgoErLwTFBX/bPNflzLYMvdsJx9
wlNEGWOgm+oJ7qAGIKjUK7jd66dKRfxE8t7Mo5ukWF3TufyGGPgt39woSB3uImxc
maPVIB+zd7Szeocu8mvDQaLoTxs7bhxmMbkPsHR9C1oqPwO5RJrZSHJoqD/ZWuy4
rHab6ozA3mHfoqPeA+cKQOZRRyEulyL0ybDvcbrkJK4QLfn3+wgyuFOasrjlWmmC
68Npm2mOmIk6wdX5Pfv7caPqwS0Wdvr9WjJrfO/0IjwPSHSZK+WmGXdG+YMLlUHn
kOlux9HlNDI9CDrzmAV6NOXQGOu+HMCevMielElx1YTIgxu8sMQ=
=hhxu
-----END PGP SIGNATURE-----

Merge 6.6.53 into android15-6.6-lts

Changes in 6.6.53
    ASoC: SOF: mediatek: Add missing board compatible
    ASoC: mediatek: mt8188: Mark AFE_DAC_CON0 register as volatile
    ASoC: allow module autoloading for table db1200_pids
    ASoC: allow module autoloading for table board_ids
    ALSA: hda/realtek - Fixed ALC256 headphone no sound
    ALSA: hda/realtek - FIxed ALC285 headphone no sound
    scsi: lpfc: Fix overflow build issue
    pinctrl: at91: make it work with current gpiolib
    hwmon: (asus-ec-sensors) remove VRM temp X570-E GAMING
    microblaze: don't treat zero reserved memory regions as error
    platform/x86: x86-android-tablets: Make Lenovo Yoga Tab 3 X90F DMI match less strict
    net: ftgmac100: Ensure tx descriptor updates are visible
    LoongArch: Define ARCH_IRQ_INIT_FLAGS as IRQ_NOPROBE
    wifi: iwlwifi: lower message level for FW buffer destination
    wifi: iwlwifi: mvm: fix iwl_mvm_scan_fits() calculation
    wifi: iwlwifi: mvm: fix iwl_mvm_max_scan_ie_fw_cmd_room()
    wifi: iwlwifi: mvm: pause TCM when the firmware is stopped
    wifi: iwlwifi: mvm: don't wait for tx queues if firmware is dead
    wifi: mac80211: free skb on error path in ieee80211_beacon_get_ap()
    wifi: iwlwifi: clear trans->state earlier upon error
    can: mcp251xfd: mcp251xfd_ring_init(): check TX-coalescing configuration
    ASoC: Intel: soc-acpi-cht: Make Lenovo Yoga Tab 3 X90F DMI match less strict
    ASoC: intel: fix module autoloading
    ASoC: google: fix module autoloading
    ASoC: tda7419: fix module autoloading
    ASoC: fix module autoloading
    spi: spidev: Add an entry for elgin,jg10309-01
    ASoC: amd: yc: Add a quirk for MSI Bravo 17 (D7VEK)
    ALSA: hda: add HDMI codec ID for Intel PTL
    drm: komeda: Fix an issue related to normalized zpos
    spi: bcm63xx: Enable module autoloading
    smb: client: fix hang in wait_for_response() for negproto
    x86/hyperv: Set X86_FEATURE_TSC_KNOWN_FREQ when Hyper-V provides frequency
    tools: hv: rm .*.cmd when make clean
    block: Fix where bio IO priority gets set
    spi: spidev: Add missing spi_device_id for jg10309-01
    ocfs2: add bounds checking to ocfs2_xattr_find_entry()
    ocfs2: strict bound check before memcmp in ocfs2_xattr_find_entry()
    drm: Use XArray instead of IDR for minors
    accel: Use XArray instead of IDR for minors
    drm: Expand max DRM device number to full MINORBITS
    powercap/intel_rapl: Add support for AMD family 1Ah
    netfilter: nft_socket: make cgroupsv2 matching work with namespaces
    netfilter: nft_socket: Fix a NULL vs IS_ERR() bug in nft_socket_cgroup_subtree_level()
    netfilter: nft_set_pipapo: walk over current view on netlink dump
    netfilter: nf_tables: missing iterator type in lookup walk
    Revert "wifi: cfg80211: check wiphy mutex is held for wdev mutex"
    gpiolib: cdev: Ignore reconfiguration without direction
    nvme-pci: qdepth 1 quirk
    x86/mm: Switch to new Intel CPU model defines
    can: mcp251xfd: properly indent labels
    can: mcp251xfd: move mcp251xfd_timestamp_start()/stop() into mcp251xfd_chip_start/stop()
    USB: serial: pl2303: add device id for Macrosilicon MS3020
    USB: usbtmc: prevent kernel-usb-infoleak
    Linux 6.6.53

Change-Id: I8cfbe593a8652fb96cd65838bbdc7e4fe3add08f
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
commit 5611cd3d91
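The bulk of this release's diff is the drm/accel conversion from IDR to XArray minor allocation ("drm: Use XArray instead of IDR for minors", "accel: Use XArray instead of IDR for minors", and the MINORBITS expansion). As a rough orientation before reading those hunks, here is a minimal sketch of the xa_alloc()/xa_load()/xa_erase() pattern the patches switch to; it is illustrative only (the names example_minor_* and example_minors_xa are not part of the kernel patch).

	/* Illustrative sketch only -- not the kernel's drm_drv.c.
	 * It mirrors the XArray calls that replace idr_alloc()/idr_find()
	 * in the hunks below.
	 */
	#include <linux/xarray.h>

	static DEFINE_XARRAY_ALLOC(example_minors_xa);	/* hypothetical minor table */

	static int example_minor_alloc(void *entry, u32 *out_id)
	{
		/* allocate an ID in 0..63, like the legacy primary-node range */
		return xa_alloc(&example_minors_xa, out_id, entry,
				XA_LIMIT(0, 63), GFP_KERNEL);
	}

	static void *example_minor_lookup(u32 id)
	{
		return xa_load(&example_minors_xa, id);
	}

	static void example_minor_free(u32 id)
	{
		xa_erase(&example_minors_xa, id);
	}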
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 6
-SUBLEVEL = 52
+SUBLEVEL = 53
 EXTRAVERSION =
 NAME = Hurr durr I'ma ninja sloth
@@ -9,6 +9,8 @@
 extern atomic_t irq_err_count;
 
+#define ARCH_IRQ_INIT_FLAGS	IRQ_NOPROBE
+
 /*
  * interrupt-retrigger: NOP for now. This may not be appropriate for all
  * machines, we'll see ...
@@ -122,9 +122,6 @@ void __init init_IRQ(void)
 		panic("IPI IRQ request failed\n");
 #endif
 
-	for (i = 0; i < NR_IRQS; i++)
-		irq_set_noprobe(i);
-
 	for_each_possible_cpu(i) {
 		page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, order);
@@ -193,11 +193,6 @@ asmlinkage void __init mmu_init(void)
 {
 	unsigned int kstart, ksize;
 
-	if (!memblock.reserved.cnt) {
-		pr_emerg("Error memory count\n");
-		machine_restart(NULL);
-	}
-
 	if ((u32) memblock.memory.regions[0].size < 0x400000) {
 		pr_emerg("Memory must be greater than 4MB\n");
 		machine_restart(NULL);
@@ -423,6 +423,7 @@ static void __init ms_hyperv_init_platform(void)
 	    ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) {
 		x86_platform.calibrate_tsc = hv_get_tsc_khz;
 		x86_platform.calibrate_cpu = hv_get_tsc_khz;
+		setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
 	}
 
 	if (ms_hyperv.priv_high & HV_ISOLATION) {
@@ -267,21 +267,17 @@ static void __init probe_page_size_mask(void)
 	}
 }
 
-#define INTEL_MATCH(_model) { .vendor = X86_VENDOR_INTEL,	\
-			      .family = 6,			\
-			      .model = _model,			\
-			    }
 /*
  * INVLPG may not properly flush Global entries
  * on these CPUs when PCIDs are enabled.
  */
 static const struct x86_cpu_id invlpg_miss_ids[] = {
-	INTEL_MATCH(INTEL_FAM6_ALDERLAKE   ),
-	INTEL_MATCH(INTEL_FAM6_ALDERLAKE_L ),
-	INTEL_MATCH(INTEL_FAM6_ATOM_GRACEMONT ),
-	INTEL_MATCH(INTEL_FAM6_RAPTORLAKE  ),
-	INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_P),
-	INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_S),
+	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, 0),
+	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 0),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, 0),
+	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, 0),
+	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, 0),
+	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, 0),
 	{}
 };
@@ -8,7 +8,7 @@
 
 #include <linux/debugfs.h>
 #include <linux/device.h>
-#include <linux/idr.h>
+#include <linux/xarray.h>
 
 #include <drm/drm_accel.h>
 #include <drm/drm_debugfs.h>
@@ -17,8 +17,7 @@
 #include <drm/drm_ioctl.h>
 #include <drm/drm_print.h>
 
-static DEFINE_SPINLOCK(accel_minor_lock);
-static struct idr accel_minors_idr;
+DEFINE_XARRAY_ALLOC(accel_minors_xa);
 
 static struct dentry *accel_debugfs_root;
 static struct class *accel_class;
@@ -120,99 +119,6 @@ void accel_set_device_instance_params(struct device *kdev, int index)
 	kdev->type = &accel_sysfs_device_minor;
 }
 
-/**
- * accel_minor_alloc() - Allocates a new accel minor
- *
- * This function access the accel minors idr and allocates from it
- * a new id to represent a new accel minor
- *
- * Return: A new id on success or error code in case idr_alloc failed
- */
-int accel_minor_alloc(void)
-{
-	unsigned long flags;
-	int r;
-
-	spin_lock_irqsave(&accel_minor_lock, flags);
-	r = idr_alloc(&accel_minors_idr, NULL, 0, ACCEL_MAX_MINORS, GFP_NOWAIT);
-	spin_unlock_irqrestore(&accel_minor_lock, flags);
-
-	return r;
-}
-
-/**
- * accel_minor_remove() - Remove an accel minor
- * @index: The minor id to remove.
- *
- * This function access the accel minors idr and removes from
- * it the member with the id that is passed to this function.
- */
-void accel_minor_remove(int index)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&accel_minor_lock, flags);
-	idr_remove(&accel_minors_idr, index);
-	spin_unlock_irqrestore(&accel_minor_lock, flags);
-}
-
-/**
- * accel_minor_replace() - Replace minor pointer in accel minors idr.
- * @minor: Pointer to the new minor.
- * @index: The minor id to replace.
- *
- * This function access the accel minors idr structure and replaces the pointer
- * that is associated with an existing id. Because the minor pointer can be
- * NULL, we need to explicitly pass the index.
- *
- * Return: 0 for success, negative value for error
- */
-void accel_minor_replace(struct drm_minor *minor, int index)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&accel_minor_lock, flags);
-	idr_replace(&accel_minors_idr, minor, index);
-	spin_unlock_irqrestore(&accel_minor_lock, flags);
-}
-
-/*
- * Looks up the given minor-ID and returns the respective DRM-minor object. The
- * refence-count of the underlying device is increased so you must release this
- * object with accel_minor_release().
- *
- * The object can be only a drm_minor that represents an accel device.
- *
- * As long as you hold this minor, it is guaranteed that the object and the
- * minor->dev pointer will stay valid! However, the device may get unplugged and
- * unregistered while you hold the minor.
- */
-static struct drm_minor *accel_minor_acquire(unsigned int minor_id)
-{
-	struct drm_minor *minor;
-	unsigned long flags;
-
-	spin_lock_irqsave(&accel_minor_lock, flags);
-	minor = idr_find(&accel_minors_idr, minor_id);
-	if (minor)
-		drm_dev_get(minor->dev);
-	spin_unlock_irqrestore(&accel_minor_lock, flags);
-
-	if (!minor) {
-		return ERR_PTR(-ENODEV);
-	} else if (drm_dev_is_unplugged(minor->dev)) {
-		drm_dev_put(minor->dev);
-		return ERR_PTR(-ENODEV);
-	}
-
-	return minor;
-}
-
-static void accel_minor_release(struct drm_minor *minor)
-{
-	drm_dev_put(minor->dev);
-}
-
 /**
  * accel_open - open method for ACCEL file
  * @inode: device inode
@@ -230,7 +136,7 @@ int accel_open(struct inode *inode, struct file *filp)
 	struct drm_minor *minor;
 	int retcode;
 
-	minor = accel_minor_acquire(iminor(inode));
+	minor = drm_minor_acquire(&accel_minors_xa, iminor(inode));
 	if (IS_ERR(minor))
 		return PTR_ERR(minor);
 
@@ -249,7 +155,7 @@ int accel_open(struct inode *inode, struct file *filp)
 
 err_undo:
 	atomic_dec(&dev->open_count);
-	accel_minor_release(minor);
+	drm_minor_release(minor);
 	return retcode;
 }
 EXPORT_SYMBOL_GPL(accel_open);
@@ -260,7 +166,7 @@ static int accel_stub_open(struct inode *inode, struct file *filp)
 	struct drm_minor *minor;
 	int err;
 
-	minor = accel_minor_acquire(iminor(inode));
+	minor = drm_minor_acquire(&accel_minors_xa, iminor(inode));
 	if (IS_ERR(minor))
 		return PTR_ERR(minor);
 
@@ -277,7 +183,7 @@ static int accel_stub_open(struct inode *inode, struct file *filp)
 	err = 0;
 
 out:
-	accel_minor_release(minor);
+	drm_minor_release(minor);
 
 	return err;
 }
@@ -293,15 +199,13 @@ void accel_core_exit(void)
 	unregister_chrdev(ACCEL_MAJOR, "accel");
 	debugfs_remove(accel_debugfs_root);
 	accel_sysfs_destroy();
-	idr_destroy(&accel_minors_idr);
+	WARN_ON(!xa_empty(&accel_minors_xa));
 }
 
 int __init accel_core_init(void)
 {
 	int ret;
 
-	idr_init(&accel_minors_idr);
-
 	ret = accel_sysfs_init();
 	if (ret < 0) {
 		DRM_ERROR("Cannot create ACCEL class: %d\n", ret);
@@ -1565,12 +1565,14 @@ static long linereq_set_config_unlocked(struct linereq *lr,
 		line = &lr->lines[i];
 		desc = lr->lines[i].desc;
 		flags = gpio_v2_line_config_flags(lc, i);
+		/*
+		 * Lines not explicitly reconfigured as input or output
+		 * are left unchanged.
+		 */
+		if (!(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
+			continue;
 		gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
 		edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
-		/*
-		 * Lines have to be requested explicitly for input
-		 * or output, else the line will be treated "as is".
-		 */
 		if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
 			int val = gpio_v2_line_config_output_value(lc, i);
 
@@ -1578,7 +1580,7 @@ static long linereq_set_config_unlocked(struct linereq *lr,
 			ret = gpiod_direction_output(desc, val);
 			if (ret)
 				return ret;
-		} else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
+		} else {
 			ret = gpiod_direction_input(desc);
 			if (ret)
 				return ret;
@@ -160,6 +160,7 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
 	struct drm_plane *plane;
 	struct list_head zorder_list;
 	int order = 0, err;
+	u32 slave_zpos = 0;
 
 	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] calculating normalized zpos values\n",
 			 crtc->base.id, crtc->name);
@@ -199,10 +200,13 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
 				 plane_st->zpos, plane_st->normalized_zpos);
 
 		/* calculate max slave zorder */
-		if (has_bit(drm_plane_index(plane), kcrtc->slave_planes))
+		if (has_bit(drm_plane_index(plane), kcrtc->slave_planes)) {
+			slave_zpos = plane_st->normalized_zpos;
+			if (to_kplane_st(plane_st)->layer_split)
+				slave_zpos++;
 			kcrtc_st->max_slave_zorder =
-				max(plane_st->normalized_zpos,
-				    kcrtc_st->max_slave_zorder);
+				max(slave_zpos, kcrtc_st->max_slave_zorder);
+		}
 	}
 
 	crtc_st->zpos_changed = true;
@@ -34,6 +34,7 @@
 #include <linux/pseudo_fs.h>
 #include <linux/slab.h>
 #include <linux/srcu.h>
+#include <linux/xarray.h>
 
 #include <drm/drm_accel.h>
 #include <drm/drm_cache.h>
@@ -54,8 +55,7 @@ MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
 MODULE_DESCRIPTION("DRM shared core routines");
 MODULE_LICENSE("GPL and additional rights");
 
-static DEFINE_SPINLOCK(drm_minor_lock);
-static struct idr drm_minors_idr;
+DEFINE_XARRAY_ALLOC(drm_minors_xa);
 
 /*
  * If the drm core fails to init for whatever reason,
@@ -83,6 +83,18 @@ DEFINE_STATIC_SRCU(drm_unplug_srcu);
  * registered and unregistered dynamically according to device-state.
  */
 
+static struct xarray *drm_minor_get_xa(enum drm_minor_type type)
+{
+	if (type == DRM_MINOR_PRIMARY || type == DRM_MINOR_RENDER)
+		return &drm_minors_xa;
+#if IS_ENABLED(CONFIG_DRM_ACCEL)
+	else if (type == DRM_MINOR_ACCEL)
+		return &accel_minors_xa;
+#endif
+	else
+		return ERR_PTR(-EOPNOTSUPP);
+}
+
 static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
 					     enum drm_minor_type type)
 {
@@ -101,25 +113,31 @@ static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
 static void drm_minor_alloc_release(struct drm_device *dev, void *data)
 {
 	struct drm_minor *minor = data;
-	unsigned long flags;
 
 	WARN_ON(dev != minor->dev);
 
 	put_device(minor->kdev);
 
-	if (minor->type == DRM_MINOR_ACCEL) {
-		accel_minor_remove(minor->index);
-	} else {
-		spin_lock_irqsave(&drm_minor_lock, flags);
-		idr_remove(&drm_minors_idr, minor->index);
-		spin_unlock_irqrestore(&drm_minor_lock, flags);
-	}
+	xa_erase(drm_minor_get_xa(minor->type), minor->index);
 }
 
+/*
+ * DRM used to support 64 devices, for backwards compatibility we need to maintain the
+ * minor allocation scheme where minors 0-63 are primary nodes, 64-127 are control nodes,
+ * and 128-191 are render nodes.
+ * After reaching the limit, we're allocating minors dynamically - first-come, first-serve.
+ * Accel nodes are using a distinct major, so the minors are allocated in continuous 0-MAX
+ * range.
+ */
+#define DRM_MINOR_LIMIT(t) ({ \
+	typeof(t) _t = (t); \
+	_t == DRM_MINOR_ACCEL ? XA_LIMIT(0, ACCEL_MAX_MINORS) : XA_LIMIT(64 * _t, 64 * _t + 63); \
+})
+#define DRM_EXTENDED_MINOR_LIMIT XA_LIMIT(192, (1 << MINORBITS) - 1)
+
 static int drm_minor_alloc(struct drm_device *dev, enum drm_minor_type type)
 {
 	struct drm_minor *minor;
-	unsigned long flags;
 	int r;
 
 	minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL);
@@ -129,25 +147,14 @@ static int drm_minor_alloc(struct drm_device *dev, enum drm_minor_type type)
 	minor->type = type;
 	minor->dev = dev;
 
-	idr_preload(GFP_KERNEL);
-	if (type == DRM_MINOR_ACCEL) {
-		r = accel_minor_alloc();
-	} else {
-		spin_lock_irqsave(&drm_minor_lock, flags);
-		r = idr_alloc(&drm_minors_idr,
-			NULL,
-			64 * type,
-			64 * (type + 1),
-			GFP_NOWAIT);
-		spin_unlock_irqrestore(&drm_minor_lock, flags);
-	}
-	idr_preload_end();
-
+	r = xa_alloc(drm_minor_get_xa(type), &minor->index,
+		     NULL, DRM_MINOR_LIMIT(type), GFP_KERNEL);
+	if (r == -EBUSY && (type == DRM_MINOR_PRIMARY || type == DRM_MINOR_RENDER))
+		r = xa_alloc(&drm_minors_xa, &minor->index,
+			     NULL, DRM_EXTENDED_MINOR_LIMIT, GFP_KERNEL);
 	if (r < 0)
 		return r;
 
-	minor->index = r;
-
 	r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor);
 	if (r)
 		return r;
@@ -163,7 +170,7 @@ static int drm_minor_alloc(struct drm_device *dev, enum drm_minor_type type)
 static int drm_minor_register(struct drm_device *dev, enum drm_minor_type type)
 {
 	struct drm_minor *minor;
-	unsigned long flags;
+	void *entry;
 	int ret;
 
 	DRM_DEBUG("\n");
@@ -187,13 +194,12 @@ static int drm_minor_register(struct drm_device *dev, enum drm_minor_type type)
 		goto err_debugfs;
 
 	/* replace NULL with @minor so lookups will succeed from now on */
-	if (minor->type == DRM_MINOR_ACCEL) {
-		accel_minor_replace(minor, minor->index);
-	} else {
-		spin_lock_irqsave(&drm_minor_lock, flags);
-		idr_replace(&drm_minors_idr, minor, minor->index);
-		spin_unlock_irqrestore(&drm_minor_lock, flags);
+	entry = xa_store(drm_minor_get_xa(type), minor->index, minor, GFP_KERNEL);
+	if (xa_is_err(entry)) {
+		ret = xa_err(entry);
+		goto err_debugfs;
 	}
+	WARN_ON(entry);
 
 	DRM_DEBUG("new minor registered %d\n", minor->index);
 	return 0;
@@ -206,20 +212,13 @@ err_debugfs:
 static void drm_minor_unregister(struct drm_device *dev, enum drm_minor_type type)
 {
 	struct drm_minor *minor;
-	unsigned long flags;
 
 	minor = *drm_minor_get_slot(dev, type);
 	if (!minor || !device_is_registered(minor->kdev))
 		return;
 
 	/* replace @minor with NULL so lookups will fail from now on */
-	if (minor->type == DRM_MINOR_ACCEL) {
-		accel_minor_replace(NULL, minor->index);
-	} else {
-		spin_lock_irqsave(&drm_minor_lock, flags);
-		idr_replace(&drm_minors_idr, NULL, minor->index);
-		spin_unlock_irqrestore(&drm_minor_lock, flags);
-	}
+	xa_store(drm_minor_get_xa(type), minor->index, NULL, GFP_KERNEL);
 
 	device_del(minor->kdev);
 	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
@@ -235,16 +234,15 @@ static void drm_minor_unregister(struct drm_device *dev, enum drm_minor_type type)
  * minor->dev pointer will stay valid! However, the device may get unplugged and
  * unregistered while you hold the minor.
  */
-struct drm_minor *drm_minor_acquire(unsigned int minor_id)
+struct drm_minor *drm_minor_acquire(struct xarray *minor_xa, unsigned int minor_id)
 {
 	struct drm_minor *minor;
-	unsigned long flags;
 
-	spin_lock_irqsave(&drm_minor_lock, flags);
-	minor = idr_find(&drm_minors_idr, minor_id);
+	xa_lock(minor_xa);
+	minor = xa_load(minor_xa, minor_id);
 	if (minor)
 		drm_dev_get(minor->dev);
-	spin_unlock_irqrestore(&drm_minor_lock, flags);
+	xa_unlock(minor_xa);
 
 	if (!minor) {
 		return ERR_PTR(-ENODEV);
@@ -1038,7 +1036,7 @@ static int drm_stub_open(struct inode *inode, struct file *filp)
 
 	DRM_DEBUG("\n");
 
-	minor = drm_minor_acquire(iminor(inode));
+	minor = drm_minor_acquire(&drm_minors_xa, iminor(inode));
 	if (IS_ERR(minor))
 		return PTR_ERR(minor);
 
@@ -1073,7 +1071,7 @@ static void drm_core_exit(void)
 	unregister_chrdev(DRM_MAJOR, "drm");
 	debugfs_remove(drm_debugfs_root);
 	drm_sysfs_destroy();
-	idr_destroy(&drm_minors_idr);
+	WARN_ON(!xa_empty(&drm_minors_xa));
 	drm_connector_ida_destroy();
 }
 
@@ -1082,7 +1080,6 @@ static int __init drm_core_init(void)
 	int ret;
 
 	drm_connector_ida_init();
-	idr_init(&drm_minors_idr);
 	drm_memcpy_init_early();
 
 	ret = drm_sysfs_init();
@@ -413,7 +413,7 @@ int drm_open(struct inode *inode, struct file *filp)
 	int retcode;
 	int need_setup = 0;
 
-	minor = drm_minor_acquire(iminor(inode));
+	minor = drm_minor_acquire(&drm_minors_xa, iminor(inode));
 	if (IS_ERR(minor))
 		return PTR_ERR(minor);
 
@@ -77,10 +77,6 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
 void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
 				 uint32_t handle);
 
-/* drm_drv.c */
-struct drm_minor *drm_minor_acquire(unsigned int minor_id);
-void drm_minor_release(struct drm_minor *minor);
-
 /* drm_managed.c */
 void drm_managed_release(struct drm_device *dev);
 void drmm_add_final_kfree(struct drm_device *dev, void *container);
@@ -402,7 +402,7 @@ static const struct ec_board_info board_info_strix_b550_i_gaming = {
 
 static const struct ec_board_info board_info_strix_x570_e_gaming = {
 	.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
-		SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+		SENSOR_TEMP_T_SENSOR |
 		SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
 		SENSOR_IN_CPU_CORE,
 	.mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
@@ -744,6 +744,7 @@ static void mcp251xfd_chip_stop(struct mcp251xfd_priv *priv,
 
 	mcp251xfd_chip_interrupts_disable(priv);
 	mcp251xfd_chip_rx_int_disable(priv);
+	mcp251xfd_timestamp_stop(priv);
 	mcp251xfd_chip_sleep(priv);
 }
 
@@ -763,6 +764,8 @@ static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
 	if (err)
 		goto out_chip_stop;
 
+	mcp251xfd_timestamp_start(priv);
+
 	err = mcp251xfd_set_bittiming(priv);
 	if (err)
 		goto out_chip_stop;
@@ -791,7 +794,7 @@ static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
 
 	return 0;
 
- out_chip_stop:
+out_chip_stop:
 	mcp251xfd_dump(priv);
 	mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
 
@@ -1576,7 +1579,7 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
 		handled = IRQ_HANDLED;
 	} while (1);
 
- out_fail:
+out_fail:
 	can_rx_offload_threaded_irq_finish(&priv->offload);
 
 	netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n",
@@ -1610,11 +1613,12 @@ static int mcp251xfd_open(struct net_device *ndev)
 	if (err)
 		goto out_mcp251xfd_ring_free;
 
+	mcp251xfd_timestamp_init(priv);
+
 	err = mcp251xfd_chip_start(priv);
 	if (err)
 		goto out_transceiver_disable;
 
-	mcp251xfd_timestamp_init(priv);
 	clear_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
 	can_rx_offload_enable(&priv->offload);
 
@@ -1641,22 +1645,21 @@ static int mcp251xfd_open(struct net_device *ndev)
 
 	return 0;
 
- out_free_irq:
+out_free_irq:
 	free_irq(spi->irq, priv);
- out_destroy_workqueue:
+out_destroy_workqueue:
 	destroy_workqueue(priv->wq);
- out_can_rx_offload_disable:
+out_can_rx_offload_disable:
 	can_rx_offload_disable(&priv->offload);
 	set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
-	mcp251xfd_timestamp_stop(priv);
- out_transceiver_disable:
+out_transceiver_disable:
 	mcp251xfd_transceiver_disable(priv);
- out_mcp251xfd_ring_free:
+out_mcp251xfd_ring_free:
 	mcp251xfd_ring_free(priv);
- out_pm_runtime_put:
+out_pm_runtime_put:
 	mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
 	pm_runtime_put(ndev->dev.parent);
- out_close_candev:
+out_close_candev:
 	close_candev(ndev);
 
 	return err;
@@ -1674,7 +1677,6 @@ static int mcp251xfd_stop(struct net_device *ndev)
 	free_irq(ndev->irq, priv);
 	destroy_workqueue(priv->wq);
 	can_rx_offload_disable(&priv->offload);
-	mcp251xfd_timestamp_stop(priv);
 	mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
 	mcp251xfd_transceiver_disable(priv);
 	mcp251xfd_ring_free(priv);
@@ -1820,9 +1822,9 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, u32 *dev_id,
 	*effective_speed_hz_slow = xfer[0].effective_speed_hz;
 	*effective_speed_hz_fast = xfer[1].effective_speed_hz;
 
- out_kfree_buf_tx:
+out_kfree_buf_tx:
 	kfree(buf_tx);
- out_kfree_buf_rx:
+out_kfree_buf_rx:
 	kfree(buf_rx);
 
 	return err;
@@ -1936,13 +1938,13 @@ static int mcp251xfd_register(struct mcp251xfd_priv *priv)
 
 	return 0;
 
- out_unregister_candev:
+out_unregister_candev:
 	unregister_candev(ndev);
- out_chip_sleep:
+out_chip_sleep:
 	mcp251xfd_chip_sleep(priv);
- out_runtime_disable:
+out_runtime_disable:
 	pm_runtime_disable(ndev->dev.parent);
- out_runtime_put_noidle:
+out_runtime_put_noidle:
 	pm_runtime_put_noidle(ndev->dev.parent);
 	mcp251xfd_clks_and_vdd_disable(priv);
 
@@ -2162,9 +2164,9 @@ static int mcp251xfd_probe(struct spi_device *spi)
 
 	return 0;
 
- out_can_rx_offload_del:
+out_can_rx_offload_del:
 	can_rx_offload_del(&priv->offload);
- out_free_candev:
+out_free_candev:
 	spi->max_speed_hz = priv->spi_max_speed_hz_orig;
 
 	free_candev(ndev);
@@ -94,7 +94,7 @@ static void mcp251xfd_dump_registers(const struct mcp251xfd_priv *priv,
 		kfree(buf);
 	}
 
- out:
+out:
 	mcp251xfd_dump_header(iter, MCP251XFD_DUMP_OBJECT_TYPE_REG, reg);
 }
 
@@ -397,7 +397,7 @@ mcp251xfd_regmap_crc_read(void *context,
 
 		return err;
 	}
- out:
+out:
 	memcpy(val_buf, buf_rx->data, val_len);
 
 	return 0;
@@ -290,7 +290,7 @@ int mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
 	const struct mcp251xfd_rx_ring *rx_ring;
 	u16 base = 0, ram_used;
 	u8 fifo_nr = 1;
-	int i;
+	int err = 0, i;
 
 	netdev_reset_queue(priv->ndev);
 
@@ -386,10 +386,18 @@ int mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
 		netdev_err(priv->ndev,
 			   "Error during ring configuration, using more RAM (%u bytes) than available (%u bytes).\n",
 			   ram_used, MCP251XFD_RAM_SIZE);
-		return -ENOMEM;
+		err = -ENOMEM;
 	}
 
-	return 0;
+	if (priv->tx_obj_num_coalesce_irq &&
+	    priv->tx_obj_num_coalesce_irq * 2 != priv->tx->obj_num) {
+		netdev_err(priv->ndev,
+			   "Error during ring configuration, number of TEF coalescing buffers (%u) must be half of TEF buffers (%u).\n",
+			   priv->tx_obj_num_coalesce_irq, priv->tx->obj_num);
+		err = -EINVAL;
+	}
+
+	return err;
 }
 
 void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
@@ -219,7 +219,7 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
 		total_frame_len += frame_len;
 	}
 
- out_netif_wake_queue:
+out_netif_wake_queue:
 	len = i;	/* number of handled goods TEFs */
 	if (len) {
 		struct mcp251xfd_tef_ring *ring = priv->tef;
@@ -48,9 +48,12 @@ void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv)
 	cc->shift = 1;
 	cc->mult = clocksource_hz2mult(priv->can.clock.freq, cc->shift);
 
-	timecounter_init(&priv->tc, &priv->cc, ktime_get_real_ns());
-
 	INIT_DELAYED_WORK(&priv->timestamp, mcp251xfd_timestamp_work);
+}
+
+void mcp251xfd_timestamp_start(struct mcp251xfd_priv *priv)
+{
+	timecounter_init(&priv->tc, &priv->cc, ktime_get_real_ns());
 	schedule_delayed_work(&priv->timestamp,
 			      MCP251XFD_TIMESTAMP_WORK_DELAY_SEC * HZ);
 }
@@ -957,6 +957,7 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv);
 int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv);
 int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv);
 void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv);
+void mcp251xfd_timestamp_start(struct mcp251xfd_priv *priv);
 void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv);
 
 void mcp251xfd_tx_obj_write_sync(struct work_struct *work);
@@ -572,7 +572,7 @@ static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
 	(*processed)++;
 	return true;
 
- drop:
+drop:
 	/* Clean rxdes0 (which resets own bit) */
 	rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask);
 	priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
@@ -656,6 +656,11 @@ static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
 	ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
 	txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
 
+	/* Ensure the descriptor config is visible before setting the tx
+	 * pointer.
+	 */
+	smp_wmb();
+
 	priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv, pointer);
 
 	return true;
@@ -809,6 +814,11 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
 	dma_wmb();
 	first->txdes0 = cpu_to_le32(f_ctl_stat);
 
+	/* Ensure the descriptor config is visible before setting the tx
+	 * pointer.
+	 */
+	smp_wmb();
+
 	/* Update next TX pointer */
 	priv->tx_pointer = pointer;
 
@@ -829,7 +839,7 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
 
 	return NETDEV_TX_OK;
 
- dma_err:
+dma_err:
 	if (net_ratelimit())
 		netdev_err(netdev, "map tx fragment failed\n");
 
@@ -851,7 +861,7 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
 	 * last fragment, so we know ftgmac100_free_tx_packet()
 	 * hasn't freed the skb yet.
 	 */
- drop:
+drop:
 	/* Drop the packet */
 	dev_kfree_skb_any(skb);
 	netdev->stats.tx_dropped++;
@@ -1344,7 +1354,7 @@ static void ftgmac100_reset(struct ftgmac100 *priv)
 	ftgmac100_init_all(priv, true);
 
 	netdev_dbg(netdev, "Reset done !\n");
- bail:
+bail:
 	if (priv->mii_bus)
 		mutex_unlock(&priv->mii_bus->mdio_lock);
 	if (netdev->phydev)
@@ -1543,15 +1553,15 @@ static int ftgmac100_open(struct net_device *netdev)
 
 	return 0;
 
- err_ncsi:
+err_ncsi:
 	napi_disable(&priv->napi);
 	netif_stop_queue(netdev);
- err_alloc:
+err_alloc:
 	ftgmac100_free_buffers(priv);
 	free_irq(netdev->irq, netdev);
- err_irq:
+err_irq:
 	netif_napi_del(&priv->napi);
- err_hw:
+err_hw:
 	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
 	ftgmac100_free_rings(priv);
 	return err;
@@ -3206,7 +3206,7 @@ void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
 {
 	int ret __maybe_unused = 0;
 
-	if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
+	if (!iwl_trans_fw_running(fwrt->trans))
 		return;
 
 	if (fw_has_capa(&fwrt->fw->ucode_capa,
@@ -1554,8 +1554,8 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync)
 
 	/* prevent double restarts due to the same erroneous FW */
 	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) {
-		iwl_op_mode_nic_error(trans->op_mode, sync);
 		trans->state = IWL_TRANS_NO_FW;
+		iwl_op_mode_nic_error(trans->op_mode, sync);
 	}
 }
 
@@ -5589,6 +5589,10 @@ static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
 	int i;
 
 	if (!iwl_mvm_has_new_tx_api(mvm)) {
+		/* we can't ask the firmware anything if it is dead */
+		if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+			     &mvm->status))
+			return;
 		if (drop) {
 			mutex_lock(&mvm->mutex);
 			iwl_mvm_flush_tx_path(mvm,
@@ -5673,8 +5677,11 @@ void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 
 	/* this can take a while, and we may need/want other operations
 	 * to succeed while doing this, so do it without the mutex held
+	 * If the firmware is dead, this can't work...
 	 */
-	if (!drop && !iwl_mvm_has_new_tx_api(mvm))
+	if (!drop && !iwl_mvm_has_new_tx_api(mvm) &&
+	    !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+		      &mvm->status))
 		iwl_trans_wait_tx_queues_empty(mvm->trans, msk);
 }
 
@@ -1418,6 +1418,8 @@ void iwl_mvm_stop_device(struct iwl_mvm *mvm)
 
 	clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
 
+	iwl_mvm_pause_tcm(mvm, false);
+
 	iwl_fw_dbg_stop_sync(&mvm->fwrt);
 	iwl_trans_stop_device(mvm->trans);
 	iwl_free_fw_paging(&mvm->fwrt);
@@ -48,6 +48,8 @@
 /* Number of iterations on the channel for mei filtered scan */
 #define IWL_MEI_SCAN_NUM_ITER	5U
 
+#define WFA_TPC_IE_LEN	9
+
 struct iwl_mvm_scan_timing_params {
 	u32 suspend_time;
 	u32 max_out_time;
@@ -296,8 +298,8 @@ static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm)
 
 	max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE;
 
-	/* we create the 802.11 header and SSID element */
-	max_probe_len -= 24 + 2;
+	/* we create the 802.11 header SSID element and WFA TPC element */
+	max_probe_len -= 24 + 2 + WFA_TPC_IE_LEN;
 
 	/* DS parameter set element is added on 2.4GHZ band if required */
 	if (iwl_mvm_rrm_scan_needed(mvm))
@@ -724,8 +726,6 @@ static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies,
 	return newpos;
 }
 
-#define WFA_TPC_IE_LEN	9
-
 static void iwl_mvm_add_tpc_report_ie(u8 *pos)
 {
 	pos[0] = WLAN_EID_VENDOR_SPECIFIC;
@@ -830,8 +830,8 @@ static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids,
 	return ((n_ssids <= PROBE_OPTION_MAX) &&
 		(n_channels <= mvm->fw->ucode_capa.n_scan_channels) &
 		(ies->common_ie_len +
-		 ies->len[NL80211_BAND_2GHZ] +
-		 ies->len[NL80211_BAND_5GHZ] <=
+		 ies->len[NL80211_BAND_2GHZ] + ies->len[NL80211_BAND_5GHZ] +
+		 ies->len[NL80211_BAND_6GHZ] <=
 		 iwl_mvm_max_scan_ie_fw_cmd_room(mvm)));
 }
 
@@ -3118,18 +3118,16 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
 		params.n_channels = j;
 	}
 
-	if (non_psc_included &&
-	    !iwl_mvm_scan_fits(mvm, req->n_ssids, ies, params.n_channels)) {
-		kfree(params.channels);
-		return -ENOBUFS;
+	if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, params.n_channels)) {
+		ret = -ENOBUFS;
+		goto out;
 	}
 
 	uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params, type);
-
-	if (non_psc_included)
-		kfree(params.channels);
-	if (uid < 0)
-		return uid;
+	if (uid < 0) {
+		ret = uid;
+		goto out;
+	}
 
 	ret = iwl_mvm_send_cmd(mvm, &hcmd);
 	if (!ret) {
@@ -3146,6 +3144,9 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
 		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
 	}
 
+out:
+	if (non_psc_included)
+		kfree(params.channels);
 	return ret;
 }
 
@@ -68,7 +68,8 @@ iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans,
 		}
 		break;
 	default:
-		IWL_ERR(trans, "WRT: Invalid buffer destination\n");
+		IWL_DEBUG_FW(trans, "WRT: Invalid buffer destination (%d)\n",
+			     le32_to_cpu(fw_mon_cfg->buf_location));
 	}
 out:
 	if (dbg_flags)
@@ -88,6 +88,11 @@ enum nvme_quirks {
 	 */
 	NVME_QUIRK_NO_DEEPEST_PS		= (1 << 5),
 
+	/*
+	 * Problems seen with concurrent commands
+	 */
+	NVME_QUIRK_QDEPTH_ONE			= (1 << 6),
+
 	/*
 	 * Set MEDIUM priority on SQ creation
 	 */
@@ -2526,15 +2526,8 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 	else
 		dev->io_sqes = NVME_NVM_IOSQES;
 
-	/*
-	 * Temporary fix for the Apple controller found in the MacBook8,1 and
-	 * some MacBook7,1 to avoid controller resets and data loss.
-	 */
-	if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) {
+	if (dev->ctrl.quirks & NVME_QUIRK_QDEPTH_ONE) {
 		dev->q_depth = 2;
-		dev_warn(dev->ctrl.device, "detected Apple NVMe controller, "
-			"set queue depth=%u to work around controller resets\n",
-			dev->q_depth);
 	} else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG &&
 		   (pdev->device == 0xa821 || pdev->device == 0xa822) &&
 		   NVME_CAP_MQES(dev->ctrl.cap) == 0) {
@@ -3399,6 +3392,8 @@ static const struct pci_device_id nvme_id_table[] = {
 				NVME_QUIRK_BOGUS_NID, },
 	{ PCI_VDEVICE(REDHAT, 0x0010),	/* Qemu emulated controller */
 		.driver_data = NVME_QUIRK_BOGUS_NID, },
+	{ PCI_DEVICE(0x1217, 0x8760), /* O2 Micro 64GB Steam Deck */
+		.driver_data = NVME_QUIRK_QDEPTH_ONE },
 	{ PCI_DEVICE(0x126f, 0x2262),	/* Silicon Motion generic */
 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
 				NVME_QUIRK_BOGUS_NID, },
@@ -3531,7 +3526,12 @@ static const struct pci_device_id nvme_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02),
 		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
-		.driver_data = NVME_QUIRK_SINGLE_VECTOR },
+		/*
+		 * Fix for the Apple controller found in the MacBook8,1 and
+		 * some MacBook7,1 to avoid controller resets and data loss.
+		 */
+		.driver_data = NVME_QUIRK_SINGLE_VECTOR |
+				NVME_QUIRK_QDEPTH_ONE },
 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
 		.driver_data = NVME_QUIRK_SINGLE_VECTOR |
@@ -1410,8 +1410,11 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
 
 	/* We will handle a range of GPIO pins */
 	for (i = 0; i < gpio_banks; i++)
-		if (gpio_chips[i])
+		if (gpio_chips[i]) {
 			pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
+			gpiochip_add_pin_range(&gpio_chips[i]->chip, dev_name(info->pctl->dev), 0,
+				gpio_chips[i]->range.pin_base, gpio_chips[i]->range.npins);
+		}
 
 	dev_info(dev, "initialized AT91 pinctrl driver\n");
 
@@ -122,7 +122,6 @@ const struct dmi_system_id x86_android_tablet_ids[] __initconst = {
 		/* Lenovo Yoga Tab 3 Pro YT3-X90F */
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
 			DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
-			DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
 		},
 		.driver_data = (void *)&lenovo_yt3_info,
@@ -1280,6 +1280,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
 
 	X86_MATCH_VENDOR_FAM(AMD, 0x17, &rapl_defaults_amd),
 	X86_MATCH_VENDOR_FAM(AMD, 0x19, &rapl_defaults_amd),
+	X86_MATCH_VENDOR_FAM(AMD, 0x1A, &rapl_defaults_amd),
 	X86_MATCH_VENDOR_FAM(HYGON, 0x18, &rapl_defaults_amd),
 	{}
 };
@@ -5409,7 +5409,7 @@ lpfc_get_cgnbuf_info(struct bsg_job *job)
 	struct get_cgnbuf_info_req *cgnbuf_req;
 	struct lpfc_cgn_info *cp;
 	uint8_t *cgn_buff;
-	int size, cinfosz;
+	size_t size, cinfosz;
 	int rc = 0;
 
 	if (job->request_len < sizeof(struct fc_bsg_request) +
@@ -472,6 +472,7 @@ static const struct of_device_id bcm63xx_spi_of_match[] = {
 	{ .compatible = "brcm,bcm6358-spi", .data = &bcm6358_spi_reg_offsets },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, bcm63xx_spi_of_match);
 
 static int bcm63xx_spi_probe(struct platform_device *pdev)
 {
@@ -706,6 +706,7 @@ static struct class *spidev_class;
 static const struct spi_device_id spidev_spi_ids[] = {
 	{ .name = "bh2228fv" },
 	{ .name = "dh2228fv" },
+	{ .name = "jg10309-01" },
 	{ .name = "ltc2488" },
 	{ .name = "sx1301" },
 	{ .name = "bk4" },
@@ -735,6 +736,7 @@ static int spidev_of_check(struct device *dev)
 static const struct of_device_id spidev_dt_ids[] = {
 	{ .compatible = "cisco,spi-petra", .data = &spidev_of_check },
 	{ .compatible = "dh,dhcom-board", .data = &spidev_of_check },
+	{ .compatible = "elgin,jg10309-01", .data = &spidev_of_check },
 	{ .compatible = "lineartechnology,ltc2488", .data = &spidev_of_check },
 	{ .compatible = "lwn,bk4", .data = &spidev_of_check },
 	{ .compatible = "menlo,m53cpld", .data = &spidev_of_check },
@@ -754,7 +754,7 @@ static struct urb *usbtmc_create_urb(void)
 	if (!urb)
 		return NULL;
 
-	dmabuf = kmalloc(bufsize, GFP_KERNEL);
+	dmabuf = kzalloc(bufsize, GFP_KERNEL);
 	if (!dmabuf) {
 		usb_free_urb(urb);
 		return NULL;
@@ -118,6 +118,7 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
 	{ USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) },
 	{ USB_DEVICE(IBM_VENDOR_ID, IBM_PRODUCT_ID) },
+	{ USB_DEVICE(MACROSILICON_VENDOR_ID, MACROSILICON_MS3020_PRODUCT_ID) },
 	{ }					/* Terminating entry */
 };
 
@@ -171,3 +171,7 @@
 /* Allied Telesis VT-Kit3 */
 #define AT_VENDOR_ID		0x0caa
 #define AT_VTKIT3_PRODUCT_ID	0x3001
+
+/* Macrosilicon MS3020 */
+#define MACROSILICON_VENDOR_ID		0x345f
+#define MACROSILICON_MS3020_PRODUCT_ID	0x3020
@@ -1062,13 +1062,13 @@ ssize_t ocfs2_listxattr(struct dentry *dentry,
 	return i_ret + b_ret;
 }
 
-static int ocfs2_xattr_find_entry(int name_index,
+static int ocfs2_xattr_find_entry(struct inode *inode, int name_index,
 				  const char *name,
 				  struct ocfs2_xattr_search *xs)
 {
 	struct ocfs2_xattr_entry *entry;
 	size_t name_len;
-	int i, cmp = 1;
+	int i, name_offset, cmp = 1;
 
 	if (name == NULL)
 		return -EINVAL;
@@ -1076,13 +1076,22 @@ static int ocfs2_xattr_find_entry(int name_index,
 	name_len = strlen(name);
 	entry = xs->here;
 	for (i = 0; i < le16_to_cpu(xs->header->xh_count); i++) {
+		if ((void *)entry >= xs->end) {
+			ocfs2_error(inode->i_sb, "corrupted xattr entries");
+			return -EFSCORRUPTED;
+		}
 		cmp = name_index - ocfs2_xattr_get_type(entry);
 		if (!cmp)
 			cmp = name_len - entry->xe_name_len;
-		if (!cmp)
-			cmp = memcmp(name, (xs->base +
-				     le16_to_cpu(entry->xe_name_offset)),
-				     name_len);
+		if (!cmp) {
+			name_offset = le16_to_cpu(entry->xe_name_offset);
+			if ((xs->base + name_offset + name_len) > xs->end) {
+				ocfs2_error(inode->i_sb,
+					    "corrupted xattr entries");
+				return -EFSCORRUPTED;
+			}
+			cmp = memcmp(name, (xs->base + name_offset), name_len);
+		}
 		if (cmp == 0)
 			break;
 		entry += 1;
@@ -1166,7 +1175,7 @@ static int ocfs2_xattr_ibody_get(struct inode *inode,
 	xs->base = (void *)xs->header;
 	xs->here = xs->header->xh_entries;
 
-	ret = ocfs2_xattr_find_entry(name_index, name, xs);
+	ret = ocfs2_xattr_find_entry(inode, name_index, name, xs);
 	if (ret)
 		return ret;
 	size = le64_to_cpu(xs->here->xe_value_size);
@@ -2698,7 +2707,7 @@ static int ocfs2_xattr_ibody_find(struct inode *inode,
 
 	/* Find the named attribute. */
 	if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
-		ret = ocfs2_xattr_find_entry(name_index, name, xs);
+		ret = ocfs2_xattr_find_entry(inode, name_index, name, xs);
 		if (ret && ret != -ENODATA)
 			return ret;
 		xs->not_found = ret;
@@ -2833,7 +2842,7 @@ static int ocfs2_xattr_block_find(struct inode *inode,
 		xs->end = (void *)(blk_bh->b_data) + blk_bh->b_size;
 		xs->here = xs->header->xh_entries;
 
-		ret = ocfs2_xattr_find_entry(name_index, name, xs);
+		ret = ocfs2_xattr_find_entry(inode, name_index, name, xs);
 	} else
 		ret = ocfs2_xattr_index_block_find(inode, blk_bh,
 						   name_index,
@@ -656,6 +656,19 @@ allocate_buffers(struct TCP_Server_Info *server)
 static bool
 server_unresponsive(struct TCP_Server_Info *server)
 {
+	/*
+	 * If we're in the process of mounting a share or reconnecting a session
+	 * and the server abruptly shut down (e.g. socket wasn't closed, packet
+	 * had been ACK'ed but no SMB response), don't wait longer than 20s to
+	 * negotiate protocol.
+	 */
+	spin_lock(&server->srv_lock);
+	if (server->tcpStatus == CifsInNegotiate &&
+	    time_after(jiffies, server->lstrp + 20 * HZ)) {
+		spin_unlock(&server->srv_lock);
+		cifs_reconnect(server, false);
+		return true;
+	}
 	/*
 	 * We need to wait 3 echo intervals to make sure we handle such
 	 * situations right:
@@ -667,7 +680,6 @@ server_unresponsive(struct TCP_Server_Info *server)
 	 * 65s kernel_recvmsg times out, and we see that we haven't gotten
 	 * a response in >60s.
 	 */
-	spin_lock(&server->srv_lock);
 	if ((server->tcpStatus == CifsGood ||
 	    server->tcpStatus == CifsNeedNegotiate) &&
 	    (!server->ops->can_echo || server->ops->can_echo(server)) &&
@@ -51,11 +51,10 @@
 
 #if IS_ENABLED(CONFIG_DRM_ACCEL)
 
+extern struct xarray accel_minors_xa;
+
 void accel_core_exit(void);
 int accel_core_init(void);
-void accel_minor_remove(int index);
-int accel_minor_alloc(void);
-void accel_minor_replace(struct drm_minor *minor, int index);
 void accel_set_device_instance_params(struct device *kdev, int index);
 int accel_open(struct inode *inode, struct file *filp);
 void accel_debugfs_init(struct drm_minor *minor, int minor_id);
@@ -72,19 +71,6 @@ static inline int __init accel_core_init(void)
 	return 0;
 }
 
-static inline void accel_minor_remove(int index)
-{
-}
-
-static inline int accel_minor_alloc(void)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline void accel_minor_replace(struct drm_minor *minor, int index)
-{
-}
-
 static inline void accel_set_device_instance_params(struct device *kdev, int index)
 {
 }
@@ -45,6 +45,8 @@ struct drm_printer;
 struct device;
 struct file;
 
+extern struct xarray drm_minors_xa;
+
 /*
  * FIXME: Not sure we want to have drm_minor here in the end, but to avoid
  * header include loops we need it here for now.
@@ -441,6 +443,9 @@ static inline bool drm_is_accel_client(const struct drm_file *file_priv)
 
 void drm_file_update_pid(struct drm_file *);
 
+struct drm_minor *drm_minor_acquire(struct xarray *minors_xa, unsigned int minor_id);
+void drm_minor_release(struct drm_minor *minor);
+
 int drm_open(struct inode *inode, struct file *filp);
 int drm_open_helper(struct file *filp, struct drm_minor *minor);
 ssize_t drm_read(struct file *filp, char __user *buffer,
@@ -297,9 +297,22 @@ struct nft_set_elem {
 	void			*priv;
 };
 
+/**
+ * enum nft_iter_type - nftables set iterator type
+ *
+ * @NFT_ITER_READ: read-only iteration over set elements
+ * @NFT_ITER_UPDATE: iteration under mutex to update set element state
+ */
+enum nft_iter_type {
+	NFT_ITER_UNSPEC,
+	NFT_ITER_READ,
+	NFT_ITER_UPDATE,
+};
+
 struct nft_set;
 struct nft_set_iter {
 	u8			genmask;
+	enum nft_iter_type	type:8;
 	unsigned int		count;
 	unsigned int		skip;
 	int			err;
@@ -5311,8 +5311,10 @@ ieee80211_beacon_get_ap(struct ieee80211_hw *hw,
 	if (beacon->tail)
 		skb_put_data(skb, beacon->tail, beacon->tail_len);
 
-	if (ieee80211_beacon_protect(skb, local, sdata, link) < 0)
+	if (ieee80211_beacon_protect(skb, local, sdata, link) < 0) {
+		dev_kfree_skb(skb);
 		return NULL;
+	}
 
 	ieee80211_beacon_get_finish(hw, vif, link, offs, beacon, skb,
 				    chanctx_conf, csa_off_base);
@@ -628,6 +628,7 @@ static void nft_map_deactivate(const struct nft_ctx *ctx, struct nft_set *set)
 {
 	struct nft_set_iter iter = {
 		.genmask	= nft_genmask_next(ctx->net),
+		.type		= NFT_ITER_UPDATE,
 		.fn		= nft_mapelem_deactivate,
 	};
 
@@ -5392,6 +5393,7 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
 	}
 
 	iter.genmask	= nft_genmask_next(ctx->net);
+	iter.type	= NFT_ITER_UPDATE;
 	iter.skip	= 0;
 	iter.count	= 0;
 	iter.err	= 0;
@@ -5467,6 +5469,7 @@ static void nft_map_activate(const struct nft_ctx *ctx, struct nft_set *set)
 {
 	struct nft_set_iter iter = {
 		.genmask	= nft_genmask_next(ctx->net),
+		.type		= NFT_ITER_UPDATE,
 		.fn		= nft_mapelem_activate,
 	};
 
@@ -5845,6 +5848,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
 	args.skb		= skb;
 	args.reset		= reset;
 	args.iter.genmask	= nft_genmask_cur(net);
+	args.iter.type		= NFT_ITER_READ;
 	args.iter.skip		= cb->args[0];
 	args.iter.count		= 0;
 	args.iter.err		= 0;
@@ -7246,6 +7250,7 @@ static int nft_set_flush(struct nft_ctx *ctx, struct nft_set *set, u8 genmask)
 {
 	struct nft_set_iter iter = {
 		.genmask	= genmask,
+		.type		= NFT_ITER_UPDATE,
 		.fn		= nft_setelem_flush,
 	};
 
@@ -217,6 +217,7 @@ static int nft_lookup_validate(const struct nft_ctx *ctx,
 		return 0;
 
 	iter.genmask	= nft_genmask_next(ctx->net);
+	iter.type	= NFT_ITER_UPDATE;
 	iter.skip	= 0;
 	iter.count	= 0;
 	iter.err	= 0;
@@ -2037,13 +2037,15 @@ static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set,
 			    struct nft_set_iter *iter)
 {
 	struct nft_pipapo *priv = nft_set_priv(set);
-	struct net *net = read_pnet(&set->net);
 	const struct nft_pipapo_match *m;
 	const struct nft_pipapo_field *f;
 	int i, r;
 
+	WARN_ON_ONCE(iter->type != NFT_ITER_READ &&
+		     iter->type != NFT_ITER_UPDATE);
+
 	rcu_read_lock();
-	if (iter->genmask == nft_genmask_cur(net))
+	if (iter->type == NFT_ITER_READ)
 		m = rcu_dereference(priv->match);
 	else
 		m = priv->clone;
@@ -9,7 +9,8 @@
 
 struct nft_socket {
 	enum nft_socket_keys		key:8;
-	u8				level;
+	u8				level;		/* cgroupv2 level to extract */
+	u8				level_user;	/* cgroupv2 level provided by userspace */
 	u8				len;
 	union {
 		u8			dreg;
@@ -53,6 +54,28 @@ nft_sock_get_eval_cgroupv2(u32 *dest, struct sock *sk, const struct nft_pktinfo
 	memcpy(dest, &cgid, sizeof(u64));
 	return true;
 }
+
+/* process context only, uses current->nsproxy. */
+static noinline int nft_socket_cgroup_subtree_level(void)
+{
+	struct cgroup *cgrp = cgroup_get_from_path("/");
+	int level;
+
+	if (IS_ERR(cgrp))
+		return PTR_ERR(cgrp);
+
+	level = cgrp->level;
+
+	cgroup_put(cgrp);
+
+	if (WARN_ON_ONCE(level > 255))
+		return -ERANGE;
+
+	if (WARN_ON_ONCE(level < 0))
+		return -EINVAL;
+
+	return level;
+}
 #endif
 
 static struct sock *nft_socket_do_lookup(const struct nft_pktinfo *pkt)
@@ -174,9 +197,10 @@ static int nft_socket_init(const struct nft_ctx *ctx,
 	case NFT_SOCKET_MARK:
 		len = sizeof(u32);
 		break;
-#ifdef CONFIG_CGROUPS
+#ifdef CONFIG_SOCK_CGROUP_DATA
 	case NFT_SOCKET_CGROUPV2: {
 		unsigned int level;
+		int err;
 
 		if (!tb[NFTA_SOCKET_LEVEL])
 			return -EINVAL;
@@ -185,6 +209,17 @@ static int nft_socket_init(const struct nft_ctx *ctx,
 		if (level > 255)
 			return -EOPNOTSUPP;
 
+		err = nft_socket_cgroup_subtree_level();
+		if (err < 0)
+			return err;
+
+		priv->level_user = level;
+
+		level += err;
+		/* Implies a giant cgroup tree */
+		if (WARN_ON_ONCE(level > 255))
+			return -EOPNOTSUPP;
+
 		priv->level = level;
 		len = sizeof(u64);
 		break;
@@ -209,7 +244,7 @@ static int nft_socket_dump(struct sk_buff *skb,
 	if (nft_dump_register(skb, NFTA_SOCKET_DREG, priv->dreg))
 		return -1;
 	if (priv->key == NFT_SOCKET_CGROUPV2 &&
-	    nla_put_be32(skb, NFTA_SOCKET_LEVEL, htonl(priv->level)))
+	    nla_put_be32(skb, NFTA_SOCKET_LEVEL, htonl(priv->level_user)))
 		return -1;
 	return 0;
 }
@@ -236,7 +236,6 @@ void cfg80211_register_wdev(struct cfg80211_registered_device *rdev,
 static inline void wdev_lock(struct wireless_dev *wdev)
 	__acquires(wdev)
 {
-	lockdep_assert_held(&wdev->wiphy->mtx);
 	mutex_lock(&wdev->mtx);
 	__acquire(wdev->mtx);
 }
@@ -244,16 +243,11 @@ static inline void wdev_lock(struct wireless_dev *wdev)
 static inline void wdev_unlock(struct wireless_dev *wdev)
 	__releases(wdev)
 {
-	lockdep_assert_held(&wdev->wiphy->mtx);
 	__release(wdev->mtx);
 	mutex_unlock(&wdev->mtx);
 }
 
-static inline void ASSERT_WDEV_LOCK(struct wireless_dev *wdev)
-{
-	lockdep_assert_held(&wdev->wiphy->mtx);
-	lockdep_assert_held(&wdev->mtx);
-}
+#define ASSERT_WDEV_LOCK(wdev) lockdep_assert_held(&(wdev)->mtx)
 
 static inline bool cfg80211_has_monitors_only(struct cfg80211_registered_device *rdev)
 {
@@ -4646,6 +4646,7 @@ HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI",	patch_i915_adlp_hdmi),
 HDA_CODEC_ENTRY(0x8086281d, "Meteor Lake HDMI",	patch_i915_adlp_hdmi),
 HDA_CODEC_ENTRY(0x8086281f, "Raptor Lake P HDMI",	patch_i915_adlp_hdmi),
 HDA_CODEC_ENTRY(0x80862820, "Lunar Lake HDMI",	patch_i915_adlp_hdmi),
+HDA_CODEC_ENTRY(0x80862822, "Panther Lake HDMI",	patch_i915_adlp_hdmi),
 HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI",	patch_generic_hdmi),
 HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI",	patch_i915_byt_hdmi),
 HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI",	patch_i915_byt_hdmi),
@@ -4931,6 +4931,30 @@ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
    }
}

static void alc_hp_mute_disable(struct hda_codec *codec, unsigned int delay)
{
    if (delay <= 0)
        delay = 75;
    snd_hda_codec_write(codec, 0x21, 0,
                AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
    msleep(delay);
    snd_hda_codec_write(codec, 0x21, 0,
                AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
    msleep(delay);
}

static void alc_hp_enable_unmute(struct hda_codec *codec, unsigned int delay)
{
    if (delay <= 0)
        delay = 75;
    snd_hda_codec_write(codec, 0x21, 0,
                AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
    msleep(delay);
    snd_hda_codec_write(codec, 0x21, 0,
                AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
    msleep(delay);
}

static const struct coef_fw alc225_pre_hsmode[] = {
    UPDATE_COEF(0x4a, 1<<8, 0),
    UPDATE_COEFEX(0x57, 0x05, 1<<14, 0),

@@ -5032,6 +5056,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
    case 0x10ec0236:
    case 0x10ec0256:
    case 0x19e58326:
        alc_hp_mute_disable(codec, 75);
        alc_process_coef_fw(codec, coef0256);
        break;
    case 0x10ec0234:

@@ -5066,6 +5091,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
    case 0x10ec0295:
    case 0x10ec0289:
    case 0x10ec0299:
        alc_hp_mute_disable(codec, 75);
        alc_process_coef_fw(codec, alc225_pre_hsmode);
        alc_process_coef_fw(codec, coef0225);
        break;

@@ -5291,6 +5317,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
    case 0x10ec0299:
        alc_process_coef_fw(codec, alc225_pre_hsmode);
        alc_process_coef_fw(codec, coef0225);
        alc_hp_enable_unmute(codec, 75);
        break;
    case 0x10ec0255:
        alc_process_coef_fw(codec, coef0255);

@@ -5303,6 +5330,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
        alc_write_coef_idx(codec, 0x45, 0xc089);
        msleep(50);
        alc_process_coef_fw(codec, coef0256);
        alc_hp_enable_unmute(codec, 75);
        break;
    case 0x10ec0234:
    case 0x10ec0274:

@@ -5400,6 +5428,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
    case 0x10ec0256:
    case 0x19e58326:
        alc_process_coef_fw(codec, coef0256);
        alc_hp_enable_unmute(codec, 75);
        break;
    case 0x10ec0234:
    case 0x10ec0274:

@@ -5448,6 +5477,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
            alc_process_coef_fw(codec, coef0225_2);
        else
            alc_process_coef_fw(codec, coef0225_1);
        alc_hp_enable_unmute(codec, 75);
        break;
    case 0x10ec0867:
        alc_update_coefex_idx(codec, 0x57, 0x5, 1<<14, 0);

@@ -5515,6 +5545,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
    case 0x10ec0256:
    case 0x19e58326:
        alc_process_coef_fw(codec, coef0256);
        alc_hp_enable_unmute(codec, 75);
        break;
    case 0x10ec0234:
    case 0x10ec0274:

@@ -5552,6 +5583,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
    case 0x10ec0289:
    case 0x10ec0299:
        alc_process_coef_fw(codec, coef0225);
        alc_hp_enable_unmute(codec, 75);
        break;
    }
    codec_dbg(codec, "Headset jack set to Nokia-style headset mode.\n");

@@ -5620,25 +5652,21 @@ static void alc_determine_headset_type(struct hda_codec *codec)
        alc_write_coef_idx(codec, 0x06, 0x6104);
        alc_write_coefex_idx(codec, 0x57, 0x3, 0x09a3);

        snd_hda_codec_write(codec, 0x21, 0,
                AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
        msleep(80);
        snd_hda_codec_write(codec, 0x21, 0,
                AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);

        alc_process_coef_fw(codec, coef0255);
        msleep(300);
        val = alc_read_coef_idx(codec, 0x46);
        is_ctia = (val & 0x0070) == 0x0070;

        if (!is_ctia) {
            alc_write_coef_idx(codec, 0x45, 0xe089);
            msleep(100);
            val = alc_read_coef_idx(codec, 0x46);
            if ((val & 0x0070) == 0x0070)
                is_ctia = false;
            else
                is_ctia = true;
        }
        alc_write_coefex_idx(codec, 0x57, 0x3, 0x0da3);
        alc_update_coefex_idx(codec, 0x57, 0x5, 1<<14, 0);

        snd_hda_codec_write(codec, 0x21, 0,
                AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
        msleep(80);
        snd_hda_codec_write(codec, 0x21, 0,
                AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
        break;
    case 0x10ec0234:
    case 0x10ec0274:

@@ -5715,12 +5743,6 @@ static void alc_determine_headset_type(struct hda_codec *codec)
    case 0x10ec0295:
    case 0x10ec0289:
    case 0x10ec0299:
        snd_hda_codec_write(codec, 0x21, 0,
                AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
        msleep(80);
        snd_hda_codec_write(codec, 0x21, 0,
                AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);

        alc_process_coef_fw(codec, alc225_pre_hsmode);
        alc_update_coef_idx(codec, 0x67, 0xf000, 0x1000);
        val = alc_read_coef_idx(codec, 0x45);

@@ -5737,15 +5759,19 @@ static void alc_determine_headset_type(struct hda_codec *codec)
            val = alc_read_coef_idx(codec, 0x46);
            is_ctia = (val & 0x00f0) == 0x00f0;
        }
        if (!is_ctia) {
            alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x38<<10);
            alc_update_coef_idx(codec, 0x49, 3<<8, 1<<8);
            msleep(100);
            val = alc_read_coef_idx(codec, 0x46);
            if ((val & 0x00f0) == 0x00f0)
                is_ctia = false;
            else
                is_ctia = true;
        }
        alc_update_coef_idx(codec, 0x4a, 7<<6, 7<<6);
        alc_update_coef_idx(codec, 0x4a, 3<<4, 3<<4);
        alc_update_coef_idx(codec, 0x67, 0xf000, 0x3000);

        snd_hda_codec_write(codec, 0x21, 0,
                AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
        msleep(80);
        snd_hda_codec_write(codec, 0x21, 0,
                AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
        break;
    case 0x10ec0867:
        is_ctia = true;

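The Realtek hunks above add two small helpers, alc_hp_mute_disable() and alc_hp_enable_unmute(), and call them around the headset-mode COEF sequences instead of the open-coded mute/unmute verbs that alc_determine_headset_type() previously carried. A standalone toy sketch of the bracketing they provide (codec I/O is reduced to printf stubs; nothing here is real driver code):

/* illustration only: the mute -> probe -> unmute pairing the helpers enforce */
#include <stdio.h>

static void hp_mute_disable_demo(int delay)
{
    printf("mute amp, power down HP pin, wait %d ms after each step\n", delay);
}

static void hp_enable_unmute_demo(int delay)
{
    printf("power up HP pin, unmute amp, wait %d ms after each step\n", delay);
}

int main(void)
{
    hp_mute_disable_demo(75);                        /* silence the output path   */
    puts("...probe COEF 0x45/0x46 to tell CTIA from OMTP...");
    hp_enable_unmute_demo(75);                       /* restore output afterwards */
    return 0;
}

Keeping the pin powered down while the COEF probe runs is what avoids the pop/no-sound symptoms the ALC256/ALC285 changelog entries describe.
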
@@ -162,6 +162,8 @@ static const struct platform_device_id board_ids[] = {
    },
    { }
};
MODULE_DEVICE_TABLE(platform, board_ids);

static struct platform_driver acp_asoc_audio = {
    .driver = {
        .name = "sof_mach",

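The hunk above and several later ones in this section (db1200_pids, chv3_codec_of_match, tda7419_of_match, chv3_i2s_of_match, kmb_plat_of_match) apply the same one-line fix: an ID table that already existed gains MODULE_DEVICE_TABLE(), which exports the table as module alias strings so udev can autoload the driver when a matching device appears. A generic sketch of the pattern with hypothetical names (my_pids, my_driver, "my-device"); it is an illustration, not code from any of the patched drivers:

/* sketch: the MODULE_DEVICE_TABLE autoloading pattern for a platform driver */
#include <linux/module.h>
#include <linux/platform_device.h>

static const struct platform_device_id my_pids[] = {
    { .name = "my-device" },
    { }                             /* sentinel */
};
/* Emits "platform:my-device" into the module alias list, letting udev
 * modprobe this module when the platform device is registered.
 */
MODULE_DEVICE_TABLE(platform, my_pids);

static int my_probe(struct platform_device *pdev)
{
    return 0;                       /* nothing to set up in this sketch */
}

static struct platform_driver my_driver = {
    .probe = my_probe,
    .id_table = my_pids,
    .driver = {
        .name = "my-driver",
    },
};
module_platform_driver(my_driver);

MODULE_DESCRIPTION("MODULE_DEVICE_TABLE autoloading sketch");
MODULE_LICENSE("GPL");

Without the table export, the modules only load when requested explicitly, which is exactly the "allow module autoloading" problem the changelog entries refer to.
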
@@ -353,6 +353,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
            DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 15 C7VF"),
        }
    },
    {
        .driver_data = &acp6x_card,
        .matches = {
            DMI_MATCH(DMI_BOARD_VENDOR, "Micro-Star International Co., Ltd."),
            DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 17 D7VEK"),
        }
    },
    {
        .driver_data = &acp6x_card,
        .matches = {

@@ -44,6 +44,7 @@ static const struct platform_device_id db1200_pids[] = {
    },
    {},
};
MODULE_DEVICE_TABLE(platform, db1200_pids);

/*-------------------------  AC97 PART  ---------------------------*/

@@ -26,6 +26,7 @@ static const struct of_device_id chv3_codec_of_match[] = {
    { .compatible = "google,chv3-codec", },
    { }
};
MODULE_DEVICE_TABLE(of, chv3_codec_of_match);

static struct platform_driver chv3_codec_platform_driver = {
    .driver = {

@@ -623,6 +623,7 @@ static const struct of_device_id tda7419_of_match[] = {
    { .compatible = "st,tda7419" },
    { },
};
MODULE_DEVICE_TABLE(of, tda7419_of_match);

static struct i2c_driver tda7419_driver = {
    .driver = {

@@ -322,6 +322,7 @@ static const struct of_device_id chv3_i2s_of_match[] = {
    { .compatible = "google,chv3-i2s" },
    {},
};
MODULE_DEVICE_TABLE(of, chv3_i2s_of_match);

static struct platform_driver chv3_i2s_driver = {
    .probe = chv3_i2s_probe,

@@ -84,7 +84,6 @@ static const struct dmi_system_id lenovo_yoga_tab3_x90[] = {
        /* Lenovo Yoga Tab 3 Pro YT3-X90, codec missing from DSDT */
        .matches = {
            DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
            DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
            DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
        },
    },

@@ -815,6 +815,7 @@ static const struct of_device_id kmb_plat_of_match[] = {
    { .compatible = "intel,keembay-tdm", .data = &intel_kmb_tdm_dai},
    {}
};
MODULE_DEVICE_TABLE(of, kmb_plat_of_match);

static int kmb_plat_dai_probe(struct platform_device *pdev)
{

@@ -2748,6 +2748,7 @@ static bool mt8188_is_volatile_reg(struct device *dev, unsigned int reg)
    case AFE_ASRC12_NEW_CON9:
    case AFE_LRCK_CNT:
    case AFE_DAC_MON0:
    case AFE_DAC_CON0:
    case AFE_DL2_CUR:
    case AFE_DL3_CUR:
    case AFE_DL6_CUR:

@@ -625,6 +625,9 @@ static struct snd_sof_of_mach sof_mt8195_machs[] = {
    {
        .compatible = "google,tomato",
        .sof_tplg_filename = "sof-mt8195-mt6359-rt1019-rt5682.tplg"
    }, {
        .compatible = "google,dojo",
        .sof_tplg_filename = "sof-mt8195-mt6359-max98390-rt5682.tplg"
    }, {
        .compatible = "mediatek,mt8195",
        .sof_tplg_filename = "sof-mt8195.tplg"

@@ -47,7 +47,7 @@ $(OUTPUT)hv_fcopy_daemon: $(HV_FCOPY_DAEMON_IN)

clean:
	rm -f $(ALL_PROGRAMS)
	find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
	find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete -o -name '\.*.cmd' -delete

install: $(ALL_PROGRAMS)
	install -d -m 755 $(DESTDIR)$(sbindir); \