mirror of git://git.yoctoproject.org/linux-yocto.git
synced 2026-01-27 12:47:24 +01:00

commit 653f11e25b
Merge branch 'v6.1/standard/base' into v6.1/standard/mti-malta64
Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 1
-SUBLEVEL = 160
+SUBLEVEL = 161
 EXTRAVERSION =
 NAME = Curry Ramen
@@ -23,10 +23,10 @@
 #define TCSETSW		_IOW('t', 21, struct termios)
 #define TCSETSF		_IOW('t', 22, struct termios)
 
-#define TCGETA		_IOR('t', 23, struct termio)
-#define TCSETA		_IOW('t', 24, struct termio)
-#define TCSETAW		_IOW('t', 25, struct termio)
-#define TCSETAF		_IOW('t', 28, struct termio)
+#define TCGETA		0x40127417
+#define TCSETA		0x80127418
+#define TCSETAW		0x80127419
+#define TCSETAF		0x8012741c
 
 #define TCSBRK		_IO('t', 29)
 #define TCXONC		_IO('t', 30)
@@ -1318,7 +1318,7 @@ config HIGHMEM
 
 config HIGHPTE
 	bool "Allocate 2nd-level pagetables from highmem" if EXPERT
-	depends on HIGHMEM
+	depends on HIGHMEM && !PREEMPT_RT
 	default y
 	help
 	  The VM uses one page of physical memory for each page table.
@@ -335,7 +335,7 @@
 			pinctrl-0 = <&pinctrl_rtc>;
 			reg = <0x32>;
 			interrupt-parent = <&gpio4>;
-			interrupts = <10 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <10 IRQ_TYPE_LEVEL_LOW>;
 		};
 	};
 
@@ -97,6 +97,7 @@
 			ethphy0f: ethernet-phy@1 { /* SMSC LAN8740Ai */
 				compatible = "ethernet-phy-id0007.c110",
 					     "ethernet-phy-ieee802.3-c22";
+				clocks = <&clk IMX8MP_CLK_ENET_QOS>;
 				interrupt-parent = <&gpio3>;
 				interrupts = <19 IRQ_TYPE_LEVEL_LOW>;
 				pinctrl-0 = <&pinctrl_ethphy0>;
@@ -34,6 +34,7 @@
 		regulator-max-microvolt = <3000000>;
 		gpio = <&lsio_gpio4 7 GPIO_ACTIVE_HIGH>;
 		enable-active-high;
+		off-on-delay-us = <4800>;
 	};
 };
 
@@ -45,8 +45,8 @@ static inline void csky_cmpxchg_fixup(struct pt_regs *regs)
 	if (trap_no(regs) != VEC_TLBMODIFIED)
 		return;
 
-	if (instruction_pointer(regs) == csky_cmpxchg_stw)
-		instruction_pointer_set(regs, csky_cmpxchg_ldw);
+	if (instruction_pointer(regs) == (unsigned long)&csky_cmpxchg_stw)
+		instruction_pointer_set(regs, (unsigned long)&csky_cmpxchg_ldw);
 	return;
 }
 #endif
@@ -1587,7 +1587,8 @@ he_stop(struct he_dev *he_dev)
 				  he_dev->tbrq_base, he_dev->tbrq_phys);
 
 	if (he_dev->tpdrq_base)
-		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
+		dma_free_coherent(&he_dev->pci_dev->dev,
+				  CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
 				  he_dev->tpdrq_base, he_dev->tpdrq_phys);
 
 	dma_pool_destroy(he_dev->tpd_pool);
@@ -229,8 +229,7 @@ static int interrupt_cnt_probe(struct platform_device *pdev)
 
 	irq_set_status_flags(priv->irq, IRQ_NOAUTOEN);
 	ret = devm_request_irq(dev, priv->irq, interrupt_cnt_isr,
-			       IRQF_TRIGGER_RISING | IRQF_NO_THREAD,
-			       dev_name(dev), counter);
+			       IRQF_TRIGGER_RISING, dev_name(dev), counter);
 	if (ret)
 		return ret;
 
@@ -584,6 +584,7 @@ static int rockchip_gpiolib_register(struct rockchip_pin_bank *bank)
 	gc->ngpio = bank->nr_pins;
 	gc->label = bank->name;
 	gc->parent = bank->dev;
+	gc->can_sleep = true;
 
 	ret = gpiochip_add_data(gc, bank);
 	if (ret) {
@@ -297,7 +297,7 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
 			       variant->name, priv);
 	if (ret != 0) {
 		dev_err(dev, "%s failed irq %d\n", __func__, ret);
-		return ret;
+		goto dev_put;
 	}
 
 	ret = pl111_modeset_init(drm);
@@ -220,6 +220,15 @@ static const struct hid_device_id hid_quirks[] = {
  * used as a driver. See hid_scan_report().
  */
 static const struct hid_device_id hid_have_special_driver[] = {
+#if IS_ENABLED(CONFIG_APPLEDISPLAY)
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, 0x9218) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, 0x9219) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, 0x921c) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, 0x921d) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, 0x9222) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, 0x9226) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, 0x9236) },
+#endif
 #if IS_ENABLED(CONFIG_HID_A4TECH)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
@@ -122,6 +122,8 @@
 
 #define MEI_DEV_ID_WCL_P      0x4D70  /* Wildcat Lake P */
 
+#define MEI_DEV_ID_NVL_S      0x6E68  /* Nova Lake Point S */
+
 /*
  * MEI HW Section
  */
@@ -129,6 +129,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
 
 	{MEI_PCI_DEVICE(MEI_DEV_ID_WCL_P, MEI_ME_PCH15_CFG)},
 
+	{MEI_PCI_DEVICE(MEI_DEV_ID_NVL_S, MEI_ME_PCH15_CFG)},
+
 	/* required last entry */
 	{0, }
 };
@@ -1473,7 +1473,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
 	return 0;
 
 free_ring:
-	dma_free_coherent(&pdev->dev,
+	dma_free_coherent(gendev,
 			  sizeof(struct boom_rx_desc) * RX_RING_SIZE +
 			  sizeof(struct boom_tx_desc) * TX_RING_SIZE,
 			  vp->rx_ring, vp->rx_ring_dma);
@@ -293,6 +293,38 @@ static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
 		BNXT_DB_CQ(db, idx);
 }
 
+static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
+{
+	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
+		return;
+
+	if (BNXT_PF(bp))
+		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
+	else
+		schedule_delayed_work(&bp->fw_reset_task, delay);
+}
+
+static void bnxt_queue_sp_work(struct bnxt *bp)
+{
+	if (BNXT_PF(bp))
+		queue_work(bnxt_pf_wq, &bp->sp_task);
+	else
+		schedule_work(&bp->sp_task);
+}
+
+static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+	if (!rxr->bnapi->in_reset) {
+		rxr->bnapi->in_reset = true;
+		if (bp->flags & BNXT_FLAG_CHIP_P5)
+			set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
+		else
+			set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
+		bnxt_queue_sp_work(bp);
+	}
+	rxr->rx_next_cons = 0xffff;
+}
+
 const u16 bnxt_lhint_arr[] = {
 	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
 	TX_BD_FLAGS_LHINT_512_TO_1023,
@@ -1269,46 +1301,16 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	return 0;
 }
 
-static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
-{
-	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
-		return;
-
-	if (BNXT_PF(bp))
-		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
-	else
-		schedule_delayed_work(&bp->fw_reset_task, delay);
-}
-
-static void bnxt_queue_sp_work(struct bnxt *bp)
-{
-	if (BNXT_PF(bp))
-		queue_work(bnxt_pf_wq, &bp->sp_task);
-	else
-		schedule_work(&bp->sp_task);
-}
-
-static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
-{
-	if (!rxr->bnapi->in_reset) {
-		rxr->bnapi->in_reset = true;
-		if (bp->flags & BNXT_FLAG_CHIP_P5)
-			set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
-		else
-			set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
-		bnxt_queue_sp_work(bp);
-	}
-	rxr->rx_next_cons = 0xffff;
-}
-
 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
 {
 	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
 	u16 idx = agg_id & MAX_TPA_P5_MASK;
 
-	if (test_bit(idx, map->agg_idx_bmap))
-		idx = find_first_zero_bit(map->agg_idx_bmap,
-					  BNXT_AGG_IDX_BMAP_SIZE);
+	if (test_bit(idx, map->agg_idx_bmap)) {
+		idx = find_first_zero_bit(map->agg_idx_bmap, MAX_TPA_P5);
+		if (idx >= MAX_TPA_P5)
+			return INVALID_HW_RING_ID;
+	}
 	__set_bit(idx, map->agg_idx_bmap);
 	map->agg_id_tbl[agg_id] = idx;
 	return idx;
@@ -1341,6 +1343,13 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
 		agg_id = TPA_START_AGG_ID_P5(tpa_start);
 		agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
+		if (unlikely(agg_id == INVALID_HW_RING_ID)) {
+			netdev_warn(bp->dev, "Unable to allocate agg ID for ring %d, agg 0x%x\n",
+				    rxr->bnapi->index,
+				    TPA_START_AGG_ID_P5(tpa_start));
+			bnxt_sched_reset_rxr(bp, rxr);
+			return;
+		}
 	} else {
 		agg_id = TPA_START_AGG_ID(tpa_start);
 	}
@@ -1355,7 +1364,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 		netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
 			    cons, rxr->rx_next_cons,
 			    TPA_START_ERROR_CODE(tpa_start1));
-		bnxt_sched_reset(bp, rxr);
+		bnxt_sched_reset_rxr(bp, rxr);
 		return;
 	}
 	/* Store cfa_code in tpa_info to use in tpa_end
@@ -1895,7 +1904,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 		if (rxr->rx_next_cons != 0xffff)
 			netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
 				    cons, rxr->rx_next_cons);
-		bnxt_sched_reset(bp, rxr);
+		bnxt_sched_reset_rxr(bp, rxr);
 		if (rc1)
 			return rc1;
 		goto next_rx_no_prod_no_len;
@@ -1933,7 +1942,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 			    !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
 				netdev_warn_once(bp->dev, "RX buffer error %x\n",
 						 rx_err);
-				bnxt_sched_reset(bp, rxr);
+				bnxt_sched_reset_rxr(bp, rxr);
 			}
 		}
 		goto next_rx_no_len;
@@ -2371,7 +2380,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
 			goto async_event_process_exit;
 		}
 		rxr = bp->bnapi[grp_idx]->rx_ring;
-		bnxt_sched_reset(bp, rxr);
+		bnxt_sched_reset_rxr(bp, rxr);
 		goto async_event_process_exit;
 	}
 	case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
@@ -897,11 +897,9 @@ struct bnxt_tpa_info {
 	struct rx_agg_cmp *agg_arr;
 };
 
-#define BNXT_AGG_IDX_BMAP_SIZE	(MAX_TPA_P5 / BITS_PER_LONG)
-
 struct bnxt_tpa_idx_map {
 	u16 agg_id_tbl[1024];
-	unsigned long agg_idx_bmap[BNXT_AGG_IDX_BMAP_SIZE];
+	DECLARE_BITMAP(agg_idx_bmap, MAX_TPA_P5);
 };
 
 struct bnxt_rx_ring_info {
@@ -43,9 +43,9 @@ struct enetc_tx_swbd {
 #define ENETC_RXB_TRUESIZE	(PAGE_SIZE >> 1)
 #define ENETC_RXB_PAD		NET_SKB_PAD /* add extra space if needed */
 #define ENETC_RXB_DMA_SIZE \
-	(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD)
+	min(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD, 0xffff)
 #define ENETC_RXB_DMA_SIZE_XDP \
-	(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - XDP_PACKET_HEADROOM)
+	min(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - XDP_PACKET_HEADROOM, 0xffff)
 
 struct enetc_rx_swbd {
 	dma_addr_t dma;
@@ -392,6 +392,8 @@ struct prestera_switch *prestera_devlink_alloc(struct prestera_device *dev)
 
 	dl = devlink_alloc(&prestera_dl_ops, sizeof(struct prestera_switch),
 			   dev->dev);
+	if (!dl)
+		return NULL;
 
 	return devlink_priv(dl);
 }
@@ -432,7 +432,8 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
 		mlx5_qsfp_eeprom_params_set(&query.i2c_address, &query.page, &offset);
 		break;
 	default:
-		mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id);
+		mlx5_core_dbg(dev, "Module ID not recognized: 0x%x\n",
+			      module_id);
 		return -EINVAL;
 	}
 
@@ -2202,14 +2202,16 @@ static void ocelot_set_aggr_pgids(struct ocelot *ocelot)
 
 	/* Now, set PGIDs for each active LAG */
 	for (lag = 0; lag < ocelot->num_phys_ports; lag++) {
-		struct net_device *bond = ocelot->ports[lag]->bond;
+		struct ocelot_port *ocelot_port = ocelot->ports[lag];
 		int num_active_ports = 0;
+		struct net_device *bond;
 		unsigned long bond_mask;
 		u8 aggr_idx[16];
 
-		if (!bond || (visited & BIT(lag)))
+		if (!ocelot_port || !ocelot_port->bond || (visited & BIT(lag)))
 			continue;
 
+		bond = ocelot_port->bond;
 		bond_mask = ocelot_get_bond_mask(ocelot, bond);
 
 		for_each_set_bit(port, &bond_mask, ocelot->num_phys_ports) {
@@ -168,6 +168,8 @@ static int update_eth_regs_async(pegasus_t *pegasus)
 		netif_device_detach(pegasus->net);
 		netif_err(pegasus, drv, pegasus->net,
 			  "%s returned %d\n", __func__, ret);
+		usb_free_urb(async_urb);
+		kfree(req);
 	}
 	return ret;
 }
@@ -456,6 +456,7 @@ void ipc_mux_deinit(struct iosm_mux *ipc_mux)
 	struct sk_buff_head *free_list;
 	union mux_msg mux_msg;
 	struct sk_buff *skb;
+	int i;
 
 	if (!ipc_mux->initialized)
 		return;
@@ -479,5 +480,10 @@ void ipc_mux_deinit(struct iosm_mux *ipc_mux)
 		ipc_mux->channel->dl_pipe.is_open = false;
 	}
 
+	if (ipc_mux->protocol != MUX_LITE) {
+		for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++)
+			kfree(ipc_mux->ul_adb.pp_qlt[i]);
+	}
+
 	kfree(ipc_mux);
 }
@@ -435,7 +435,7 @@ int lpi_pinctrl_probe(struct platform_device *pdev)
 	pctrl->chip.ngpio = data->npins;
 	pctrl->chip.label = dev_name(dev);
 	pctrl->chip.of_gpio_n_cells = 2;
-	pctrl->chip.can_sleep = false;
+	pctrl->chip.can_sleep = true;
 
 	mutex_init(&pctrl->lock);
 
@@ -67,7 +67,7 @@ static ssize_t show_constraint_##_attr(struct device *dev, \
 	int id; \
 	struct powercap_zone_constraint *pconst;\
 	\
-	if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id)) \
+	if (sscanf(dev_attr->attr.name, "constraint_%d_", &id) != 1) \
 		return -EINVAL; \
 	if (id >= power_zone->const_id_cnt) \
 		return -EINVAL; \
@@ -92,7 +92,7 @@ static ssize_t store_constraint_##_attr(struct device *dev,\
 	int id; \
 	struct powercap_zone_constraint *pconst;\
 	\
-	if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id)) \
+	if (sscanf(dev_attr->attr.name, "constraint_%d_", &id) != 1) \
 		return -EINVAL; \
 	if (id >= power_zone->const_id_cnt) \
 		return -EINVAL; \
@@ -161,7 +161,7 @@ static ssize_t show_constraint_name(struct device *dev,
 	ssize_t len = -ENODATA;
 	struct powercap_zone_constraint *pconst;
 
-	if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id))
+	if (sscanf(dev_attr->attr.name, "constraint_%d_", &id) != 1)
 		return -EINVAL;
 	if (id >= power_zone->const_id_cnt)
 		return -EINVAL;
@@ -624,17 +624,23 @@ struct powercap_control_type *powercap_register_control_type(
 	INIT_LIST_HEAD(&control_type->node);
 	control_type->dev.class = &powercap_class;
 	dev_set_name(&control_type->dev, "%s", name);
-	result = device_register(&control_type->dev);
-	if (result) {
-		put_device(&control_type->dev);
-		return ERR_PTR(result);
-	}
 	idr_init(&control_type->idr);
 
 	mutex_lock(&powercap_cntrl_list_lock);
 	list_add_tail(&control_type->node, &powercap_cntrl_list);
 	mutex_unlock(&powercap_cntrl_list_lock);
 
+	result = device_register(&control_type->dev);
+	if (result) {
+		mutex_lock(&powercap_cntrl_list_lock);
+		list_del(&control_type->node);
+		mutex_unlock(&powercap_cntrl_list_lock);
+
+		idr_destroy(&control_type->idr);
+		put_device(&control_type->dev);
+		return ERR_PTR(result);
+	}
+
 	return control_type;
 }
 EXPORT_SYMBOL_GPL(powercap_register_control_type);
@@ -62,8 +62,8 @@
 #include <linux/hdreg.h>
 #include <linux/reboot.h>
 #include <linux/stringify.h>
+#include <linux/irq.h>
 #include <asm/io.h>
-#include <asm/irq.h>
 #include <asm/processor.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -8669,6 +8669,30 @@ static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
 	return IPR_RC_JOB_RETURN;
 }
 
+/**
+ * ipr_set_affinity_nobalance
+ * @ioa_cfg:	ipr_ioa_cfg struct for an ipr device
+ * @flag:	bool
+ *		true: enable "IRQ_NO_BALANCING" bit for msix interrupt
+ *		false: disable "IRQ_NO_BALANCING" bit for msix interrupt
+ * Description: This function will be called to disable/enable
+ * "IRQ_NO_BALANCING" to avoid irqbalance daemon
+ * kicking in during adapter reset.
+ **/
+static void ipr_set_affinity_nobalance(struct ipr_ioa_cfg *ioa_cfg, bool flag)
+{
+	int irq, i;
+
+	for (i = 0; i < ioa_cfg->nvectors; i++) {
+		irq = pci_irq_vector(ioa_cfg->pdev, i);
+
+		if (flag)
+			irq_set_status_flags(irq, IRQ_NO_BALANCING);
+		else
+			irq_clear_status_flags(irq, IRQ_NO_BALANCING);
+	}
+}
+
 /**
  * ipr_reset_restore_cfg_space - Restore PCI config space.
  * @ipr_cmd:	ipr command struct
@@ -8693,6 +8717,7 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
 		return IPR_RC_JOB_CONTINUE;
 	}
 
+	ipr_set_affinity_nobalance(ioa_cfg, false);
 	ipr_fail_all_ops(ioa_cfg);
 
 	if (ioa_cfg->sis64) {
@@ -8772,6 +8797,7 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
 	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
 
 	if (rc == PCIBIOS_SUCCESSFUL) {
+		ipr_set_affinity_nobalance(ioa_cfg, true);
 		ipr_cmd->job_step = ipr_reset_bist_done;
 		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
 		rc = IPR_RC_JOB_RETURN;
@@ -119,20 +119,6 @@ static inline void sas_fail_probe(struct domain_device *dev, const char *func, i
 		func, dev->parent ? "exp-attached" :
 		"direct-attached",
 		SAS_ADDR(dev->sas_addr), err);
-
-	/*
-	 * If the device probe failed, the expander phy attached address
-	 * needs to be reset so that the phy will not be treated as flutter
-	 * in the next revalidation
-	 */
-	if (dev->parent && !dev_is_expander(dev->dev_type)) {
-		struct sas_phy *phy = dev->phy;
-		struct domain_device *parent = dev->parent;
-		struct ex_phy *ex_phy = &parent->ex_dev.ex_phy[phy->number];
-
-		memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
-	}
-
 	sas_unregister_dev(dev->port, dev);
 }
 
@@ -735,6 +735,8 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
 		sg_remove_request(sfp, srp);
 		return -EFAULT;
 	}
+	hp->duration = jiffies_to_msecs(jiffies);
+
 	if (hp->interface_id != 'S') {
 		sg_remove_request(sfp, srp);
 		return -ENOSYS;
@@ -819,7 +821,6 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
 		return -ENODEV;
 	}
 
-	hp->duration = jiffies_to_msecs(jiffies);
 	if (hp->interface_id != '\0' &&	/* v3 (or later) interface */
 	    (SG_FLAG_Q_AT_TAIL & hp->flags))
 		at_head = 0;
@@ -1342,9 +1343,6 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
 		      "sg_cmd_done: pack_id=%d, res=0x%x\n",
 		      srp->header.pack_id, result));
 	srp->header.resid = resid;
-	ms = jiffies_to_msecs(jiffies);
-	srp->header.duration = (ms > srp->header.duration) ?
-		(ms - srp->header.duration) : 0;
 	if (0 != result) {
 		struct scsi_sense_hdr sshdr;
 
@@ -1393,6 +1391,9 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
 			done = 0;
 	}
 	srp->done = done;
+	ms = jiffies_to_msecs(jiffies);
+	srp->header.duration = (ms > srp->header.duration) ?
+		(ms - srp->header.duration) : 0;
 	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
 
 	if (likely(done)) {
@@ -2529,6 +2530,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
 	const sg_io_hdr_t *hp;
 	const char * cp;
 	unsigned int ms;
+	unsigned int duration;
 
 	k = 0;
 	list_for_each_entry(fp, &sdp->sfds, sfd_siblings) {
@@ -2566,13 +2568,17 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
 			seq_printf(s, " id=%d blen=%d",
 				   srp->header.pack_id, blen);
 			if (srp->done)
-				seq_printf(s, " dur=%d", hp->duration);
+				seq_printf(s, " dur=%u", hp->duration);
 			else {
 				ms = jiffies_to_msecs(jiffies);
-				seq_printf(s, " t_o/elap=%d/%d",
+				duration = READ_ONCE(hp->duration);
+				if (duration)
+					duration = (ms > duration ?
+						    ms - duration : 0);
+				seq_printf(s, " t_o/elap=%u/%u",
 					   (new_interface ? hp->timeout :
 					   jiffies_to_msecs(fp->timeout)),
-					   (ms > hp->duration ? ms - hp->duration : 0));
+					   duration);
 			}
 			seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
 				   (int) srp->data.cmd_opcode);
@@ -6154,6 +6154,11 @@ static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
 
 static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
 {
+	/*
+	 * A WLUN resume failure could potentially lead to the HBA being
+	 * runtime suspended, so take an extra reference on hba->dev.
+	 */
+	pm_runtime_get_sync(hba->dev);
 	ufshcd_rpm_get_sync(hba);
 	if (pm_runtime_status_suspended(&hba->ufs_device_wlun->sdev_gendev) ||
 	    hba->is_sys_suspended) {
@@ -6194,6 +6199,7 @@ static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
 	if (ufshcd_is_clkscaling_supported(hba))
 		ufshcd_clk_scaling_suspend(hba, false);
 	ufshcd_rpm_put(hba);
+	pm_runtime_put(hba->dev);
 }
 
 static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
@@ -6208,28 +6214,42 @@ static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
 #ifdef CONFIG_PM
 static void ufshcd_recover_pm_error(struct ufs_hba *hba)
 {
+	struct scsi_target *starget = hba->ufs_device_wlun->sdev_target;
 	struct Scsi_Host *shost = hba->host;
 	struct scsi_device *sdev;
 	struct request_queue *q;
-	int ret;
+	bool resume_sdev_queues = false;
 
 	hba->is_sys_suspended = false;
-	/*
-	 * Set RPM status of wlun device to RPM_ACTIVE,
-	 * this also clears its runtime error.
-	 */
-	ret = pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev);
-
-	/* hba device might have a runtime error otherwise */
-	if (ret)
-		ret = pm_runtime_set_active(hba->dev);
+	/*
+	 * Ensure the parent's error status is cleared before proceeding
+	 * to the child, as the parent must be active to activate the child.
+	 */
+	if (hba->dev->power.runtime_error) {
+		/* hba->dev has no functional parent, thus simply set RPM_ACTIVE */
+		pm_runtime_set_active(hba->dev);
+		resume_sdev_queues = true;
+	}
+
+	if (hba->ufs_device_wlun->sdev_gendev.power.runtime_error) {
+		/*
+		 * starget, parent of wlun, might be suspended if wlun resume failed.
+		 * Make sure parent is resumed before set child (wlun) active.
+		 */
+		pm_runtime_get_sync(&starget->dev);
+		pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev);
+		pm_runtime_put_sync(&starget->dev);
+		resume_sdev_queues = true;
+	}
+
 	/*
 	 * If wlun device had runtime error, we also need to resume those
 	 * consumer scsi devices in case any of them has failed to be
 	 * resumed due to supplier runtime resume failure. This is to unblock
 	 * blk_queue_enter in case there are bios waiting inside it.
 	 */
-	if (!ret) {
+	if (resume_sdev_queues) {
 		shost_for_each_device(sdev, shost) {
 			q = sdev->request_queue;
 			if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
@@ -5940,10 +5940,8 @@ again:
 			 * and no keys greater than that, so bail out.
 			 */
 			break;
-		} else if ((min_key->type == BTRFS_INODE_REF_KEY ||
-			    min_key->type == BTRFS_INODE_EXTREF_KEY) &&
-			   (inode->generation == trans->transid ||
-			    ctx->logging_conflict_inodes)) {
+		} else if (min_key->type == BTRFS_INODE_REF_KEY ||
+			   min_key->type == BTRFS_INODE_EXTREF_KEY) {
 			u64 other_ino = 0;
 			u64 other_parent = 0;
 
@@ -4752,6 +4752,11 @@ static inline int ext4_iget_extra_inode(struct inode *inode,
 	    *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
 		int err;
 
+		err = xattr_check_inode(inode, IHDR(inode, raw_inode),
+					ITAIL(inode, raw_inode));
+		if (err)
+			return err;
+
 		ext4_set_inode_state(inode, EXT4_STATE_XATTR);
 		err = ext4_find_inline_data_nolock(inode);
 		if (!err && ext4_has_inline_data(inode))
@@ -263,7 +263,7 @@ errout:
 	__ext4_xattr_check_block((inode), (bh), __func__, __LINE__)
 
 
-static int
+int
 __xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
 		    void *end, const char *function, unsigned int line)
 {
@@ -280,9 +280,6 @@ errout:
 	return error;
 }
 
-#define xattr_check_inode(inode, header, end) \
-	__xattr_check_inode((inode), (header), (end), __func__, __LINE__)
-
 static int
 xattr_find_entry(struct inode *inode, struct ext4_xattr_entry **pentry,
 		 void *end, int name_index, const char *name, int sorted)
@@ -599,10 +596,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
 		return error;
 	raw_inode = ext4_raw_inode(&iloc);
 	header = IHDR(inode, raw_inode);
-	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
-	error = xattr_check_inode(inode, header, end);
-	if (error)
-		goto cleanup;
+	end = ITAIL(inode, raw_inode);
 	entry = IFIRST(header);
 	error = xattr_find_entry(inode, &entry, end, name_index, name, 0);
 	if (error)
@@ -734,7 +728,6 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
 	struct ext4_xattr_ibody_header *header;
 	struct ext4_inode *raw_inode;
 	struct ext4_iloc iloc;
-	void *end;
 	int error;
 
 	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
@@ -744,14 +737,9 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
 		return error;
 	raw_inode = ext4_raw_inode(&iloc);
 	header = IHDR(inode, raw_inode);
-	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
-	error = xattr_check_inode(inode, header, end);
-	if (error)
-		goto cleanup;
 	error = ext4_xattr_list_entries(dentry, IFIRST(header),
 					buffer, buffer_size);
 
-cleanup:
 	brelse(iloc.bh);
 	return error;
 }
@@ -819,7 +807,6 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
 	struct ext4_xattr_ibody_header *header;
 	struct ext4_xattr_entry *entry;
 	qsize_t ea_inode_refs = 0;
-	void *end;
 	int ret;
 
 	lockdep_assert_held_read(&EXT4_I(inode)->xattr_sem);
@@ -830,10 +817,6 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
 		goto out;
 	raw_inode = ext4_raw_inode(&iloc);
 	header = IHDR(inode, raw_inode);
-	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
-	ret = xattr_check_inode(inode, header, end);
-	if (ret)
-		goto out;
 
 	for (entry = IFIRST(header); !IS_LAST_ENTRY(entry);
 	     entry = EXT4_XATTR_NEXT(entry))
@@ -2195,11 +2178,8 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
 	header = IHDR(inode, raw_inode);
 	is->s.base = is->s.first = IFIRST(header);
 	is->s.here = is->s.first;
-	is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
+	is->s.end = ITAIL(inode, raw_inode);
 	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
-		error = xattr_check_inode(inode, header, is->s.end);
-		if (error)
-			return error;
 		/* Find the named attribute. */
 		error = xattr_find_entry(inode, &is->s.here, is->s.end,
 					 i->name_index, i->name, 0);
@@ -2746,14 +2726,10 @@ retry:
 	 */
 
 	base = IFIRST(header);
-	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
+	end = ITAIL(inode, raw_inode);
 	min_offs = end - base;
 	total_ino = sizeof(struct ext4_xattr_ibody_header) + sizeof(u32);
 
-	error = xattr_check_inode(inode, header, end);
-	if (error)
-		goto cleanup;
-
 	ifree = ext4_xattr_free_space(base, &min_offs, base, &total_ino);
 	if (ifree >= isize_diff)
 		goto shift;
@@ -68,6 +68,9 @@ struct ext4_xattr_entry {
 	((void *)raw_inode + \
 	 EXT4_GOOD_OLD_INODE_SIZE + \
 	 EXT4_I(inode)->i_extra_isize))
+#define ITAIL(inode, raw_inode) \
+	((void *)(raw_inode) + \
+	 EXT4_SB((inode)->i_sb)->s_inode_size)
 #define IFIRST(hdr) ((struct ext4_xattr_entry *)((hdr)+1))
 
 /*
@@ -207,6 +210,13 @@ extern int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
 extern struct mb_cache *ext4_xattr_create_cache(void);
 extern void ext4_xattr_destroy_cache(struct mb_cache *);
 
+extern int
+__xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
+		    void *end, const char *function, unsigned int line);
+
+#define xattr_check_inode(inode, header, end) \
+	__xattr_check_inode((inode), (header), (end), __func__, __LINE__)
+
 #ifdef CONFIG_EXT4_FS_SECURITY
 extern int ext4_init_security(handle_t *handle, struct inode *inode,
 			      struct inode *dir, const struct qstr *qstr);
@@ -5,6 +5,7 @@ config NFS_FS
 	select CRC32
 	select LOCKD
 	select SUNRPC
+	select NFS_COMMON
 	select NFS_ACL_SUPPORT if NFS_V3_ACL
 	help
 	  Choose Y here if you want to access files residing on other
@@ -170,6 +170,11 @@ struct vfsmount *nfs_d_automount(struct path *path)
 	if (!ctx->clone_data.fattr)
 		goto out_fc;
 
+	if (fc->cred != server->cred) {
+		put_cred(fc->cred);
+		fc->cred = get_cred(server->cred);
+	}
+
 	if (fc->net_ns != client->cl_net) {
 		put_net(fc->net_ns);
 		fc->net_ns = get_net(client->cl_net);
@@ -22,14 +22,12 @@
 #include <linux/nfs.h>
 #include <linux/nfs2.h>
 #include <linux/nfs_fs.h>
+#include <linux/nfs_common.h>
 #include "nfstrace.h"
 #include "internal.h"
 
 #define NFSDBG_FACILITY		NFSDBG_XDR
 
-/* Mapping from NFS error code to "errno" error code. */
-#define errno_NFSERR_IO		EIO
-
 /*
  * Declare the space requirements for NFS arguments and replies as
  * number of 32bit-words
@@ -64,8 +62,6 @@
 #define NFS_readdirres_sz	(1+NFS_pagepad_sz)
 #define NFS_statfsres_sz	(1+NFS_info_sz)
 
-static int nfs_stat_to_errno(enum nfs_stat);
-
 /*
  * Encode/decode NFSv2 basic data types
  *
@@ -1054,70 +1050,6 @@ out_default:
 	return nfs_stat_to_errno(status);
 }
 
-
-/*
- * We need to translate between nfs status return values and
- * the local errno values which may not be the same.
- */
-static const struct {
-	int stat;
-	int errno;
-} nfs_errtbl[] = {
-	{ NFS_OK,		0		},
-	{ NFSERR_PERM,		-EPERM		},
-	{ NFSERR_NOENT,		-ENOENT		},
-	{ NFSERR_IO,		-errno_NFSERR_IO},
-	{ NFSERR_NXIO,		-ENXIO		},
-/*	{ NFSERR_EAGAIN,	-EAGAIN		}, */
-	{ NFSERR_ACCES,		-EACCES		},
-	{ NFSERR_EXIST,		-EEXIST		},
-	{ NFSERR_XDEV,		-EXDEV		},
-	{ NFSERR_NODEV,		-ENODEV		},
-	{ NFSERR_NOTDIR,	-ENOTDIR	},
-	{ NFSERR_ISDIR,		-EISDIR		},
-	{ NFSERR_INVAL,		-EINVAL		},
-	{ NFSERR_FBIG,		-EFBIG		},
-	{ NFSERR_NOSPC,		-ENOSPC		},
-	{ NFSERR_ROFS,		-EROFS		},
-	{ NFSERR_MLINK,		-EMLINK		},
-	{ NFSERR_NAMETOOLONG,	-ENAMETOOLONG	},
-	{ NFSERR_NOTEMPTY,	-ENOTEMPTY	},
-	{ NFSERR_DQUOT,		-EDQUOT		},
-	{ NFSERR_STALE,		-ESTALE		},
-	{ NFSERR_REMOTE,	-EREMOTE	},
-#ifdef EWFLUSH
-	{ NFSERR_WFLUSH,	-EWFLUSH	},
-#endif
-	{ NFSERR_BADHANDLE,	-EBADHANDLE	},
-	{ NFSERR_NOT_SYNC,	-ENOTSYNC	},
-	{ NFSERR_BAD_COOKIE,	-EBADCOOKIE	},
-	{ NFSERR_NOTSUPP,	-ENOTSUPP	},
-	{ NFSERR_TOOSMALL,	-ETOOSMALL	},
-	{ NFSERR_SERVERFAULT,	-EREMOTEIO	},
-	{ NFSERR_BADTYPE,	-EBADTYPE	},
-	{ NFSERR_JUKEBOX,	-EJUKEBOX	},
-	{ -1,			-EIO		}
-};
-
-/**
- * nfs_stat_to_errno - convert an NFS status code to a local errno
- * @status: NFS status code to convert
- *
- * Returns a local errno value, or -EIO if the NFS status code is
- * not recognized. This function is used jointly by NFSv2 and NFSv3.
- */
-static int nfs_stat_to_errno(enum nfs_stat status)
-{
-	int i;
-
-	for (i = 0; nfs_errtbl[i].stat != -1; i++) {
-		if (nfs_errtbl[i].stat == (int)status)
-			return nfs_errtbl[i].errno;
-	}
-	dprintk("NFS: Unrecognized nfs status value: %u\n", status);
-	return nfs_errtbl[i].errno;
-}
-
 #define PROC(proc, argtype, restype, timer)			\
 [NFSPROC_##proc] = {						\
 	.p_proc = NFSPROC_##proc,				\

fs/nfs/nfs3xdr.c
@@ -21,14 +21,13 @@
 #include <linux/nfs3.h>
 #include <linux/nfs_fs.h>
 #include <linux/nfsacl.h>
+#include <linux/nfs_common.h>
 
 #include "nfstrace.h"
 #include "internal.h"
 
 #define NFSDBG_FACILITY		NFSDBG_XDR
 
-/* Mapping from NFS error code to "errno" error code. */
-#define errno_NFSERR_IO		EIO
-
 /*
  * Declare the space requirements for NFS arguments and replies as
  * number of 32bit-words
@@ -91,8 +90,6 @@
 				NFS3_pagepad_sz)
 #define ACL3_setaclres_sz	(1+NFS3_post_op_attr_sz)
 
-static int nfs3_stat_to_errno(enum nfs_stat);
-
 /*
  * Map file type to S_IFMT bits
  */
@@ -1406,7 +1403,7 @@ static int nfs3_xdr_dec_getattr3res(struct rpc_rqst *req,
 out:
 	return error;
 out_default:
-	return nfs3_stat_to_errno(status);
+	return nfs_stat_to_errno(status);
 }
 
 /*
@@ -1445,7 +1442,7 @@ static int nfs3_xdr_dec_setattr3res(struct rpc_rqst *req,
 out:
 	return error;
 out_status:
-	return nfs3_stat_to_errno(status);
+	return nfs_stat_to_errno(status);
 }
 
 /*
@@ -1495,7 +1492,7 @@ out_default:
 	error = decode_post_op_attr(xdr, result->dir_attr, userns);
 	if (unlikely(error))
 		goto out;
-	return nfs3_stat_to_errno(status);
+	return nfs_stat_to_errno(status);
 }
 
 /*
@@ -1537,7 +1534,7 @@ static int nfs3_xdr_dec_access3res(struct rpc_rqst *req,
 out:
 	return error;
 out_default:
-	return nfs3_stat_to_errno(status);
+	return nfs_stat_to_errno(status);
 }
 
 /*
@@ -1578,7 +1575,7 @@ static int nfs3_xdr_dec_readlink3res(struct rpc_rqst *req,
 out:
 	return error;
 out_default:
-	return nfs3_stat_to_errno(status);
+	return nfs_stat_to_errno(status);
 }
 
 /*
@@ -1658,7 +1655,7 @@ static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr,
 out:
 	return error;
 out_status:
-	return nfs3_stat_to_errno(status);
+	return nfs_stat_to_errno(status);
 }
 
 /*
@@ -1728,7 +1725,7 @@ static int nfs3_xdr_dec_write3res(struct rpc_rqst *req, struct xdr_stream *xdr,
 out:
 	return error;
 out_status:
-	return nfs3_stat_to_errno(status);
+	return nfs_stat_to_errno(status);
 }
 
 /*
@@ -1795,7 +1792,7 @@ out_default:
 	error = decode_wcc_data(xdr, result->dir_attr, userns);
 	if (unlikely(error))
 		goto out;
-	return nfs3_stat_to_errno(status);
+	return nfs_stat_to_errno(status);
 }
 
 /*
@@ -1835,7 +1832,7 @@ static int nfs3_xdr_dec_remove3res(struct rpc_rqst *req,
 out:
 	return error;
 out_status:
-	return nfs3_stat_to_errno(status);
+	return nfs_stat_to_errno(status);
 }
 
 /*
@@ -1881,7 +1878,7 @@ static int nfs3_xdr_dec_rename3res(struct rpc_rqst *req,
 out:
 	return error;
 out_status:
-	return nfs3_stat_to_errno(status);
+	return nfs_stat_to_errno(status);
 }
 
 /*
@@ -1926,7 +1923,7 @@ static int nfs3_xdr_dec_link3res(struct rpc_rqst *req, struct xdr_stream *xdr,
 out:
 	return error;
 out_status:
-	return nfs3_stat_to_errno(status);
+	return nfs_stat_to_errno(status);
 }
 
 /**
@@ -2101,7 +2098,7 @@ out_default:
 	error = decode_post_op_attr(xdr, result->dir_attr, rpc_rqst_userns(req));
 	if (unlikely(error))
 		goto out;
-	return nfs3_stat_to_errno(status);
+	return nfs_stat_to_errno(status);
 }
 
 /*
@@ -2167,7 +2164,7 @@ static int nfs3_xdr_dec_fsstat3res(struct rpc_rqst *req,
 out:
 	return error;
 out_status:
-	return nfs3_stat_to_errno(status);
+	return nfs_stat_to_errno(status);
 }
 
 /*
@@ -2243,7 +2240,7 @@ static int nfs3_xdr_dec_fsinfo3res(struct rpc_rqst *req,
 out:
 	return error;
 out_status:
-	return nfs3_stat_to_errno(status);
+	return nfs_stat_to_errno(status);
 }
 
 /*
@@ -2304,7 +2301,7 @@ static int nfs3_xdr_dec_pathconf3res(struct rpc_rqst *req,
 out:
 	return error;
 out_status:
-	return nfs3_stat_to_errno(status);
+	return nfs_stat_to_errno(status);
 }
 
 /*
@@ -2350,7 +2347,7 @@ static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
 out:
 	return error;
 out_status:
-	return nfs3_stat_to_errno(status);
+	return nfs_stat_to_errno(status);
 }
 
 #ifdef CONFIG_NFS_V3_ACL
@@ -2416,7 +2413,7 @@ static int nfs3_xdr_dec_getacl3res(struct rpc_rqst *req,
 out:
 	return error;
 out_default:
-	return nfs3_stat_to_errno(status);
+	return nfs_stat_to_errno(status);
 }
 
 static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req,
@@ -2435,76 +2432,11 @@ static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req,
 out:
 	return error;
 out_default:
-	return nfs3_stat_to_errno(status);
+	return nfs_stat_to_errno(status);
 }
 
 #endif /* CONFIG_NFS_V3_ACL */
 
-
-/*
- * We need to translate between nfs status return values and
- * the local errno values which may not be the same.
- */
-static const struct {
-	int stat;
-	int errno;
-} nfs_errtbl[] = {
-	{ NFS_OK,		0		},
-	{ NFSERR_PERM,		-EPERM		},
-	{ NFSERR_NOENT,		-ENOENT		},
-	{ NFSERR_IO,		-errno_NFSERR_IO},
-	{ NFSERR_NXIO,		-ENXIO		},
-/*	{ NFSERR_EAGAIN,	-EAGAIN		}, */
-	{ NFSERR_ACCES,		-EACCES		},
-	{ NFSERR_EXIST,		-EEXIST		},
-	{ NFSERR_XDEV,		-EXDEV		},
-	{ NFSERR_NODEV,		-ENODEV		},
-	{ NFSERR_NOTDIR,	-ENOTDIR	},
-	{ NFSERR_ISDIR,		-EISDIR		},
-	{ NFSERR_INVAL,		-EINVAL		},
-	{ NFSERR_FBIG,		-EFBIG		},
-	{ NFSERR_NOSPC,		-ENOSPC		},
-	{ NFSERR_ROFS,		-EROFS		},
-	{ NFSERR_MLINK,		-EMLINK		},
-	{ NFSERR_NAMETOOLONG,	-ENAMETOOLONG	},
-	{ NFSERR_NOTEMPTY,	-ENOTEMPTY	},
-	{ NFSERR_DQUOT,		-EDQUOT		},
-	{ NFSERR_STALE,		-ESTALE		},
-	{ NFSERR_REMOTE,	-EREMOTE	},
-#ifdef EWFLUSH
-	{ NFSERR_WFLUSH,	-EWFLUSH	},
-#endif
-	{ NFSERR_BADHANDLE,	-EBADHANDLE	},
-	{ NFSERR_NOT_SYNC,	-ENOTSYNC	},
-	{ NFSERR_BAD_COOKIE,	-EBADCOOKIE	},
-	{ NFSERR_NOTSUPP,	-ENOTSUPP	},
-	{ NFSERR_TOOSMALL,	-ETOOSMALL	},
-	{ NFSERR_SERVERFAULT,	-EREMOTEIO	},
-	{ NFSERR_BADTYPE,	-EBADTYPE	},
-	{ NFSERR_JUKEBOX,	-EJUKEBOX	},
-	{ -1,			-EIO		}
-};
-
-/**
- * nfs3_stat_to_errno - convert an NFS status code to a local errno
- * @status: NFS status code to convert
- *
- * Returns a local errno value, or -EIO if the NFS status code is
- * not recognized. This function is used jointly by NFSv2 and NFSv3.
- */
-static int nfs3_stat_to_errno(enum nfs_stat status)
-{
-	int i;
-
-	for (i = 0; nfs_errtbl[i].stat != -1; i++) {
-		if (nfs_errtbl[i].stat == (int)status)
-			return nfs_errtbl[i].errno;
-	}
-	dprintk("NFS: Unrecognized nfs status value: %u\n", status);
-	return nfs_errtbl[i].errno;
-}
-
 #define PROC(proc, argtype, restype, timer)			\
 [NFS3PROC_##proc] = {						\
 	.p_proc = NFS3PROC_##proc,				\
@@ -1700,8 +1700,17 @@ static void nfs_set_open_stateid_locked(struct nfs4_state *state,
 		if (nfs_stateid_is_sequential(state, stateid))
 			break;
 
-		if (status)
-			break;
+		if (status) {
+			if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
+			    !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
+				trace_nfs4_open_stateid_update_skip(state->inode,
+						stateid, status);
+				return;
+			} else {
+				break;
+			}
+		}
+
 		/* Rely on seqids for serialisation with NFSv4.0 */
 		if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client))
 			break;
@@ -1248,6 +1248,7 @@
 DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_setattr);
 DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_delegreturn);
 DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_open_stateid_update);
 DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_open_stateid_update_wait);
+DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_open_stateid_update_skip);
 DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_close_stateid_update_wait);
 
 DECLARE_EVENT_CLASS(nfs4_getattr_event,
@@ -52,6 +52,7 @@
 #include <linux/nfs.h>
 #include <linux/nfs4.h>
 #include <linux/nfs_fs.h>
+#include <linux/nfs_common.h>
 
 #include "nfs4_fs.h"
 #include "nfs4trace.h"
@@ -63,9 +64,6 @@
 
 #define NFSDBG_FACILITY		NFSDBG_XDR
 
-/* Mapping from NFS error code to "errno" error code. */
-#define errno_NFSERR_IO		EIO
-
 struct compound_hdr;
 static int nfs4_stat_to_errno(int);
 static void encode_layoutget(struct xdr_stream *xdr,
@@ -8,3 +8,5 @@ nfs_acl-objs := nfsacl.o
 
 obj-$(CONFIG_GRACE_PERIOD) += grace.o
 obj-$(CONFIG_NFS_V4_2_SSC_HELPER) += nfs_ssc.o
+
+obj-$(CONFIG_NFS_COMMON) += common.o
fs/nfs_common/common.c (new file)
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/module.h>
+#include <linux/nfs_common.h>
+
+/*
+ * We need to translate between nfs status return values and
+ * the local errno values which may not be the same.
+ */
+static const struct {
+	int stat;
+	int errno;
+} nfs_errtbl[] = {
+	{ NFS_OK,		0		},
+	{ NFSERR_PERM,		-EPERM		},
+	{ NFSERR_NOENT,		-ENOENT		},
+	{ NFSERR_IO,		-errno_NFSERR_IO},
+	{ NFSERR_NXIO,		-ENXIO		},
+	{ NFSERR_ACCES,		-EACCES		},
+	{ NFSERR_EXIST,		-EEXIST		},
+	{ NFSERR_XDEV,		-EXDEV		},
+	{ NFSERR_NODEV,		-ENODEV		},
+	{ NFSERR_NOTDIR,	-ENOTDIR	},
+	{ NFSERR_ISDIR,		-EISDIR		},
+	{ NFSERR_INVAL,		-EINVAL		},
+	{ NFSERR_FBIG,		-EFBIG		},
+	{ NFSERR_NOSPC,		-ENOSPC		},
+	{ NFSERR_ROFS,		-EROFS		},
+	{ NFSERR_MLINK,		-EMLINK		},
+	{ NFSERR_NAMETOOLONG,	-ENAMETOOLONG	},
+	{ NFSERR_NOTEMPTY,	-ENOTEMPTY	},
+	{ NFSERR_DQUOT,		-EDQUOT		},
+	{ NFSERR_STALE,		-ESTALE		},
+	{ NFSERR_REMOTE,	-EREMOTE	},
+#ifdef EWFLUSH
+	{ NFSERR_WFLUSH,	-EWFLUSH	},
+#endif
+	{ NFSERR_BADHANDLE,	-EBADHANDLE	},
+	{ NFSERR_NOT_SYNC,	-ENOTSYNC	},
+	{ NFSERR_BAD_COOKIE,	-EBADCOOKIE	},
+	{ NFSERR_NOTSUPP,	-ENOTSUPP	},
+	{ NFSERR_TOOSMALL,	-ETOOSMALL	},
+	{ NFSERR_SERVERFAULT,	-EREMOTEIO	},
+	{ NFSERR_BADTYPE,	-EBADTYPE	},
+	{ NFSERR_JUKEBOX,	-EJUKEBOX	},
+	{ -1,			-EIO		}
+};
+
+/**
+ * nfs_stat_to_errno - convert an NFS status code to a local errno
+ * @status: NFS status code to convert
+ *
+ * Returns a local errno value, or -EIO if the NFS status code is
+ * not recognized. This function is used jointly by NFSv2 and NFSv3.
+ */
+int nfs_stat_to_errno(enum nfs_stat status)
+{
+	int i;
+
+	for (i = 0; nfs_errtbl[i].stat != -1; i++) {
+		if (nfs_errtbl[i].stat == (int)status)
+			return nfs_errtbl[i].errno;
+	}
+	return nfs_errtbl[i].errno;
+}
+EXPORT_SYMBOL_GPL(nfs_stat_to_errno);
@@ -8,6 +8,7 @@ config NFSD
 	select LOCKD
 	select SUNRPC
 	select EXPORTFS
+	select NFS_COMMON
 	select NFS_ACL_SUPPORT if NFSD_V2_ACL
 	select NFS_ACL_SUPPORT if NFSD_V3_ACL
 	depends on MULTIUSER
@@ -64,6 +64,8 @@ struct nfsd_net {
 
 	struct lock_manager nfsd4_manager;
 	bool grace_ended;
+	bool grace_end_forced;
+	bool client_tracking_active;
 	time64_t boot_time;
 
 	struct dentry *nfsd_client_dir;
@@ -1321,7 +1321,7 @@ try_again:
 		    (schedule_timeout(20*HZ) == 0)) {
 			finish_wait(&nn->nfsd_ssc_waitq, &wait);
 			kfree(work);
-			return nfserr_eagain;
+			return nfserr_jukebox;
 		}
 		finish_wait(&nn->nfsd_ssc_waitq, &wait);
 		goto try_again;
@@ -84,7 +84,7 @@ static u64 current_sessionid = 1;
 /* forward declarations */
 static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
 static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
-void nfsd4_end_grace(struct nfsd_net *nn);
+static void nfsd4_end_grace(struct nfsd_net *nn);
 static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);
 static void nfsd4_file_hash_remove(struct nfs4_file *fi);
 
@@ -5882,7 +5882,7 @@ nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	return nfs_ok;
 }
 
-void
+static void
 nfsd4_end_grace(struct nfsd_net *nn)
 {
 	/* do nothing if grace period already ended */
@@ -5915,6 +5915,33 @@ nfsd4_end_grace(struct nfsd_net *nn)
 	 */
 }
 
+/**
+ * nfsd4_force_end_grace - forcibly end the NFSv4 grace period
+ * @nn: network namespace for the server instance to be updated
+ *
+ * Forces bypass of normal grace period completion, then schedules
+ * the laundromat to end the grace period immediately. Does not wait
+ * for the grace period to fully terminate before returning.
+ *
+ * Return values:
+ *   %true: Grace termination scheduled
+ *   %false: No action was taken
+ */
+bool nfsd4_force_end_grace(struct nfsd_net *nn)
+{
+	if (!nn->client_tracking_ops)
+		return false;
+	spin_lock(&nn->client_lock);
+	if (nn->grace_ended || !nn->client_tracking_active) {
+		spin_unlock(&nn->client_lock);
+		return false;
+	}
+	WRITE_ONCE(nn->grace_end_forced, true);
+	mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
+	spin_unlock(&nn->client_lock);
+	return true;
+}
+
 /*
  * If we've waited a lease period but there are still clients trying to
  * reclaim, wait a little longer to give them a chance to finish.
@@ -5924,6 +5951,8 @@ static bool clients_still_reclaiming(struct nfsd_net *nn)
 	time64_t double_grace_period_end = nn->boot_time +
 					   2 * nn->nfsd4_lease;
 
+	if (READ_ONCE(nn->grace_end_forced))
+		return false;
 	if (nn->track_reclaim_completes &&
 	    atomic_read(&nn->nr_reclaim_complete) ==
 	    nn->reclaim_str_hashtbl_size)
@@ -8131,6 +8160,8 @@ static int nfs4_state_create_net(struct net *net)
 	nn->unconf_name_tree = RB_ROOT;
 	nn->boot_time = ktime_get_real_seconds();
 	nn->grace_ended = false;
+	nn->grace_end_forced = false;
+	nn->client_tracking_active = false;
 	nn->nfsd4_manager.block_opens = true;
 	INIT_LIST_HEAD(&nn->nfsd4_manager.list);
 	INIT_LIST_HEAD(&nn->client_lru);
@@ -8207,6 +8238,10 @@ nfs4_state_start_net(struct net *net)
 		return ret;
 	locks_start_grace(net, &nn->nfsd4_manager);
 	nfsd4_client_tracking_init(net);
+	/* safe for laundromat to run now */
+	spin_lock(&nn->client_lock);
+	nn->client_tracking_active = true;
+	spin_unlock(&nn->client_lock);
 	if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
 		goto skip_grace;
 	printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n",
@@ -8253,6 +8288,9 @@ nfs4_state_shutdown_net(struct net *net)
 
 	unregister_shrinker(&nn->nfsd_client_shrinker);
 	cancel_work_sync(&nn->nfsd_shrinker_work);
+	spin_lock(&nn->client_lock);
+	nn->client_tracking_active = false;
+	spin_unlock(&nn->client_lock);
 	cancel_delayed_work_sync(&nn->laundromat_work);
 	locks_end_grace(&nn->nfsd4_manager);
 
@@ -1117,9 +1117,8 @@ static ssize_t write_v4_end_grace(struct file *file, char *buf, size_t size)
 	case 'Y':
 	case 'y':
 	case '1':
-		if (!nn->nfsd_serv)
+		if (!nfsd4_force_end_grace(nn))
 			return -EBUSY;
-		nfsd4_end_grace(nn);
 		break;
 	default:
 		return -EINVAL;
@@ -201,7 +201,6 @@ void nfsd_lockd_shutdown(void);
 #define nfserr_noent		cpu_to_be32(NFSERR_NOENT)
 #define nfserr_io		cpu_to_be32(NFSERR_IO)
 #define nfserr_nxio		cpu_to_be32(NFSERR_NXIO)
-#define nfserr_eagain		cpu_to_be32(NFSERR_EAGAIN)
 #define nfserr_acces		cpu_to_be32(NFSERR_ACCES)
 #define nfserr_exist		cpu_to_be32(NFSERR_EXIST)
 #define nfserr_xdev		cpu_to_be32(NFSERR_XDEV)
@@ -719,7 +719,7 @@ static inline void get_nfs4_file(struct nfs4_file *fi)
 struct nfsd_file *find_any_file(struct nfs4_file *f);
 
 /* grace period management */
-void nfsd4_end_grace(struct nfsd_net *nn);
+bool nfsd4_force_end_grace(struct nfsd_net *nn);
 
 /* nfs4recover operations */
 extern int nfsd4_client_tracking_init(struct net *net);
@@ -41,10 +41,10 @@ extern const struct nt_err_code_struct nt_errs[];
 #define NT_STATUS_MEDIA_CHANGED 0x8000001c
 #define NT_STATUS_END_OF_MEDIA 0x8000001e
 #define NT_STATUS_MEDIA_CHECK 0x80000020
-#define NT_STATUS_NO_DATA_DETECTED 0x8000001c
+#define NT_STATUS_NO_DATA_DETECTED 0x80000022
 #define NT_STATUS_STOPPED_ON_SYMLINK 0x8000002d
 #define NT_STATUS_DEVICE_REQUIRES_CLEANING 0x80000288
-#define NT_STATUS_DEVICE_DOOR_OPEN 0x80000288
+#define NT_STATUS_DEVICE_DOOR_OPEN 0x80000289
 #define NT_STATUS_UNSUCCESSFUL 0xC0000000 | 0x0001
 #define NT_STATUS_NOT_IMPLEMENTED 0xC0000000 | 0x0002
 #define NT_STATUS_INVALID_INFO_CLASS 0xC0000000 | 0x0003
@@ -70,7 +70,7 @@ extern const struct nt_err_code_struct nt_errs[];
 #define NT_STATUS_NO_MEMORY 0xC0000000 | 0x0017
 #define NT_STATUS_CONFLICTING_ADDRESSES 0xC0000000 | 0x0018
 #define NT_STATUS_NOT_MAPPED_VIEW 0xC0000000 | 0x0019
-#define NT_STATUS_UNABLE_TO_FREE_VM 0x80000000 | 0x001a
+#define NT_STATUS_UNABLE_TO_FREE_VM 0xC0000000 | 0x001a
 #define NT_STATUS_UNABLE_TO_DELETE_SECTION 0xC0000000 | 0x001b
 #define NT_STATUS_INVALID_SYSTEM_SERVICE 0xC0000000 | 0x001c
 #define NT_STATUS_ILLEGAL_INSTRUCTION 0xC0000000 | 0x001d
@@ -3355,13 +3355,12 @@ static inline bool debug_pagealloc_enabled_static(void)
 	return static_branch_unlikely(&_debug_pagealloc_enabled);
 }
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
 /*
  * To support DEBUG_PAGEALLOC architecture must ensure that
  * __kernel_map_pages() never fails
  */
 extern void __kernel_map_pages(struct page *page, int numpages, int enable);
-
+#ifdef CONFIG_DEBUG_PAGEALLOC
 static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
 {
 	if (debug_pagealloc_enabled_static())
@@ -4951,7 +4951,8 @@ netdev_features_t netdev_increment_features(netdev_features_t all,
 static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
 							netdev_features_t mask)
 {
-	return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
+	return netdev_increment_features(features, NETIF_F_ALL_TSO |
+					 NETIF_F_ALL_FOR_ALL, mask);
 }
 
 int __netdev_update_features(struct net_device *dev);
include/linux/nfs_common.h (new file)
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file contains constants and methods used by both NFS client and server.
+ */
+#ifndef _LINUX_NFS_COMMON_H
+#define _LINUX_NFS_COMMON_H
+
+#include <linux/errno.h>
+#include <uapi/linux/nfs.h>
+
+/* Mapping from NFS error code to "errno" error code. */
+#define errno_NFSERR_IO		EIO
+
+int nfs_stat_to_errno(enum nfs_stat status);
+
+#endif /* _LINUX_NFS_COMMON_H */
@@ -99,6 +99,9 @@ int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
 			  unsigned long end, const struct mm_walk_ops *ops,
 			  pgd_t *pgd,
 			  void *private);
+int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
+			unsigned long end, const struct mm_walk_ops *ops,
+			void *private);
 int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
 		  void *private);
 int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
@ -555,6 +555,18 @@ static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu)
		dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
}

static inline struct net_device *dst_dev_rcu(const struct dst_entry *dst)
{
	/* In the future, use rcu_dereference(dst->dev) */
	WARN_ON_ONCE(!rcu_read_lock_held());
	return READ_ONCE(dst->dev);
}

static inline struct net_device *skb_dst_dev_rcu(const struct sk_buff *skb)
{
	return dst_dev_rcu(skb_dst(skb));
}

struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie);
void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu, bool confirm_neigh);

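dst_dev_rcu() only asserts the locking contract; the caller still has to hold the RCU read lock around every use of the returned device. A minimal caller sketch (the function is hypothetical, mirroring the ip_output() conversion later in this commit):

/* Hypothetical user: the net_device is only guaranteed to stay alive
 * while the RCU read-side critical section is held.
 */
static unsigned int demo_dst_mtu(const struct sk_buff *skb)
{
	struct net_device *dev;
	unsigned int mtu = 0;

	rcu_read_lock();
	dev = skb_dst_dev_rcu(skb);	/* warns if called without the lock */
	if (dev)
		mtu = READ_ONCE(dev->mtu);
	rcu_read_unlock();
	return mtu;
}
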
@ -16,7 +16,6 @@ TRACE_DEFINE_ENUM(NFSERR_PERM);
TRACE_DEFINE_ENUM(NFSERR_NOENT);
TRACE_DEFINE_ENUM(NFSERR_IO);
TRACE_DEFINE_ENUM(NFSERR_NXIO);
TRACE_DEFINE_ENUM(NFSERR_EAGAIN);
TRACE_DEFINE_ENUM(NFSERR_ACCES);
TRACE_DEFINE_ENUM(NFSERR_EXIST);
TRACE_DEFINE_ENUM(NFSERR_XDEV);

@ -52,7 +51,7 @@ TRACE_DEFINE_ENUM(NFSERR_JUKEBOX);
		{ NFSERR_IO, "IO" }, \
		{ NFSERR_NXIO, "NXIO" }, \
		{ ECHILD, "CHILD" }, \
		{ NFSERR_EAGAIN, "AGAIN" }, \
		{ ETIMEDOUT, "TIMEDOUT" }, \
		{ NFSERR_ACCES, "ACCES" }, \
		{ NFSERR_EXIST, "EXIST" }, \
		{ NFSERR_XDEV, "XDEV" }, \

@ -49,7 +49,6 @@
	NFSERR_NOENT = 2,		/* v2 v3 v4 */
	NFSERR_IO = 5,			/* v2 v3 v4 */
	NFSERR_NXIO = 6,		/* v2 v3 v4 */
	NFSERR_EAGAIN = 11,		/* v2 v3 */
	NFSERR_ACCES = 13,		/* v2 v3 v4 */
	NFSERR_EXIST = 17,		/* v2 v3 v4 */
	NFSERR_XDEV = 18,		/*    v3 v4 */

@ -12,7 +12,7 @@
 * Emit the sbox as volatile const to prevent the compiler from doing
 * constant folding on sbox references involving fixed indexes.
 */
static volatile const u8 __cacheline_aligned aes_sbox[] = {
static volatile const u8 ____cacheline_aligned aes_sbox[] = {
	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,

@ -47,7 +47,7 @@ static volatile const u8 __cacheline_aligned aes_sbox[] = {
	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16,
};

static volatile const u8 __cacheline_aligned aes_inv_sbox[] = {
static volatile const u8 ____cacheline_aligned aes_inv_sbox[] = {
	0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38,
	0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
	0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87,

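The two-versus-four-underscore distinction is the whole point of this hunk: in include/linux/cache.h the four-underscore form only sets the alignment, while the two-underscore form additionally places the object in the .data..cacheline_aligned section, which is the wrong home for a volatile const lookup table. Abridged from that header:

/* Alignment only - suitable for const/rodata objects. */
#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))

/* Alignment plus placement in a dedicated .data section - this is what
 * the old annotation did, silently moving the sbox out of rodata.
 */
#define __cacheline_aligned					\
	__attribute__((__aligned__(SMP_CACHE_BYTES),		\
		       __section__(".data..cacheline_aligned")))
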
126
mm/ksm.c
@ -39,6 +39,7 @@
#include <linux/freezer.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/pagewalk.h>

#include <asm/tlbflush.h>
#include "internal.h"

@ -2223,6 +2224,94 @@ static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot,
	return rmap_item;
}

struct ksm_next_page_arg {
	struct folio *folio;
	struct page *page;
	unsigned long addr;
};

static int ksm_next_page_pmd_entry(pmd_t *pmdp, unsigned long addr, unsigned long end,
				   struct mm_walk *walk)
{
	struct ksm_next_page_arg *private = walk->private;
	struct vm_area_struct *vma = walk->vma;
	pte_t *start_ptep = NULL, *ptep, pte;
	struct mm_struct *mm = walk->mm;
	struct folio *folio;
	struct page *page;
	spinlock_t *ptl;
	pmd_t pmd;

	if (ksm_test_exit(mm))
		return 0;

	cond_resched();

	pmd = pmd_read_atomic(pmdp);
	if (!pmd_present(pmd))
		return 0;

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && pmd_leaf(pmd)) {
		ptl = pmd_lock(mm, pmdp);
		pmd = READ_ONCE(*pmdp);

		if (!pmd_present(pmd)) {
			goto not_found_unlock;
		} else if (pmd_leaf(pmd)) {
			page = vm_normal_page_pmd(vma, addr, pmd);
			if (!page)
				goto not_found_unlock;
			folio = page_folio(page);

			if (folio_is_zone_device(folio) || !folio_test_anon(folio))
				goto not_found_unlock;

			page += ((addr & (PMD_SIZE - 1)) >> PAGE_SHIFT);
			goto found_unlock;
		}
		spin_unlock(ptl);
	}

	start_ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
	if (!start_ptep)
		return 0;

	for (ptep = start_ptep; addr < end; ptep++, addr += PAGE_SIZE) {
		pte = ptep_get(ptep);

		if (!pte_present(pte))
			continue;

		page = vm_normal_page(vma, addr, pte);
		if (!page)
			continue;
		folio = page_folio(page);

		if (folio_is_zone_device(folio) || !folio_test_anon(folio))
			continue;
		goto found_unlock;
	}

not_found_unlock:
	spin_unlock(ptl);
	if (start_ptep)
		pte_unmap(start_ptep);
	return 0;
found_unlock:
	folio_get(folio);
	spin_unlock(ptl);
	if (start_ptep)
		pte_unmap(start_ptep);
	private->page = page;
	private->folio = folio;
	private->addr = addr;
	return 1;
}

static struct mm_walk_ops ksm_next_page_ops = {
	.pmd_entry = ksm_next_page_pmd_entry,
};

static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
{
	struct mm_struct *mm;

@ -2307,32 +2396,43 @@ next_mm:
		ksm_scan.address = vma->vm_end;

	while (ksm_scan.address < vma->vm_end) {
		struct ksm_next_page_arg ksm_next_page_arg;
		struct page *tmp_page = NULL;
		struct folio *folio;

		if (ksm_test_exit(mm))
			break;
		*page = follow_page(vma, ksm_scan.address, FOLL_GET);
		if (IS_ERR_OR_NULL(*page)) {
			ksm_scan.address += PAGE_SIZE;
			cond_resched();
			continue;

		int found;

		found = walk_page_range_vma(vma, ksm_scan.address,
					    vma->vm_end,
					    &ksm_next_page_ops,
					    &ksm_next_page_arg);

		if (found > 0) {
			folio = ksm_next_page_arg.folio;
			tmp_page = ksm_next_page_arg.page;
			ksm_scan.address = ksm_next_page_arg.addr;
		} else {
			VM_WARN_ON_ONCE(found < 0);
			ksm_scan.address = vma->vm_end - PAGE_SIZE;
		}
		if (is_zone_device_page(*page))
			goto next_page;
		if (PageAnon(*page)) {
			flush_anon_page(vma, *page, ksm_scan.address);
			flush_dcache_page(*page);
		if (tmp_page) {
			flush_anon_page(vma, tmp_page, ksm_scan.address);
			flush_dcache_page(tmp_page);
			rmap_item = get_next_rmap_item(mm_slot,
				ksm_scan.rmap_list, ksm_scan.address);
			if (rmap_item) {
				ksm_scan.rmap_list =
						&rmap_item->rmap_list;
				ksm_scan.address += PAGE_SIZE;
				*page = tmp_page;
			} else
				put_page(*page);
				folio_put(folio);
			mmap_read_unlock(mm);
			return rmap_item;
		}
next_page:
		put_page(*page);
		ksm_scan.address += PAGE_SIZE;
		cond_resched();
	}

@ -517,6 +517,26 @@ int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
	return walk_pgd_range(start, end, &walk);
}

int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
			unsigned long end, const struct mm_walk_ops *ops,
			void *private)
{
	struct mm_walk walk = {
		.ops = ops,
		.mm = vma->vm_mm,
		.vma = vma,
		.private = private,
	};

	if (start >= end || !walk.mm)
		return -EINVAL;
	if (start < vma->vm_start || end > vma->vm_end)
		return -EINVAL;

	mmap_assert_locked(walk.mm);
	return __walk_page_range(start, end, &walk);
}

int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
		  void *private)
{

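walk_page_range_vma() follows the standard pagewalk contract: the ops table supplies callbacks, private carries caller state, and a nonzero callback return stops the walk and is propagated. A minimal hypothetical user (all demo_* names invented):

/* Hypothetical walker: count present PTEs inside a single VMA.
 * The caller must already hold the mmap lock, which
 * mmap_assert_locked() above enforces.
 */
static int demo_pte_entry(pte_t *pte, unsigned long addr,
			  unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(ptep_get(pte)))
		(*count)++;
	return 0;	/* nonzero would abort the walk */
}

static const struct mm_walk_ops demo_walk_ops = {
	.pte_entry = demo_pte_entry,
};

/* Usage:
 *	unsigned long count = 0;
 *	walk_page_range_vma(vma, vma->vm_start, vma->vm_end,
 *			    &demo_walk_ops, &count);
 */
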
@ -768,7 +768,7 @@ static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (user_size < ETH_HLEN || user_size > PAGE_SIZE - headroom - tailroom)
	if (user_size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	size = SKB_DATA_ALIGN(size);

@ -1097,6 +1097,9 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, kattr->test.data_size_in,
			     size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));

@ -1277,9 +1280,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
{
	bool do_live = (kattr->test.flags & BPF_F_TEST_XDP_LIVE_FRAMES);
	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	u32 retval = 0, meta_sz = 0, duration, max_linear_sz, size;
	u32 linear_sz = kattr->test.data_size_in;
	u32 batch_size = kattr->test.batch_size;
	u32 retval = 0, duration, max_data_sz;
	u32 size = kattr->test.data_size_in;
	u32 headroom = XDP_PACKET_HEADROOM;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;

@ -1301,8 +1304,6 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			batch_size = NAPI_POLL_WEIGHT;
		else if (batch_size > TEST_XDP_MAX_BATCH)
			return -E2BIG;

		headroom += sizeof(struct xdp_page_head);
	} else if (batch_size) {
		return -EINVAL;
	}

@ -1313,39 +1314,55 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,

	if (ctx) {
		/* There can't be user provided data before the meta data */
		if (ctx->data_meta || ctx->data_end != size ||
		if (ctx->data_meta || ctx->data_end > kattr->test.data_size_in ||
		    ctx->data > ctx->data_end ||
		    unlikely(xdp_metalen_invalid(ctx->data)) ||
		    (do_live && (kattr->test.data_out || kattr->test.ctx_out)))
			goto free_ctx;
		/* Meta data is allocated from the headroom */
		headroom -= ctx->data;
	}

	max_data_sz = 4096 - headroom - tailroom;
	if (size > max_data_sz) {
		/* disallow live data mode for jumbo frames */
		if (do_live)
		meta_sz = ctx->data;
		if (xdp_metalen_invalid(meta_sz) || meta_sz > headroom - sizeof(struct xdp_frame))
			goto free_ctx;
		size = max_data_sz;

		/* Meta data is allocated from the headroom */
		headroom -= meta_sz;
		linear_sz = ctx->data_end;
	}

	data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom);
	/* The xdp_page_head structure takes up space in each page, limiting the
	 * size of the packet data; add the extra size to headroom here to make
	 * sure it's accounted in the length checks below, but not in the
	 * metadata size check above.
	 */
	if (do_live)
		headroom += sizeof(struct xdp_page_head);

	max_linear_sz = PAGE_SIZE - headroom - tailroom;
	linear_sz = min_t(u32, linear_sz, max_linear_sz);

	/* disallow live data mode for jumbo frames */
	if (do_live && kattr->test.data_size_in > linear_sz)
		goto free_ctx;

	if (kattr->test.data_size_in - meta_sz < ETH_HLEN)
		goto free_ctx;

	data = bpf_test_init(kattr, linear_sz, max_linear_sz, headroom, tailroom);
	if (IS_ERR(data)) {
		ret = PTR_ERR(data);
		goto free_ctx;
	}

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	rxqueue->xdp_rxq.frag_size = headroom + max_data_sz + tailroom;
	rxqueue->xdp_rxq.frag_size = PAGE_SIZE;
	xdp_init_buff(&xdp, rxqueue->xdp_rxq.frag_size, &rxqueue->xdp_rxq);
	xdp_prepare_buff(&xdp, data, headroom, size, true);
	xdp_prepare_buff(&xdp, data, headroom, linear_sz, true);
	sinfo = xdp_get_shared_info_from_buff(&xdp);

	ret = xdp_convert_md_to_buff(ctx, &xdp);
	if (ret)
		goto free_data;

	size = linear_sz;
	if (unlikely(kattr->test.data_size_in > size)) {
		void __user *data_in = u64_to_user_ptr(kattr->test.data_in);

@ -1356,13 +1373,13 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,

		if (sinfo->nr_frags == MAX_SKB_FRAGS) {
			ret = -ENOMEM;
			goto out;
			goto out_put_dev;
		}

		page = alloc_page(GFP_KERNEL);
		if (!page) {
			ret = -ENOMEM;
			goto out;
			goto out_put_dev;
		}

		frag = &sinfo->frags[sinfo->nr_frags++];

@ -1375,7 +1392,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
		if (copy_from_user(page_address(page), data_in + size,
				   data_len)) {
			ret = -EFAULT;
			goto out;
			goto out_put_dev;
		}
		sinfo->xdp_frags_size += data_len;
		size += data_len;

@ -1390,6 +1407,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
		ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration);
	else
		ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
out_put_dev:
	/* We convert the xdp_buff back to an xdp_md before checking the return
	 * code so the reference count of any held netdevice will be decremented
	 * even if the test run failed.

@ -187,7 +187,6 @@ int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
{
	struct metadata_dst *tunnel_dst;
	__be64 tunnel_id;
	int err;

	if (!vlan)
		return 0;

@ -197,9 +196,13 @@ int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
		return 0;

	skb_dst_drop(skb);
	err = skb_vlan_pop(skb);
	if (err)
		return err;
	/* For 802.1ad (QinQ), skb_vlan_pop() incorrectly moves the C-VLAN
	 * from payload to hwaccel after clearing S-VLAN. We only need to
	 * clear the hwaccel S-VLAN; the C-VLAN must stay in payload for
	 * correct VXLAN encapsulation. This is also correct for 802.1Q
	 * where no C-VLAN exists in payload.
	 */
	__vlan_hwaccel_clear_tag(skb);

	tunnel_dst = rcu_dereference(vlan->tinfo.tunnel_dst);
	if (tunnel_dst && dst_hold_safe(&tunnel_dst->dst))

@ -1571,6 +1571,8 @@ int j1939_session_activate(struct j1939_session *session)
	if (active) {
		j1939_session_put(active);
		ret = -EAGAIN;
	} else if (priv->ndev->reg_state != NETREG_REGISTERED) {
		ret = -ENODEV;
	} else {
		WARN_ON_ONCE(session->state != J1939_SESSION_NEW);
		list_add_tail(&session->active_session_list_entry,

@ -2187,7 +2187,9 @@ static int process_auth_done(struct ceph_connection *con, void *p, void *end)

	ceph_decode_64_safe(&p, end, global_id, bad);
	ceph_decode_32_safe(&p, end, con->v2.con_mode, bad);

	ceph_decode_32_safe(&p, end, payload_len, bad);
	ceph_decode_need(&p, end, payload_len, bad);

	dout("%s con %p global_id %llu con_mode %d payload_len %d\n",
	     __func__, con, global_id, con->v2.con_mode, payload_len);

@ -1417,7 +1417,7 @@ static int mon_handle_auth_done(struct ceph_connection *con,
	if (!ret)
		finish_hunting(monc);
	mutex_unlock(&monc->mutex);
	return 0;
	return ret;
}

static int mon_handle_auth_bad_method(struct ceph_connection *con,

@ -1529,6 +1529,7 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
	struct ceph_pg_pool_info *pi;
	struct ceph_pg pgid, last_pgid;
	struct ceph_osds up, acting;
	bool should_be_paused;
	bool is_read = t->flags & CEPH_OSD_FLAG_READ;
	bool is_write = t->flags & CEPH_OSD_FLAG_WRITE;
	bool force_resend = false;

@ -1597,10 +1598,16 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
			   &last_pgid))
		force_resend = true;

	if (t->paused && !target_should_be_paused(osdc, t, pi)) {
		t->paused = false;
	should_be_paused = target_should_be_paused(osdc, t, pi);
	if (t->paused && !should_be_paused) {
		unpaused = true;
	}
	if (t->paused != should_be_paused) {
		dout("%s t %p paused %d -> %d\n", __func__, t, t->paused,
		     should_be_paused);
		t->paused = should_be_paused;
	}

	legacy_change = ceph_pg_compare(&t->pgid, &pgid) ||
		ceph_osds_changed(&t->acting, &acting,
				  t->used_replica || any_change);

@ -241,22 +241,26 @@ static struct crush_choose_arg_map *alloc_choose_arg_map(void)

static void free_choose_arg_map(struct crush_choose_arg_map *arg_map)
{
	if (arg_map) {
		int i, j;
	int i, j;

		WARN_ON(!RB_EMPTY_NODE(&arg_map->node));
	if (!arg_map)
		return;

	WARN_ON(!RB_EMPTY_NODE(&arg_map->node));

	if (arg_map->args) {
		for (i = 0; i < arg_map->size; i++) {
			struct crush_choose_arg *arg = &arg_map->args[i];

			for (j = 0; j < arg->weight_set_size; j++)
				kfree(arg->weight_set[j].weights);
			kfree(arg->weight_set);
			if (arg->weight_set) {
				for (j = 0; j < arg->weight_set_size; j++)
					kfree(arg->weight_set[j].weights);
				kfree(arg->weight_set);
			}
			kfree(arg->ids);
		}
		kfree(arg_map->args);
		kfree(arg_map);
	}
	kfree(arg_map);
}

DEFINE_RB_FUNCS(choose_arg_map, struct crush_choose_arg_map, choose_args_index,

@ -1979,11 +1983,13 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, bool msgr2,
			 sizeof(u64) + sizeof(u32), e_inval);
	ceph_decode_copy(p, &fsid, sizeof(fsid));
	epoch = ceph_decode_32(p);
	BUG_ON(epoch != map->epoch+1);
	ceph_decode_copy(p, &modified, sizeof(modified));
	new_pool_max = ceph_decode_64(p);
	new_flags = ceph_decode_32(p);

	if (epoch != map->epoch + 1)
		goto e_inval;

	/* full map? */
	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > 0) {

@ -4045,12 +4045,14 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
{
	struct sk_buff *list_skb = skb_shinfo(skb)->frag_list;
	unsigned int tnl_hlen = skb_tnl_header_len(skb);
	unsigned int delta_truesize = 0;
	unsigned int delta_len = 0;
	struct sk_buff *tail = NULL;
	struct sk_buff *nskb, *tmp;
	int len_diff, err;

	/* Only skb_gro_receive_list generated skbs arrive here */
	DEBUG_NET_WARN_ON_ONCE(!(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST));

	skb_push(skb, -skb_network_offset(skb) + offset);

	/* Ensure the head is writeable before touching the shared info */

@ -4064,8 +4066,9 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
		nskb = list_skb;
		list_skb = list_skb->next;

		DEBUG_NET_WARN_ON_ONCE(nskb->sk);

		err = 0;
		delta_truesize += nskb->truesize;
		if (skb_shared(nskb)) {
			tmp = skb_clone(nskb, GFP_ATOMIC);
			if (tmp) {

@ -4108,7 +4111,6 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
		goto err_linearize;
	}

	skb->truesize = skb->truesize - delta_truesize;
	skb->data_len = skb->data_len - delta_len;
	skb->len = skb->len - delta_len;

@ -3614,7 +3614,7 @@ void sock_enable_timestamp(struct sock *sk, enum sock_flags flag)
int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
		       int level, int type)
{
	struct sock_exterr_skb *serr;
	struct sock_extended_err ee;
	struct sk_buff *skb;
	int copied, err;

@ -3634,8 +3634,9 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,

	sock_recv_timestamp(msg, sk, skb);

	serr = SKB_EXT_ERR(skb);
	put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
	/* We must use a bounce buffer for CONFIG_HARDENED_USERCOPY=y */
	ee = SKB_EXT_ERR(skb)->ee;
	put_cmsg(msg, level, type, sizeof(ee), &ee);

	msg->msg_flags |= MSG_ERRQUEUE;
	err = copied;

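The bounce copy is not cosmetic: with CONFIG_HARDENED_USERCOPY=y, copy_to_user() validates the source object, and a pointer into the skb control-block area backing SKB_EXT_ERR() can fail that check, whereas a plain stack variable always passes. The pattern, reduced to its core (assuming the hardened-usercopy semantics described above):

/* Copy the datum to an on-stack bounce buffer first, so the usercopy
 * hardening sees a well-bounded stack object instead of a pointer
 * into the middle of another structure.
 */
struct sock_extended_err bounce = SKB_EXT_ERR(skb)->ee;	/* struct copy */
put_cmsg(msg, level, type, sizeof(bounce), &bounce);
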
@ -563,7 +563,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,

	skb_reserve(skb, hlen);
	skb_reset_network_header(skb);
	arp = skb_put(skb, arp_hdr_len(dev));
	skb_put(skb, arp_hdr_len(dev));
	skb->dev = dev;
	skb->protocol = htons(ETH_P_ARP);
	if (!src_hw)

@ -571,12 +571,13 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
	if (!dest_hw)
		dest_hw = dev->broadcast;

	/*
	 *	Fill the device header for the ARP frame
	/* Fill the device header for the ARP frame.
	 * Note: skb->head can be changed.
	 */
	if (dev_hard_header(skb, dev, ptype, dest_hw, src_hw, skb->len) < 0)
		goto out;

	arp = arp_hdr(skb);
	/*
	 * Fill out the arp protocol part.
	 *

@ -423,15 +423,21 @@ int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)

int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev;
	struct net_device *dev, *indev = skb->dev;
	int ret_val;

	rcu_read_lock();
	dev = skb_dst_dev_rcu(skb);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, indev, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
	ret_val = NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			       net, sk, skb, indev, dev,
			       ip_finish_output,
			       !(IPCB(skb)->flags & IPSKB_REROUTED));
	rcu_read_unlock();
	return ret_val;
}
EXPORT_SYMBOL(ip_output);

@ -848,10 +848,8 @@ out:
out_free:
	if (free)
		kfree(ipc.opt);
	if (!err) {
		icmp_out_count(sock_net(sk), user_icmph.type);
	if (!err)
		return len;
	}
	return err;

do_confirm:

@ -229,6 +229,7 @@ static int __nf_conncount_add(struct net *net,

		nf_ct_put(found_ct);
	}
	list->last_gc = (u32)jiffies;

add_new_node:
	if (WARN_ON_ONCE(list->count > INT_MAX)) {

@ -248,7 +249,6 @@ add_new_node:
	conn->jiffies32 = (u32)jiffies;
	list_add_tail(&conn->node, &list->head);
	list->count++;
	list->last_gc = (u32)jiffies;

out_put:
	if (refcounted)

@ -3811,7 +3811,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,

	if (!nft_use_inc(&chain->use)) {
		err = -EMFILE;
		goto err_release_rule;
		goto err_destroy_flow;
	}

	if (info->nlh->nlmsg_flags & NLM_F_REPLACE) {

@ -3861,6 +3861,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,

err_destroy_flow_rule:
	nft_use_dec_restore(&chain->use);
err_destroy_flow:
	if (flow)
		nft_flow_rule_destroy(flow);
err_release_rule:

@ -48,7 +48,7 @@ static void nft_synproxy_eval_v4(const struct nft_synproxy *priv,
				 struct tcphdr *_tcph,
				 struct synproxy_options *opts)
{
	struct nf_synproxy_info info = priv->info;
	struct nf_synproxy_info info = READ_ONCE(priv->info);
	struct net *net = nft_net(pkt);
	struct synproxy_net *snet = synproxy_pernet(net);
	struct sk_buff *skb = pkt->skb;

@ -79,7 +79,7 @@ static void nft_synproxy_eval_v6(const struct nft_synproxy *priv,
				 struct tcphdr *_tcph,
				 struct synproxy_options *opts)
{
	struct nf_synproxy_info info = priv->info;
	struct nf_synproxy_info info = READ_ONCE(priv->info);
	struct net *net = nft_net(pkt);
	struct synproxy_net *snet = synproxy_pernet(net);
	struct sk_buff *skb = pkt->skb;

@ -340,7 +340,7 @@ static void nft_synproxy_obj_update(struct nft_object *obj,
	struct nft_synproxy *newpriv = nft_obj_data(newobj);
	struct nft_synproxy *priv = nft_obj_data(obj);

	priv->info = newpriv->info;
	WRITE_ONCE(priv->info, newpriv->info);
}

static struct nft_object_type nft_synproxy_obj_type;

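The READ_ONCE()/WRITE_ONCE() pairing matters because nft_synproxy_obj_update() can run concurrently with the eval paths: a plain struct assignment may be split or re-read by the compiler, letting an eval observe a half-updated info. Since nf_synproxy_info fits in a machine word, a single atomic access is possible; a reduced, hypothetical sketch:

/* Hypothetical reduced example of the torn-read hazard. */
struct demo_info { u8 mss; u8 wscale; u16 options; };	/* 4 bytes */

static struct demo_info demo_shared;

static struct demo_info demo_read(void)
{
	/* One single-copy-atomic load; a plain assignment could be
	 * compiled as several smaller reads interleaved with a writer.
	 */
	return READ_ONCE(demo_shared);
}

static void demo_update(struct demo_info next)
{
	WRITE_ONCE(demo_shared, next);	/* paired atomic store */
}
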
@ -1483,7 +1483,7 @@ static void qfq_reset_qdisc(struct Qdisc *sch)

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->qdisc->q.qlen > 0)
			if (cl_is_active(cl))
				qfq_deactivate_class(q, cl);

			qdisc_reset(cl->qdisc);

@ -125,17 +125,19 @@ static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
/* We assume that the socket is already connected */
static struct net_device *get_netdev_for_sock(struct sock *sk)
{
	struct dst_entry *dst = sk_dst_get(sk);
	struct net_device *netdev = NULL;
	struct net_device *dev, *lowest_dev = NULL;
	struct dst_entry *dst;

	if (likely(dst)) {
		netdev = netdev_sk_get_lowest_dev(dst->dev, sk);
		dev_hold(netdev);
	rcu_read_lock();
	dst = __sk_dst_get(sk);
	dev = dst ? dst_dev_rcu(dst) : NULL;
	if (likely(dev)) {
		lowest_dev = netdev_sk_get_lowest_dev(dev, sk);
		dev_hold(lowest_dev);
	}
	rcu_read_unlock();

	dst_release(dst);

	return netdev;
	return lowest_dev;
}

static void destroy_record(struct tls_record_info *record)

@ -1084,6 +1084,10 @@ static int compat_standard_call(struct net_device *dev,
		return ioctl_standard_call(dev, iwr, cmd, info, handler);

	iwp_compat = (struct compat_iw_point *) &iwr->u.data;

	/* struct iw_point has a 32bit hole on 64bit arches. */
	memset(&iwp, 0, sizeof(iwp));

	iwp.pointer = compat_ptr(iwp_compat->pointer);
	iwp.length = iwp_compat->length;
	iwp.flags = iwp_compat->flags;

@ -228,6 +228,10 @@ int compat_private_call(struct net_device *dev, struct iwreq *iwr,
	struct iw_point iwp;

	iwp_compat = (struct compat_iw_point *) &iwr->u.data;

	/* struct iw_point has a 32bit hole on 64bit arches. */
	memset(&iwp, 0, sizeof(iwp));

	iwp.pointer = compat_ptr(iwp_compat->pointer);
	iwp.length = iwp_compat->length;
	iwp.flags = iwp_compat->flags;

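The repeated comment is the reasoning: struct iw_point contains padding on 64-bit kernels, and field-by-field assignment leaves that hole holding stale stack bytes, which could then leak onward. Zeroing the whole object first closes the hole; a generic sketch with an invented struct:

/* Hypothetical illustration of the padding-hole pattern. */
struct demo_point {
	void __user *pointer;	/* 8 bytes on 64-bit */
	__u16 length;
	__u16 flags;		/* 4 bytes of tail padding follow */
};

static void demo_fill(struct demo_point *dst, void __user *p,
		      __u16 len, __u16 fl)
{
	memset(dst, 0, sizeof(*dst));	/* clears the padding too */
	dst->pointer = p;
	dst->length = len;
	dst->flags = fl;
}
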
@ -242,10 +242,9 @@ static ssize_t cold_reset_store(struct device *dev,
{
	struct ac97_controller *ac97_ctrl;

	mutex_lock(&ac97_controllers_mutex);
	guard(mutex)(&ac97_controllers_mutex);
	ac97_ctrl = to_ac97_controller(dev);
	ac97_ctrl->ops->reset(ac97_ctrl);
	mutex_unlock(&ac97_controllers_mutex);
	return len;
}
static DEVICE_ATTR_WO(cold_reset);

@ -259,10 +258,9 @@ static ssize_t warm_reset_store(struct device *dev,
	if (!dev)
		return -ENODEV;

	mutex_lock(&ac97_controllers_mutex);
	guard(mutex)(&ac97_controllers_mutex);
	ac97_ctrl = to_ac97_controller(dev);
	ac97_ctrl->ops->warm_reset(ac97_ctrl);
	mutex_unlock(&ac97_controllers_mutex);
	return len;
}
static DEVICE_ATTR_WO(warm_reset);

@ -285,10 +283,10 @@ static const struct attribute_group *ac97_adapter_groups[] = {

static void ac97_del_adapter(struct ac97_controller *ac97_ctrl)
{
	mutex_lock(&ac97_controllers_mutex);
	ac97_ctrl_codecs_unregister(ac97_ctrl);
	list_del(&ac97_ctrl->controllers);
	mutex_unlock(&ac97_controllers_mutex);
	scoped_guard(mutex, &ac97_controllers_mutex) {
		ac97_ctrl_codecs_unregister(ac97_ctrl);
		list_del(&ac97_ctrl->controllers);
	}

	device_unregister(&ac97_ctrl->adap);
}

@ -301,6 +299,7 @@ static void ac97_adapter_release(struct device *dev)
	idr_remove(&ac97_adapter_idr, ac97_ctrl->nr);
	dev_dbg(&ac97_ctrl->adap, "adapter unregistered by %s\n",
		dev_name(ac97_ctrl->parent));
	kfree(ac97_ctrl);
}

static const struct device_type ac97_adapter_type = {

@ -312,7 +311,7 @@ static int ac97_add_adapter(struct ac97_controller *ac97_ctrl)
{
	int ret;

	mutex_lock(&ac97_controllers_mutex);
	guard(mutex)(&ac97_controllers_mutex);
	ret = idr_alloc(&ac97_adapter_idr, ac97_ctrl, 0, 0, GFP_KERNEL);
	ac97_ctrl->nr = ret;
	if (ret >= 0) {

@ -322,14 +321,14 @@ static int ac97_add_adapter(struct ac97_controller *ac97_ctrl)
		ret = device_register(&ac97_ctrl->adap);
		if (ret)
			put_device(&ac97_ctrl->adap);
	}
	if (!ret)
		list_add(&ac97_ctrl->controllers, &ac97_controllers);
	mutex_unlock(&ac97_controllers_mutex);
	} else
		kfree(ac97_ctrl);

	if (!ret)
	if (!ret) {
		list_add(&ac97_ctrl->controllers, &ac97_controllers);
		dev_dbg(&ac97_ctrl->adap, "adapter registered by %s\n",
			dev_name(ac97_ctrl->parent));
	}
	return ret;
}

@ -365,14 +364,11 @@ struct ac97_controller *snd_ac97_controller_register(
	ret = ac97_add_adapter(ac97_ctrl);

	if (ret)
		goto err;
		return ERR_PTR(ret);
	ac97_bus_reset(ac97_ctrl);
	ac97_bus_scan(ac97_ctrl);

	return ac97_ctrl;
err:
	kfree(ac97_ctrl);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(snd_ac97_controller_register);

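These conversions use the scope-based cleanup helpers from linux/cleanup.h: guard(mutex)(&m) acquires the lock and releases it automatically when the enclosing scope ends, so every early return is covered, and scoped_guard() bounds the critical section to an explicit block. A minimal sketch (demo_* names are hypothetical):

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static int demo_value;

static int demo_get(void)
{
	guard(mutex)(&demo_lock);	/* dropped on every return path */
	return demo_value;
}

static void demo_set(int v)
{
	scoped_guard(mutex, &demo_lock) {
		demo_value = v;
	}				/* unlocked here */
	/* work outside the critical section continues lock-free */
}
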
@ -521,6 +521,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
			DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 15 C7UCX"),
		}
	},
	{
		.driver_data = &acp6x_card,
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "HONOR"),
			DMI_MATCH(DMI_PRODUCT_NAME, "GOH-X"),
		}
	},
	{}
};

@ -979,6 +979,7 @@ static struct reg_default fsl_sai_reg_defaults_ofs0[] = {
	{FSL_SAI_TDR6, 0},
	{FSL_SAI_TDR7, 0},
	{FSL_SAI_TMR, 0},
	{FSL_SAI_TTCTL, 0},
	{FSL_SAI_RCR1(0), 0},
	{FSL_SAI_RCR2(0), 0},
	{FSL_SAI_RCR3(0), 0},

@ -1002,12 +1003,14 @@ static struct reg_default fsl_sai_reg_defaults_ofs8[] = {
	{FSL_SAI_TDR6, 0},
	{FSL_SAI_TDR7, 0},
	{FSL_SAI_TMR, 0},
	{FSL_SAI_TTCTL, 0},
	{FSL_SAI_RCR1(8), 0},
	{FSL_SAI_RCR2(8), 0},
	{FSL_SAI_RCR3(8), 0},
	{FSL_SAI_RCR4(8), 0},
	{FSL_SAI_RCR5(8), 0},
	{FSL_SAI_RMR, 0},
	{FSL_SAI_RTCTL, 0},
	{FSL_SAI_MCTL, 0},
	{FSL_SAI_MDIV, 0},
};

@ -37,21 +37,26 @@ static void test_xdp_adjust_tail_shrink(void)
	bpf_object__close(obj);
}

static void test_xdp_adjust_tail_grow(void)
static void test_xdp_adjust_tail_grow(bool is_64k_pagesize)
{
	const char *file = "./test_xdp_adjust_tail_grow.bpf.o";
	struct bpf_object *obj;
	char buf[4096]; /* avoid segfault: large buf to hold grow results */
	char buf[8192]; /* avoid segfault: large buf to hold grow results */
	__u32 expect_sz;
	int err, prog_fd;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .data_out = buf,
		    .data_size_out = sizeof(buf),
		    .repeat = 1,
	);

	/* topts.data_size_in as a special signal to bpf prog */
	if (is_64k_pagesize)
		topts.data_size_in = sizeof(pkt_v4) - 1;
	else
		topts.data_size_in = sizeof(pkt_v4);

	err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
	if (!ASSERT_OK(err, "test_xdp_adjust_tail_grow"))
		return;

@ -201,7 +206,7 @@ out:
	bpf_object__close(obj);
}

static void test_xdp_adjust_frags_tail_grow(void)
static void test_xdp_adjust_frags_tail_grow_4k(void)
{
	const char *file = "./test_xdp_adjust_tail_grow.bpf.o";
	__u32 exp_size;

@ -266,16 +271,93 @@ out:
	bpf_object__close(obj);
}

static void test_xdp_adjust_frags_tail_grow_64k(void)
{
	const char *file = "./test_xdp_adjust_tail_grow.bpf.o";
	__u32 exp_size;
	struct bpf_program *prog;
	struct bpf_object *obj;
	int err, i, prog_fd;
	__u8 *buf;
	LIBBPF_OPTS(bpf_test_run_opts, topts);

	obj = bpf_object__open(file);
	if (libbpf_get_error(obj))
		return;

	prog = bpf_object__next_program(obj, NULL);
	if (bpf_object__load(obj))
		goto out;

	prog_fd = bpf_program__fd(prog);

	buf = malloc(262144);
	if (!ASSERT_OK_PTR(buf, "alloc buf 256Kb"))
		goto out;

	/* Test case add 10 bytes to last frag */
	memset(buf, 1, 262144);
	exp_size = 90000 + 10;

	topts.data_in = buf;
	topts.data_out = buf;
	topts.data_size_in = 90000;
	topts.data_size_out = 262144;
	err = bpf_prog_test_run_opts(prog_fd, &topts);

	ASSERT_OK(err, "90Kb+10b");
	ASSERT_EQ(topts.retval, XDP_TX, "90Kb+10b retval");
	ASSERT_EQ(topts.data_size_out, exp_size, "90Kb+10b size");

	for (i = 0; i < 90000; i++) {
		if (buf[i] != 1)
			ASSERT_EQ(buf[i], 1, "90Kb+10b-old");
	}

	for (i = 90000; i < 90010; i++) {
		if (buf[i] != 0)
			ASSERT_EQ(buf[i], 0, "90Kb+10b-new");
	}

	for (i = 90010; i < 262144; i++) {
		if (buf[i] != 1)
			ASSERT_EQ(buf[i], 1, "90Kb+10b-untouched");
	}

	/* Test a too large grow */
	memset(buf, 1, 262144);
	exp_size = 90001;

	topts.data_in = topts.data_out = buf;
	topts.data_size_in = 90001;
	topts.data_size_out = 262144;
	err = bpf_prog_test_run_opts(prog_fd, &topts);

	ASSERT_OK(err, "90Kb+10b");
	ASSERT_EQ(topts.retval, XDP_DROP, "90Kb+10b retval");
	ASSERT_EQ(topts.data_size_out, exp_size, "90Kb+10b size");

	free(buf);
out:
	bpf_object__close(obj);
}

void test_xdp_adjust_tail(void)
{
	int page_size = getpagesize();

	if (test__start_subtest("xdp_adjust_tail_shrink"))
		test_xdp_adjust_tail_shrink();
	if (test__start_subtest("xdp_adjust_tail_grow"))
		test_xdp_adjust_tail_grow();
		test_xdp_adjust_tail_grow(page_size == 65536);
	if (test__start_subtest("xdp_adjust_tail_grow2"))
		test_xdp_adjust_tail_grow2();
	if (test__start_subtest("xdp_adjust_frags_tail_shrink"))
		test_xdp_adjust_frags_tail_shrink();
	if (test__start_subtest("xdp_adjust_frags_tail_grow"))
		test_xdp_adjust_frags_tail_grow();
	if (test__start_subtest("xdp_adjust_frags_tail_grow")) {
		if (page_size == 65536)
			test_xdp_adjust_frags_tail_grow_64k();
		else
			test_xdp_adjust_frags_tail_grow_4k();
	}
}

@ -80,9 +80,7 @@ void test_xdp_context_test_run(void)
	/* Meta data must be 32 bytes or smaller */
	test_xdp_context_error(prog_fd, opts, 0, 36, sizeof(data), 0, 0, 0);

	/* Total size of data must match data_end - data_meta */
	test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32),
			       sizeof(data) - 1, 0, 0, 0);
	/* Total size of data must be data_end - data_meta or larger */
	test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32),
			       sizeof(data) + 1, 0, 0, 0);

@ -13,7 +13,9 @@ int _xdp_adjust_tail_grow(struct xdp_md *xdp)
	/* Data length determine test case */

	if (data_len == 54) { /* sizeof(pkt_v4) */
		offset = 4096; /* test too large offset */
		offset = 4096; /* test too large offset, 4k page size */
	} else if (data_len == 53) { /* sizeof(pkt_v4) - 1 */
		offset = 65536; /* test too large offset, 64k page size */
	} else if (data_len == 74) { /* sizeof(pkt_v6) */
		offset = 40;
	} else if (data_len == 64) {

@ -25,6 +27,10 @@ int _xdp_adjust_tail_grow(struct xdp_md *xdp)
		offset = 10;
	} else if (data_len == 9001) {
		offset = 4096;
	} else if (data_len == 90000) {
		offset = 10; /* test a small offset, 64k page size */
	} else if (data_len == 90001) {
		offset = 65536; /* test too large offset, 64k page size */
	} else {
		return XDP_ABORTED; /* No matching test */
	}