Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Merge in late fixes to prepare for the 6.16 net-next PR.

No conflicts nor adjacent changes.

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
commit f6bd8faeb1
Paolo Abeni, 2025-05-28 10:03:53 +02:00
24 changed files with 210 additions and 61 deletions


@@ -2883,7 +2883,15 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth,
if (err)
return err;
return register_netdev(dev);
err = register_netdev(dev);
if (err)
goto free_metadata_dst;
return 0;
free_metadata_dst:
airoha_metadata_dst_free(port);
return err;
}
static int airoha_probe(struct platform_device *pdev)
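
The hunk above converts the tail of airoha_alloc_gdm_port() to the usual goto-based unwind: when register_netdev() fails, the metadata dst set up earlier for the port is released before the error is returned, instead of being leaked. A minimal, self-contained C sketch of that unwind pattern; the struct and helper names below are invented for illustration, not the driver's API:

#include <stdio.h>
#include <stdlib.h>

struct port { void *metadata_dst; };

/* Stand-ins for the two driver steps; both are hypothetical. */
static int alloc_metadata_dst(struct port *p)
{
    p->metadata_dst = malloc(64);
    return p->metadata_dst ? 0 : -1;
}

static int register_device(struct port *p)
{
    (void)p;
    return -1;          /* pretend registration fails */
}

static int setup_port(struct port *p)
{
    int err;

    err = alloc_metadata_dst(p);
    if (err)
        return err;     /* nothing acquired yet, plain return is fine */

    err = register_device(p);
    if (err)
        goto free_metadata_dst;   /* undo the earlier allocation */

    return 0;

free_metadata_dst:
    free(p->metadata_dst);
    p->metadata_dst = NULL;
    return err;
}

int main(void)
{
    struct port p = { 0 };

    printf("setup_port() = %d\n", setup_port(&p));
    return 0;
}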


@@ -5283,7 +5283,11 @@ static int macb_probe(struct platform_device *pdev)
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
if (err) {
dev_err(&pdev->dev, "failed to set DMA mask\n");
goto err_out_free_netdev;
}
bp->hw_dma_cap |= HW_DMA_CAP_64B;
}
#endif


@@ -143,6 +143,8 @@ static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
mutex_unlock(&rvu->mbox_lock);
return 0;


@@ -272,6 +272,8 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pfid);
otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
mutex_unlock(&rvu->mbox_lock);
} while (pfmap);
}


@@ -60,6 +60,8 @@ static int rvu_rep_up_notify(struct rvu *rvu, struct rep_event *event)
otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
mutex_unlock(&rvu->mbox_lock);
return 0;
}


@@ -1638,6 +1638,7 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force
if (!node->is_static)
dwrr_del_node = true;
WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
/* destroy the leaf node */
otx2_qos_disable_sq(pfvf, qid);
otx2_qos_destroy_node(pfvf, node);
@@ -1682,9 +1683,6 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force
}
kfree(new_cfg);
/* update tx_real_queues */
otx2_qos_update_tx_netdev_queues(pfvf);
return 0;
}


@@ -256,6 +256,26 @@ out:
return err;
}
static int otx2_qos_nix_npa_ndc_sync(struct otx2_nic *pfvf)
{
struct ndc_sync_op *req;
int rc;
mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_ndc_sync_op(&pfvf->mbox);
if (!req) {
mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
req->nix_lf_tx_sync = true;
req->npa_lf_sync = true;
rc = otx2_sync_mbox_msg(&pfvf->mbox);
mutex_unlock(&pfvf->mbox.lock);
return rc;
}
void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx)
{
struct otx2_qset *qset = &pfvf->qset;
@@ -285,6 +305,8 @@ void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx)
otx2_qos_sqb_flush(pfvf, sq_idx);
otx2_smq_flush(pfvf, otx2_get_smq_idx(pfvf, sq_idx));
/* NIX/NPA NDC sync */
otx2_qos_nix_npa_ndc_sync(pfvf);
otx2_cleanup_tx_cqes(pfvf, cq);
mutex_lock(&pfvf->mbox.lock);


@@ -465,19 +465,22 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
int err;
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
mlx5_query_nic_vport_context(mdev, 0, out);
err = mlx5_query_nic_vport_context(mdev, 0, out);
if (err)
goto out;
*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
nic_vport_context.node_guid);
out:
kvfree(out);
return 0;
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
@@ -519,19 +522,22 @@ int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
int err;
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
mlx5_query_nic_vport_context(mdev, 0, out);
err = mlx5_query_nic_vport_context(mdev, 0, out);
if (err)
goto out;
*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.qkey_violation_counter);
out:
kvfree(out);
return 0;
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
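
Both mlx5 hunks above share one shape: the return value of mlx5_query_nic_vport_context() is now captured, and a single out: label frees the temporary query buffer on success and failure alike, with the real error code propagated instead of an unconditional return 0. A rough userspace sketch of that single-exit style; query_context() and the buffer size are placeholders, not the mlx5 API:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Placeholder for a firmware/device query that may fail. */
static int query_context(void *out, size_t outlen)
{
    memset(out, 0, outlen);
    return 0;
}

static int query_node_guid(uint64_t *node_guid)
{
    size_t outlen = 256;    /* arbitrary buffer size for the sketch */
    void *out;
    int err;

    out = calloc(1, outlen);
    if (!out)
        return -ENOMEM;

    err = query_context(out, outlen);
    if (err)
        goto out;           /* skip the copy-out, but still free the buffer */

    memcpy(node_guid, out, sizeof(*node_guid));

out:
    free(out);
    return err;             /* 0 on success, the query's error otherwise */
}

int main(void)
{
    uint64_t guid = 0;

    printf("err=%d guid=%llu\n", query_node_guid(&guid), (unsigned long long)guid);
    return 0;
}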


@@ -1330,7 +1330,7 @@ static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu)
}
/* PHY */
static int lan743x_phy_reset(struct lan743x_adapter *adapter)
static int lan743x_hw_reset_phy(struct lan743x_adapter *adapter)
{
u32 data;
@@ -1346,11 +1346,6 @@ static int lan743x_phy_reset(struct lan743x_adapter *adapter)
50000, 1000000);
}
static int lan743x_phy_init(struct lan743x_adapter *adapter)
{
return lan743x_phy_reset(adapter);
}
static void lan743x_phy_interface_select(struct lan743x_adapter *adapter)
{
u32 id_rev;
@@ -3534,10 +3529,6 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
if (ret)
return ret;
ret = lan743x_phy_init(adapter);
if (ret)
return ret;
ret = lan743x_ptp_init(adapter);
if (ret)
return ret;
@@ -3674,6 +3665,10 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev,
if (ret)
goto cleanup_pci;
ret = lan743x_hw_reset_phy(adapter);
if (ret)
goto cleanup_pci;
ret = lan743x_hardware_init(adapter, pdev);
if (ret)
goto cleanup_pci;


@@ -353,6 +353,11 @@ static void lan966x_ifh_set_rew_op(void *ifh, u64 rew_op)
lan966x_ifh_set(ifh, rew_op, IFH_POS_REW_CMD, IFH_WID_REW_CMD);
}
static void lan966x_ifh_set_oam_type(void *ifh, u64 oam_type)
{
lan966x_ifh_set(ifh, oam_type, IFH_POS_PDU_TYPE, IFH_WID_PDU_TYPE);
}
static void lan966x_ifh_set_timestamp(void *ifh, u64 timestamp)
{
lan966x_ifh_set(ifh, timestamp, IFH_POS_TIMESTAMP, IFH_WID_TIMESTAMP);
@@ -380,6 +385,7 @@ static netdev_tx_t lan966x_port_xmit(struct sk_buff *skb,
return err;
lan966x_ifh_set_rew_op(ifh, LAN966X_SKB_CB(skb)->rew_op);
lan966x_ifh_set_oam_type(ifh, LAN966X_SKB_CB(skb)->pdu_type);
lan966x_ifh_set_timestamp(ifh, LAN966X_SKB_CB(skb)->ts_id);
}


@@ -75,6 +75,10 @@
#define IFH_REW_OP_ONE_STEP_PTP 0x3
#define IFH_REW_OP_TWO_STEP_PTP 0x4
#define IFH_PDU_TYPE_NONE 0
#define IFH_PDU_TYPE_IPV4 7
#define IFH_PDU_TYPE_IPV6 8
#define FDMA_RX_DCB_MAX_DBS 1
#define FDMA_TX_DCB_MAX_DBS 1
@@ -254,6 +258,7 @@ struct lan966x_phc {
struct lan966x_skb_cb {
u8 rew_op;
u8 pdu_type;
u16 ts_id;
unsigned long jiffies;
};


@@ -322,34 +322,55 @@ void lan966x_ptp_hwtstamp_get(struct lan966x_port *port,
*cfg = phc->hwtstamp_config;
}
static int lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb)
static void lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb,
u8 *rew_op, u8 *pdu_type)
{
struct ptp_header *header;
u8 msgtype;
int type;
if (port->ptp_tx_cmd == IFH_REW_OP_NOOP)
return IFH_REW_OP_NOOP;
if (port->ptp_tx_cmd == IFH_REW_OP_NOOP) {
*rew_op = IFH_REW_OP_NOOP;
*pdu_type = IFH_PDU_TYPE_NONE;
return;
}
type = ptp_classify_raw(skb);
if (type == PTP_CLASS_NONE)
return IFH_REW_OP_NOOP;
if (type == PTP_CLASS_NONE) {
*rew_op = IFH_REW_OP_NOOP;
*pdu_type = IFH_PDU_TYPE_NONE;
return;
}
header = ptp_parse_header(skb, type);
if (!header)
return IFH_REW_OP_NOOP;
if (!header) {
*rew_op = IFH_REW_OP_NOOP;
*pdu_type = IFH_PDU_TYPE_NONE;
return;
}
if (port->ptp_tx_cmd == IFH_REW_OP_TWO_STEP_PTP)
return IFH_REW_OP_TWO_STEP_PTP;
if (type & PTP_CLASS_L2)
*pdu_type = IFH_PDU_TYPE_NONE;
if (type & PTP_CLASS_IPV4)
*pdu_type = IFH_PDU_TYPE_IPV4;
if (type & PTP_CLASS_IPV6)
*pdu_type = IFH_PDU_TYPE_IPV6;
if (port->ptp_tx_cmd == IFH_REW_OP_TWO_STEP_PTP) {
*rew_op = IFH_REW_OP_TWO_STEP_PTP;
return;
}
/* If it is sync and run 1 step then set the correct operation,
* otherwise run as 2 step
*/
msgtype = ptp_get_msgtype(header, type);
if ((msgtype & 0xf) == 0)
return IFH_REW_OP_ONE_STEP_PTP;
if ((msgtype & 0xf) == 0) {
*rew_op = IFH_REW_OP_ONE_STEP_PTP;
return;
}
return IFH_REW_OP_TWO_STEP_PTP;
*rew_op = IFH_REW_OP_TWO_STEP_PTP;
}
static void lan966x_ptp_txtstamp_old_release(struct lan966x_port *port)
@@ -374,10 +395,12 @@ int lan966x_ptp_txtstamp_request(struct lan966x_port *port,
{
struct lan966x *lan966x = port->lan966x;
unsigned long flags;
u8 pdu_type;
u8 rew_op;
rew_op = lan966x_ptp_classify(port, skb);
lan966x_ptp_classify(port, skb, &rew_op, &pdu_type);
LAN966X_SKB_CB(skb)->rew_op = rew_op;
LAN966X_SKB_CB(skb)->pdu_type = pdu_type;
if (rew_op != IFH_REW_OP_TWO_STEP_PTP)
return 0;
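
After the rewrite, lan966x_ptp_classify() reports both the rewriter operation and the PDU type through output pointers, and every early-exit path leaves the pair at the NOOP/NONE defaults; the caller then stores both values in the skb control block for the IFH. A compact sketch of that out-parameter shape, with invented types standing in for the skb, the PTP classifier and the IFH constants:

#include <stdio.h>

enum rew_op   { REW_OP_NOOP, REW_OP_ONE_STEP, REW_OP_TWO_STEP };
enum pdu_kind { PDU_NONE, PDU_IPV4, PDU_IPV6 };

struct fake_pkt {
    int is_ptp;     /* stand-in for ptp_classify_raw() finding a PTP frame */
    int is_ipv4;    /* otherwise treated as IPv6 in this toy model */
    int is_sync;    /* stand-in for "message type is Sync" */
};

static void classify(const struct fake_pkt *pkt, int one_step_mode,
                     enum rew_op *rew_op, enum pdu_kind *pdu_type)
{
    /* Defaults cover every early return. */
    *rew_op = REW_OP_NOOP;
    *pdu_type = PDU_NONE;

    if (!pkt->is_ptp)
        return;

    *pdu_type = pkt->is_ipv4 ? PDU_IPV4 : PDU_IPV6;

    /* Only Sync frames can be one-step stamped; everything else is two-step. */
    *rew_op = (one_step_mode && pkt->is_sync) ? REW_OP_ONE_STEP : REW_OP_TWO_STEP;
}

int main(void)
{
    struct fake_pkt pkt = { .is_ptp = 1, .is_ipv4 = 1, .is_sync = 1 };
    enum rew_op op;
    enum pdu_kind pdu;

    classify(&pkt, 1, &op, &pdu);
    printf("rew_op=%d pdu_type=%d\n", op, pdu);
    return 0;
}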


@@ -880,7 +880,7 @@ static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
dev_consume_skb_any(skbuf_dma->skb);
netif_txq_completed_wake(txq, 1, len,
CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
2 * MAX_SKB_FRAGS);
2);
}
/**
@@ -914,7 +914,7 @@ axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
dma_dev = lp->tx_chan->device;
sg_len = skb_shinfo(skb)->nr_frags + 1;
if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) {
if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= 1) {
netif_stop_queue(ndev);
if (net_ratelimit())
netdev_warn(ndev, "TX ring unexpectedly full\n");
@@ -964,7 +964,7 @@ axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
txq = skb_get_tx_queue(lp->ndev, skb);
netdev_tx_sent_queue(txq, skb->len);
netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS);
1, 2);
dmaengine_submit(dma_tx_desc);
dma_async_issue_pending(lp->tx_chan);
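
One way to read the threshold changes above: in the dmaengine path a single TX ring slot tracks a whole skb (head plus all fragments go out as one scatter-gather submission), so a packet needs one free slot rather than MAX_SKB_FRAGS + 1 of them, and the stop/wake thresholds shrink to 1 and 2 accordingly. That reading is inferred from the hunks themselves. A toy model of the arithmetic, with local copies of the kernel's circular-buffer macros and an arbitrary ring size:

#include <stdio.h>

/* Local copies of the helpers from linux/circ_buf.h; size must be a power of two. */
#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

#define TX_BD_NUM_MAX 64    /* ring size chosen for the example only */

/* Mirrors the driver's "CIRC_SPACE(...) <= stop_thrs" style check. */
static int should_stop(unsigned int head, unsigned int tail, unsigned int stop_thrs)
{
    return CIRC_SPACE(head, tail, TX_BD_NUM_MAX) <= stop_thrs;
}

static int may_wake(unsigned int head, unsigned int tail, unsigned int start_thrs)
{
    return CIRC_SPACE(head, tail, TX_BD_NUM_MAX) >= start_thrs;
}

int main(void)
{
    unsigned int head = 10, tail = 12;  /* exactly one free slot in the ring */

    /* One slot per skb: stop at <= 1 free slot, wake again at >= 2 free slots. */
    printf("space=%u stop=%d wake=%d\n",
           CIRC_SPACE(head, tail, TX_BD_NUM_MAX),
           should_stop(head, tail, 1), may_wake(head, tail, 2));
    return 0;
}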


@@ -257,6 +257,8 @@ static int mctp_usb_open(struct net_device *dev)
WRITE_ONCE(mctp_usb->stopped, false);
netif_start_queue(dev);
return mctp_usb_rx_queue(mctp_usb, GFP_KERNEL);
}


@@ -946,7 +946,9 @@ static int vsc85xx_ip1_conf(struct phy_device *phydev, enum ts_blk blk,
/* UDP checksum offset in IPv4 packet
* according to: https://tools.ietf.org/html/rfc768
*/
val |= IP1_NXT_PROT_UDP_CHKSUM_OFF(26) | IP1_NXT_PROT_UDP_CHKSUM_CLEAR;
val |= IP1_NXT_PROT_UDP_CHKSUM_OFF(26);
if (enable)
val |= IP1_NXT_PROT_UDP_CHKSUM_CLEAR;
vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM,
val);
@@ -1166,18 +1168,24 @@ static void vsc85xx_txtstamp(struct mii_timestamper *mii_ts,
container_of(mii_ts, struct vsc8531_private, mii_ts);
if (!vsc8531->ptp->configured)
return;
goto out;
if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF) {
kfree_skb(skb);
return;
}
if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF)
goto out;
if (vsc8531->ptp->tx_type == HWTSTAMP_TX_ONESTEP_SYNC)
if (ptp_msg_is_sync(skb, type))
goto out;
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
mutex_lock(&vsc8531->ts_lock);
__skb_queue_tail(&vsc8531->ptp->tx_queue, skb);
mutex_unlock(&vsc8531->ts_lock);
return;
out:
kfree_skb(skb);
}
static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,


@@ -1749,8 +1749,10 @@ void phy_detach(struct phy_device *phydev)
struct module *ndev_owner = NULL;
struct mii_bus *bus;
if (phydev->devlink)
if (phydev->devlink) {
device_link_del(phydev->devlink);
phydev->devlink = NULL;
}
if (phydev->sysfs_links) {
if (dev)
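
The phy_detach() hunk adds braces and clears phydev->devlink right after device_link_del(), presumably so that a later detach path or any other code that tests the pointer cannot act on the already-deleted link. The same make-release-idempotent idiom in plain C, with a hypothetical resource:

#include <stdlib.h>

struct consumer {
    void *link;     /* hypothetical handle to some consumer/supplier link */
};

static void link_del(void *link)
{
    free(link);
}

/* Safe to call any number of times: the handle is cleared after release. */
static void consumer_unlink(struct consumer *c)
{
    if (c->link) {
        link_del(c->link);
        c->link = NULL;
    }
}

int main(void)
{
    struct consumer c = { .link = malloc(32) };

    consumer_unlink(&c);
    consumer_unlink(&c);    /* second call is a no-op, not a double free */
    return 0;
}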


@@ -140,6 +140,7 @@ struct virtio_vsock_sock {
u32 last_fwd_cnt;
u32 rx_bytes;
u32 buf_alloc;
u32 buf_used;
struct sk_buff_head rx_queue;
u32 msg_count;
};


@@ -1165,6 +1165,9 @@ int netlbl_conn_setattr(struct sock *sk,
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
if (sk->sk_family != AF_INET6)
return -EAFNOSUPPORT;
addr6 = (struct sockaddr_in6 *)addr;
entry = netlbl_domhsh_getentry_af6(secattr->domain,
&addr6->sin6_addr);
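
The netlbl_conn_setattr() hunk rejects the case where an AF_INET6 address is supplied for a socket whose sk_family is not AF_INET6, before the sockaddr is cast and its sin6_addr dereferenced. The same defensive check in a small userspace form; validate_v6_addr() is an invented helper, not a NetLabel function:

#include <errno.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

/*
 * Only treat the sockaddr as a sockaddr_in6 when both the address and the
 * socket claim to be IPv6; otherwise the cast below would read garbage.
 */
static int validate_v6_addr(int sock_family, const struct sockaddr *addr,
                            struct in6_addr *out)
{
    const struct sockaddr_in6 *a6;

    if (addr->sa_family != AF_INET6 || sock_family != AF_INET6)
        return -EAFNOSUPPORT;

    a6 = (const struct sockaddr_in6 *)addr;
    memcpy(out, &a6->sin6_addr, sizeof(*out));
    return 0;
}

int main(void)
{
    struct sockaddr_in6 sa = { .sin6_family = AF_INET6, .sin6_addr = IN6ADDR_LOOPBACK_INIT };
    struct in6_addr dst;

    printf("v6 socket: %d\n", validate_v6_addr(AF_INET6, (struct sockaddr *)&sa, &dst));
    printf("v4 socket: %d\n", validate_v6_addr(AF_INET, (struct sockaddr *)&sa, &dst));
    return 0;
}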


@@ -788,7 +788,7 @@ static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
memset(&key->ipv4, 0, sizeof(key->ipv4));
}
} else if (eth_p_mpls(key->eth.type)) {
u8 label_count = 1;
size_t label_count = 1;
memset(&key->mpls, 0, sizeof(key->mpls));
skb_set_inner_network_header(skb, skb->mac_len);


@@ -3713,15 +3713,15 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
}
static void packet_dev_mclist_delete(struct net_device *dev,
struct packet_mclist **mlp)
struct packet_mclist **mlp,
struct list_head *list)
{
struct packet_mclist *ml;
while ((ml = *mlp) != NULL) {
if (ml->ifindex == dev->ifindex) {
packet_dev_mc(dev, ml, -1);
list_add(&ml->remove_list, list);
*mlp = ml->next;
kfree(ml);
} else
mlp = &ml->next;
}
@@ -3769,6 +3769,7 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
memcpy(i->addr, mreq->mr_address, i->alen);
memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
i->count = 1;
INIT_LIST_HEAD(&i->remove_list);
i->next = po->mclist;
po->mclist = i;
err = packet_dev_mc(dev, i, 1);
@@ -4233,9 +4234,11 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
static int packet_notifier(struct notifier_block *this,
unsigned long msg, void *ptr)
{
struct sock *sk;
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct net *net = dev_net(dev);
struct packet_mclist *ml, *tmp;
LIST_HEAD(mclist);
struct sock *sk;
rcu_read_lock();
sk_for_each_rcu(sk, &net->packet.sklist) {
@@ -4244,7 +4247,8 @@ static int packet_notifier(struct notifier_block *this,
switch (msg) {
case NETDEV_UNREGISTER:
if (po->mclist)
packet_dev_mclist_delete(dev, &po->mclist);
packet_dev_mclist_delete(dev, &po->mclist,
&mclist);
fallthrough;
case NETDEV_DOWN:
@@ -4277,6 +4281,13 @@ static int packet_notifier(struct notifier_block *this,
}
}
rcu_read_unlock();
/* packet_dev_mc might grab instance locks so can't run under rcu */
list_for_each_entry_safe(ml, tmp, &mclist, remove_list) {
packet_dev_mc(dev, ml, -1);
kfree(ml);
}
return NOTIFY_DONE;
}


@@ -11,6 +11,7 @@ struct packet_mclist {
unsigned short type;
unsigned short alen;
unsigned char addr[MAX_ADDR_LEN];
struct list_head remove_list;
};
/* kbdq - kernel block descriptor queue */
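
Taken together, the two af_packet hunks above split multicast-list teardown into two phases: while the RCU read lock is held, matching entries are only unlinked and collected on a local list through the new remove_list member, and only after rcu_read_unlock() does packet_dev_mc() run and the entries get freed, since (per the comment in the hunk) packet_dev_mc() may grab locks that cannot be taken under RCU. A small self-contained sketch of that collect-then-process pattern, using a pthread mutex in place of RCU and invented node and callback names:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
    int ifindex;
    struct node *next;          /* main singly linked list */
    struct node *remove_next;   /* threads the node onto the local removal list */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

/* Stand-in for packet_dev_mc(dev, ml, -1): may sleep or take other locks. */
static void heavy_release(struct node *n)
{
    printf("releasing ifindex %d\n", n->ifindex);
}

static void remove_ifindex(int ifindex)
{
    struct node **pp, *n, *pending = NULL;

    /* Phase 1: unlink matches while holding the list lock, defer the heavy work. */
    pthread_mutex_lock(&list_lock);
    for (pp = &head; (n = *pp) != NULL; ) {
        if (n->ifindex == ifindex) {
            *pp = n->next;
            n->remove_next = pending;
            pending = n;
        } else {
            pp = &n->next;
        }
    }
    pthread_mutex_unlock(&list_lock);

    /* Phase 2: run the release callback and free with no locks held. */
    while (pending) {
        n = pending;
        pending = n->remove_next;
        heavy_release(n);
        free(n);
    }
}

int main(void)
{
    for (int i = 0; i < 3; i++) {
        struct node *n = calloc(1, sizeof(*n));

        n->ifindex = (i == 1) ? 7 : 3;
        n->next = head;
        head = n;
    }
    remove_ifindex(3);
    return 0;
}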


@@ -175,6 +175,11 @@ struct hfsc_sched {
#define HT_INFINITY 0xffffffffffffffffULL /* infinite time value */
static bool cl_in_el_or_vttree(struct hfsc_class *cl)
{
return ((cl->cl_flags & HFSC_FSC) && cl->cl_nactive) ||
((cl->cl_flags & HFSC_RSC) && !RB_EMPTY_NODE(&cl->el_node));
}
/*
* eligible tree holds backlogged classes being sorted by their eligible times.
@@ -1040,6 +1045,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (cl == NULL)
return -ENOBUFS;
RB_CLEAR_NODE(&cl->el_node);
err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
if (err) {
kfree(cl);
@@ -1572,7 +1579,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
sch->qstats.backlog += len;
sch->q.qlen++;
if (first && !cl->cl_nactive) {
if (first && !cl_in_el_or_vttree(cl)) {
if (cl->cl_flags & HFSC_RSC)
init_ed(cl, len);
if (cl->cl_flags & HFSC_FSC)


@@ -440,18 +440,20 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
u32 len)
{
if (vvs->rx_bytes + len > vvs->buf_alloc)
if (vvs->buf_used + len > vvs->buf_alloc)
return false;
vvs->rx_bytes += len;
vvs->buf_used += len;
return true;
}
static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs,
u32 len)
u32 bytes_read, u32 bytes_dequeued)
{
vvs->rx_bytes -= len;
vvs->fwd_cnt += len;
vvs->rx_bytes -= bytes_read;
vvs->buf_used -= bytes_dequeued;
vvs->fwd_cnt += bytes_dequeued;
}
void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb)
@@ -580,11 +582,11 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
size_t len)
{
struct virtio_vsock_sock *vvs = vsk->trans;
size_t bytes, total = 0;
struct sk_buff *skb;
u32 fwd_cnt_delta;
bool low_rx_bytes;
int err = -EFAULT;
size_t total = 0;
u32 free_space;
spin_lock_bh(&vvs->rx_lock);
@@ -596,6 +598,8 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
}
while (total < len && !skb_queue_empty(&vvs->rx_queue)) {
size_t bytes, dequeued = 0;
skb = skb_peek(&vvs->rx_queue);
bytes = min_t(size_t, len - total,
@@ -619,12 +623,12 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
VIRTIO_VSOCK_SKB_CB(skb)->offset += bytes;
if (skb->len == VIRTIO_VSOCK_SKB_CB(skb)->offset) {
u32 pkt_len = le32_to_cpu(virtio_vsock_hdr(skb)->len);
virtio_transport_dec_rx_pkt(vvs, pkt_len);
dequeued = le32_to_cpu(virtio_vsock_hdr(skb)->len);
__skb_unlink(skb, &vvs->rx_queue);
consume_skb(skb);
}
virtio_transport_dec_rx_pkt(vvs, bytes, dequeued);
}
fwd_cnt_delta = vvs->fwd_cnt - vvs->last_fwd_cnt;
@@ -780,7 +784,7 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
msg->msg_flags |= MSG_EOR;
}
virtio_transport_dec_rx_pkt(vvs, pkt_len);
virtio_transport_dec_rx_pkt(vvs, pkt_len, pkt_len);
kfree_skb(skb);
}
@@ -1717,6 +1721,7 @@ int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t recv_actor
struct sock *sk = sk_vsock(vsk);
struct virtio_vsock_hdr *hdr;
struct sk_buff *skb;
u32 pkt_len;
int off = 0;
int err;
@@ -1734,7 +1739,8 @@ int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t recv_actor
if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM)
vvs->msg_count--;
virtio_transport_dec_rx_pkt(vvs, le32_to_cpu(hdr->len));
pkt_len = le32_to_cpu(hdr->len);
virtio_transport_dec_rx_pkt(vvs, pkt_len, pkt_len);
spin_unlock_bh(&vvs->rx_lock);
virtio_transport_send_credit_update(vsk);
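
The virtio-vsock change keeps two receive-side counters instead of one: rx_bytes still tracks unread payload, while the new buf_used tracks how much of buf_alloc is tied up by skbs sitting on rx_queue, dropping only when an skb is actually dequeued and freed. A partially read skb therefore keeps counting against the receive buffer, and credit (fwd_cnt) is returned only for fully consumed packets. A userspace mock of the accounting that mirrors the inc/dec helpers above, with locking omitted:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct rx_credit {
    uint32_t rx_bytes;   /* unread payload currently queued        */
    uint32_t buf_used;   /* buffer space held by queued skbs       */
    uint32_t buf_alloc;  /* advertised receive buffer size         */
    uint32_t fwd_cnt;    /* bytes whose credit went back to sender */
};

static bool inc_rx_pkt(struct rx_credit *c, uint32_t len)
{
    if (c->buf_used + len > c->buf_alloc)
        return false;            /* no room: the packet would be dropped */
    c->rx_bytes += len;
    c->buf_used += len;
    return true;
}

static void dec_rx_pkt(struct rx_credit *c, uint32_t bytes_read, uint32_t bytes_dequeued)
{
    c->rx_bytes -= bytes_read;       /* reader consumed this much payload   */
    c->buf_used -= bytes_dequeued;   /* released only when the skb is freed */
    c->fwd_cnt  += bytes_dequeued;   /* credit returned to the sender       */
}

int main(void)
{
    struct rx_credit c = { .buf_alloc = 4096 };

    assert(inc_rx_pkt(&c, 1000));    /* a 1000-byte packet is queued        */
    dec_rx_pkt(&c, 600, 0);          /* partial read: no credit returned    */
    dec_rx_pkt(&c, 400, 1000);       /* rest read, skb freed: credit back   */
    printf("rx_bytes=%u buf_used=%u fwd_cnt=%u\n",
           c.rx_bytes, c.buf_used, c.fwd_cnt);
    return 0;
}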


@@ -600,5 +600,40 @@
"matchPattern": "qdisc hfsc",
"matchCount": "1",
"teardown": ["$TC qdisc del dev $DEV1 root handle 1: drr"]
},
{
"id": "309e",
"name": "Test HFSC eltree double add with reentrant enqueue behaviour on netem",
"category": [
"qdisc",
"hfsc"
],
"plugins": {
"requires": "nsPlugin"
},
"setup": [
"$IP link set dev $DUMMY up || true",
"$IP addr add 10.10.11.10/24 dev $DUMMY || true",
"$TC qdisc add dev $DUMMY root handle 1: tbf rate 8bit burst 100b latency 1s",
"$TC qdisc add dev $DUMMY parent 1:0 handle 2:0 hfsc",
"ping -I $DUMMY -f -c10 -s48 -W0.001 10.10.11.1 || true",
"$TC class add dev $DUMMY parent 2:0 classid 2:1 hfsc rt m2 20Kbit",
"$TC qdisc add dev $DUMMY parent 2:1 handle 3:0 netem duplicate 100%",
"$TC class add dev $DUMMY parent 2:0 classid 2:2 hfsc rt m2 20Kbit",
"$TC filter add dev $DUMMY parent 2:0 protocol ip prio 1 u32 match ip dst 10.10.11.2/32 flowid 2:1",
"$TC filter add dev $DUMMY parent 2:0 protocol ip prio 2 u32 match ip dst 10.10.11.3/32 flowid 2:2",
"ping -c 1 10.10.11.2 -I$DUMMY > /dev/null || true",
"$TC filter del dev $DUMMY parent 2:0 protocol ip prio 1",
"$TC class del dev $DUMMY classid 2:1",
"ping -c 1 10.10.11.3 -I$DUMMY > /dev/null || true"
],
"cmdUnderTest": "$TC class change dev $DUMMY parent 2:0 classid 2:2 hfsc sc m2 20Kbit",
"expExitCode": "0",
"verifyCmd": "$TC -j class ls dev $DUMMY classid 2:1",
"matchJSON": [],
"teardown": [
"$TC qdisc del dev $DUMMY handle 1:0 root",
"$IP addr del 10.10.10.10/24 dev $DUMMY || true"
]
}
]