usb: xhci: remove option to change a default ring's TRB cycle bit

[ Upstream commit e1b0fa8639 ]

The TRB cycle bit indicates TRB ownership by the Host Controller (HC) or
Host Controller Driver (HCD). New rings are initialized with 'cycle_state'
equal to one, and all their TRBs' cycle bits are set to zero. When handling
ring expansion, set the source ring cycle bits to the same value as the
destination ring.
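
(Background, not part of this patch: ownership is decided by comparing a TRB's
cycle bit with the ring's 'cycle_state'. The sketch below illustrates that
test; the helper name 'trb_owned_by_hcd' is hypothetical, while 'union
xhci_trb', 'TRB_CYCLE' and 'cycle_state' are the driver's own identifiers from
drivers/usb/host/xhci.h.)

        /* Hypothetical helper, for illustration only: a TRB has been
         * handed back to the HCD once its cycle bit matches the ring's
         * cycle_state; the event handlers use the same comparison.
         */
        static bool trb_owned_by_hcd(struct xhci_ring *ring, union xhci_trb *trb)
        {
                return (le32_to_cpu(trb->event_cmd.flags) & TRB_CYCLE) ==
                       ring->cycle_state;
        }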

Move the cycle bit setting from xhci_segment_alloc() to xhci_link_rings(),
and remove the 'cycle_state' argument from xhci_initialize_ring_info().
The xhci_segment_alloc() function uses kzalloc_node() to allocate segments,
ensuring that all TRB cycle bits are initialized to zero.
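
(Why this is safe, sketched rather than quoted from the patch: an empty TRB
slot must read as not yet handed over, i.e. its cycle bit must differ from the
producer's 'cycle_state'. Zeroed TRBs satisfy that for a new ring, whose
'cycle_state' starts at 1; only expansion onto a ring whose 'cycle_state' has
toggled to 0 needs the fixup that xhci_link_rings() now applies to the freshly
allocated segments:)

        /* Illustrative condensation of the xhci_link_rings() hunk below,
         * applied per TRB of each newly allocated segment:
         *   cycle_state == 1: kzalloc_node()'s zeroed cycle bits already differ
         *   cycle_state == 0: OR in TRB_CYCLE so the slot reads cycle = 1
         */
        if (ring->cycle_state == 0)
                trb->link.control |= cpu_to_le32(TRB_CYCLE);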

Signed-off-by: Niklas Neronin <niklas.neronin@linux.intel.com>
Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Link: https://lore.kernel.org/r/20241106101459.775897-12-mathias.nyman@linux.intel.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Stable-dep-of: a5c98e8b1398 ("xhci: dbc: Fix full DbC transfer ring after several reconnects")
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -471,7 +471,7 @@ xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
 		trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
 	}
 	INIT_LIST_HEAD(&ring->td_list);
-	xhci_initialize_ring_info(ring, 1);
+	xhci_initialize_ring_info(ring);
 	return ring;
 dma_fail:
 	kfree(seg);

diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -27,14 +27,12 @@
  * "All components of all Command and Transfer TRBs shall be initialized to '0'"
  */
 static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
-					       unsigned int cycle_state,
 					       unsigned int max_packet,
 					       unsigned int num,
 					       gfp_t flags)
 {
 	struct xhci_segment *seg;
 	dma_addr_t dma;
-	int i;
 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
 
 	seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev));
@@ -56,11 +54,6 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
 			return NULL;
 		}
 	}
-	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
-	if (cycle_state == 0) {
-		for (i = 0; i < TRBS_PER_SEGMENT; i++)
-			seg->trbs[i].link.control = cpu_to_le32(TRB_CYCLE);
-	}
 	seg->num = num;
 	seg->dma = dma;
 	seg->next = NULL;
@@ -138,6 +131,14 @@ static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
 
 	chain_links = xhci_link_chain_quirk(xhci, ring->type);
 
+	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
+	if (ring->cycle_state == 0) {
+		xhci_for_each_ring_seg(first, seg) {
+			for (int i = 0; i < TRBS_PER_SEGMENT; i++)
+				seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
+		}
+	}
+
 	next = ring->enq_seg->next;
 	xhci_link_segments(ring->enq_seg, first, ring->type, chain_links);
 	xhci_link_segments(last, next, ring->type, chain_links);
@@ -287,8 +288,7 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
 	kfree(ring);
 }
 
-void xhci_initialize_ring_info(struct xhci_ring *ring,
-			       unsigned int cycle_state)
+void xhci_initialize_ring_info(struct xhci_ring *ring)
 {
 	/* The ring is empty, so the enqueue pointer == dequeue pointer */
 	ring->enqueue = ring->first_seg->trbs;
@@ -302,7 +302,7 @@ void xhci_initialize_ring_info(struct xhci_ring *ring,
 	 * New rings are initialized with cycle state equal to 1; if we are
 	 * handling ring expansion, set the cycle state equal to the old ring.
 	 */
-	ring->cycle_state = cycle_state;
+	ring->cycle_state = 1;
 
 	/*
 	 * Each segment has a link TRB, and leave an extra TRB for SW
@@ -317,7 +317,6 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
 					struct xhci_segment **first,
 					struct xhci_segment **last,
 					unsigned int num_segs,
-					unsigned int cycle_state,
 					enum xhci_ring_type type,
 					unsigned int max_packet,
 					gfp_t flags)
@@ -328,7 +327,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
 
 	chain_links = xhci_link_chain_quirk(xhci, type);
 
-	prev = xhci_segment_alloc(xhci, cycle_state, max_packet, num, flags);
+	prev = xhci_segment_alloc(xhci, max_packet, num, flags);
 	if (!prev)
 		return -ENOMEM;
 	num++;
@@ -337,8 +336,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
 	while (num < num_segs) {
 		struct xhci_segment *next;
 
-		next = xhci_segment_alloc(xhci, cycle_state, max_packet, num,
-					  flags);
+		next = xhci_segment_alloc(xhci, max_packet, num, flags);
 		if (!next)
 			goto free_segments;
 
@@ -363,9 +361,8 @@ free_segments:
  * Set the end flag and the cycle toggle bit on the last segment.
  * See section 4.9.1 and figures 15 and 16.
  */
-struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
-		unsigned int num_segs, unsigned int cycle_state,
-		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
+struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs,
+		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
 {
 	struct xhci_ring *ring;
 	int ret;
@@ -383,7 +380,7 @@ struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 		return ring;
 
 	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg, &ring->last_seg, num_segs,
-					   cycle_state, type, max_packet, flags);
+					   type, max_packet, flags);
 	if (ret)
 		goto fail;
 
@@ -393,7 +390,7 @@ struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
 			cpu_to_le32(LINK_TOGGLE);
 	}
-	xhci_initialize_ring_info(ring, cycle_state);
+	xhci_initialize_ring_info(ring);
 	trace_xhci_ring_alloc(ring);
 	return ring;
 
@@ -421,8 +418,8 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	struct xhci_segment *last;
 	int ret;
 
-	ret = xhci_alloc_segments_for_ring(xhci, &first, &last, num_new_segs, ring->cycle_state,
-					   ring->type, ring->bounce_buf_len, flags);
+	ret = xhci_alloc_segments_for_ring(xhci, &first, &last, num_new_segs, ring->type,
+					   ring->bounce_buf_len, flags);
 	if (ret)
 		return -ENOMEM;
 
@@ -632,8 +629,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
 
 	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
 		stream_info->stream_rings[cur_stream] =
-			xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet,
-					mem_flags);
+			xhci_ring_alloc(xhci, 2, TYPE_STREAM, max_packet, mem_flags);
 		cur_ring = stream_info->stream_rings[cur_stream];
 		if (!cur_ring)
 			goto cleanup_rings;
@@ -974,7 +970,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	}
 
 	/* Allocate endpoint 0 ring */
-	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
+	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, TYPE_CTRL, 0, flags);
 	if (!dev->eps[0].ring)
 		goto fail;
 
@@ -1457,7 +1453,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 
 	/* Set up the endpoint ring */
 	virt_dev->eps[ep_index].new_ring =
-		xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
+		xhci_ring_alloc(xhci, 2, ring_type, max_packet, mem_flags);
 	if (!virt_dev->eps[ep_index].new_ring)
 		return -ENOMEM;
 
@@ -2266,7 +2262,7 @@ xhci_alloc_interrupter(struct xhci_hcd *xhci, unsigned int segs, gfp_t flags)
 	if (!ir)
 		return NULL;
 
-	ir->event_ring = xhci_ring_alloc(xhci, segs, 1, TYPE_EVENT, 0, flags);
+	ir->event_ring = xhci_ring_alloc(xhci, segs, TYPE_EVENT, 0, flags);
 	if (!ir->event_ring) {
 		xhci_warn(xhci, "Failed to allocate interrupter event ring\n");
 		kfree(ir);
@@ -2472,7 +2468,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 		goto fail;
 
 	/* Set up the command ring to have one segments for now. */
-	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
+	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, TYPE_COMMAND, 0, flags);
 	if (!xhci->cmd_ring)
 		goto fail;
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,

diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -769,7 +769,7 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
 		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= cpu_to_le32(~TRB_CYCLE);
 	}
 
-	xhci_initialize_ring_info(ring, 1);
+	xhci_initialize_ring_info(ring);
 	/*
 	 * Reset the hardware dequeue pointer.
 	 * Yes, this will need to be re-written after resume, but we're paranoid

diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1803,14 +1803,12 @@ void xhci_slot_copy(struct xhci_hcd *xhci,
 int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
 		struct usb_device *udev, struct usb_host_endpoint *ep,
 		gfp_t mem_flags);
-struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
-		unsigned int num_segs, unsigned int cycle_state,
+struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs,
 		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags);
 void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
 int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
 		unsigned int num_trbs, gfp_t flags);
-void xhci_initialize_ring_info(struct xhci_ring *ring,
-		unsigned int cycle_state);
+void xhci_initialize_ring_info(struct xhci_ring *ring);
 void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
 		struct xhci_virt_device *virt_dev,
 		unsigned int ep_index);
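
(Appendix, not part of the patch: a hedged usage sketch against the post-patch
prototypes above. The helper name 'example_alloc_and_grow' and its parameters
are hypothetical; xhci_ring_alloc(), xhci_ring_expansion() and
xhci_ring_free() are the real interfaces touched by this commit.)

        /* Hypothetical helper, for illustration only: allocate a two-segment
         * bulk ring with the post-patch API, then grow it by 'num_trbs' TRBs.
         */
        static struct xhci_ring *example_alloc_and_grow(struct xhci_hcd *xhci,
                                                        unsigned int max_packet,
                                                        unsigned int num_trbs,
                                                        gfp_t flags)
        {
                struct xhci_ring *ring;

                /* cycle_state is no longer passed; every new ring starts at 1 */
                ring = xhci_ring_alloc(xhci, 2, TYPE_BULK, max_packet, flags);
                if (!ring)
                        return NULL;

                /* expansion inherits the ring's current cycle state inside
                 * xhci_link_rings() instead of threading it through allocation
                 */
                if (xhci_ring_expansion(xhci, ring, num_trbs, flags)) {
                        xhci_ring_free(xhci, ring);
                        return NULL;
                }

                return ring;
        }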