Diffstat (limited to 'meta/recipes-kernel/linux/linux-omap-2.6.29/musb/0003-USB-musb-NAK-timeout-scheme-on-bulk-RX-endpoint.patch')
-rw-r--r--  meta/recipes-kernel/linux/linux-omap-2.6.29/musb/0003-USB-musb-NAK-timeout-scheme-on-bulk-RX-endpoint.patch  218
1 file changed, 218 insertions, 0 deletions
diff --git a/meta/recipes-kernel/linux/linux-omap-2.6.29/musb/0003-USB-musb-NAK-timeout-scheme-on-bulk-RX-endpoint.patch b/meta/recipes-kernel/linux/linux-omap-2.6.29/musb/0003-USB-musb-NAK-timeout-scheme-on-bulk-RX-endpoint.patch
new file mode 100644
index 0000000000..fadad9e44a
--- /dev/null
+++ b/meta/recipes-kernel/linux/linux-omap-2.6.29/musb/0003-USB-musb-NAK-timeout-scheme-on-bulk-RX-endpoint.patch
@@ -0,0 +1,218 @@
+From ba7b26e69f4bb41f10be444c5fded853330f82b5 Mon Sep 17 00:00:00 2001
+From: Ajay Kumar Gupta <ajay.gupta-l0cyMroinI0@public.gmane.org>
+Date: Tue, 24 Mar 2009 17:22:51 -0700
+Subject: [PATCH] USB: musb: NAK timeout scheme on bulk RX endpoint
+
+Fixes an endpoint starvation issue seen when more than one bulk QH is
+multiplexed on the reserved bulk RX endpoint, which is the normal case
+for devices like serial and ethernet adapters.
+
+This patch sets the NAK timeout interval for such QHs, and when a
+timeout triggers, the next QH will be scheduled.  (This resembles the
+bulk scheduling done in hardware by EHCI, OHCI, and UHCI.)
+
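+In outline, the timeout handler rotates the multiplexed queue.  A
+simplified sketch of that rotation (the actual handler added below
+also aborts any in-flight DMA and saves the data toggle first):
+
+	cur_qh = first_qh(&musb->in_bulk);	/* QH that just NAK-timed out */
+	list_move_tail(&cur_qh->ring, &musb->in_bulk);	/* rotate it to the back */
+	next_qh = first_qh(&musb->in_bulk);	/* a different QH is now head */
+	ep->rx_reinit = 1;			/* endpoint must be reprogrammed */
+	musb_start_urb(musb, 1, next_qh);	/* 1 == is_in, i.e. bulk RX */
+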
+This scheme doesn't work for devices which are connected to a high-
+to full-speed tree (transaction translator), as there is no NAK
+timeout interrupt from the musb controller for such devices.
+
+Tested with PIO, Inventra DMA, CPPI DMA.
+
+[ dbrownell-Rn4VEauK+AKRv+LV9MX5uipxlwaOVQ5f@public.gmane.org: fold in start_urb() update;
+  clarify only for bulk RX; don't accidentally clear WZC bits ]
+
+Signed-off-by: Ajay Kumar Gupta <ajay.gupta-l0cyMroinI0@public.gmane.org>
+Cc: Felipe Balbi <felipe.balbi-xNZwKgViW5gAvxtiuMwx3w@public.gmane.org>
+Signed-off-by: David Brownell <dbrownell-Rn4VEauK+AKRv+LV9MX5uipxlwaOVQ5f@public.gmane.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh-l3A5Bk7waGM@public.gmane.org>
+---
+ drivers/usb/musb/musb_host.c |  112 ++++++++++++++++++++++++++++++++----------
+ 1 files changed, 85 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
+index 6dbbd07..bd1d5ae 100644
+--- a/drivers/usb/musb/musb_host.c
++++ b/drivers/usb/musb/musb_host.c
+@@ -64,11 +64,8 @@
+  *
+  * - DMA (Mentor/OMAP) ...has at least toggle update problems
+  *
+- * - Still no traffic scheduling code to make NAKing for bulk or control
+- *   transfers unable to starve other requests; or to make efficient use
+- *   of hardware with periodic transfers.  (Note that network drivers
+- *   commonly post bulk reads that stay pending for a long time; these
+- *   would make very visible trouble.)
++ * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
++ *   starvation ... nothing yet for TX, interrupt, or bulk.
+  *
+  * - Not tested with HNP, but some SRP paths seem to behave.
+  *
+@@ -88,11 +85,8 @@
+  *
+  * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
+  * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
+- *
+  * (Yes, bulk _could_ use more of the endpoints than that, and would even
+- * benefit from it ... one remote device may easily be NAKing while others
+- * need to perform transfers in that same direction.  The same thing could
+- * be done in software though, assuming dma cooperates.)
++ * benefit from it.)
+  *
+  * INTERUPPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
+  * So far that scheduling is both dumb and optimistic: the endpoint will be
+@@ -201,8 +195,9 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
+ 		len = urb->iso_frame_desc[0].length;
+ 		break;
+ 	default:		/* bulk, interrupt */
+-		buf = urb->transfer_buffer;
+-		len = urb->transfer_buffer_length;
++		/* actual_length may be nonzero on retry paths */
++		buf = urb->transfer_buffer + urb->actual_length;
++		len = urb->transfer_buffer_length - urb->actual_length;
+ 	}
+ 
+ 	DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
+@@ -1045,7 +1040,8 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
+ 
+ 		/* NOTE: this code path would be a good place to PAUSE a
+ 		 * control transfer, if another one is queued, so that
+-		 * ep0 is more likely to stay busy.
++		 * ep0 is more likely to stay busy.  That's already done
++		 * for bulk RX transfers.
+ 		 *
+ 		 * if (qh->ring.next != &musb->control), then
+ 		 * we have a candidate... NAKing is *NOT* an error
+@@ -1197,6 +1193,7 @@ void musb_host_tx(struct musb *musb, u8 epnum)
+ 		/* NOTE: this code path would be a good place to PAUSE a
+ 		 * transfer, if there's some other (nonperiodic) tx urb
+ 		 * that could use this fifo.  (dma complicates it...)
++		 * That's already done for bulk RX transfers.
+ 		 *
+ 		 * if (bulk && qh->ring.next != &musb->out_bulk), then
+ 		 * we have a candidate... NAKing is *NOT* an error
+@@ -1358,6 +1355,50 @@ finish:
+ 
+ #endif
+ 
++/* Schedule next QH from musb->in_bulk and move the current qh to
++ * the end; avoids starvation for other endpoints.
++ */
++static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
++{
++	struct dma_channel	*dma;
++	struct urb		*urb;
++	void __iomem		*mbase = musb->mregs;
++	void __iomem		*epio = ep->regs;
++	struct musb_qh		*cur_qh, *next_qh;
++	u16			rx_csr;
++
++	musb_ep_select(mbase, ep->epnum);
++	dma = is_dma_capable() ? ep->rx_channel : NULL;
++
++	/* clear nak timeout bit */
++	rx_csr = musb_readw(epio, MUSB_RXCSR);
++	rx_csr |= MUSB_RXCSR_H_WZC_BITS;
++	rx_csr &= ~MUSB_RXCSR_DATAERROR;
++	musb_writew(epio, MUSB_RXCSR, rx_csr);
++
++	cur_qh = first_qh(&musb->in_bulk);
++	if (cur_qh) {
++		urb = next_urb(cur_qh);
++		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
++			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
++			musb->dma_controller->channel_abort(dma);
++			urb->actual_length += dma->actual_len;
++			dma->actual_len = 0L;
++		}
++		musb_save_toggle(ep, 1, urb);
++
++		/* move cur_qh to end of queue */
++		list_move_tail(&cur_qh->ring, &musb->in_bulk);
++
++		/* get the next qh from musb->in_bulk */
++		next_qh = first_qh(&musb->in_bulk);
++
++		/* set rx_reinit and schedule the next qh */
++		ep->rx_reinit = 1;
++		musb_start_urb(musb, 1, next_qh);
++	}
++}
++
+ /*
+  * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
+  * and high-bandwidth IN transfer cases.
+@@ -1421,18 +1462,26 @@ void musb_host_rx(struct musb *musb, u8 epnum)
+ 	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {
+ 
+ 		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
+-			/* NOTE this code path would be a good place to PAUSE a
+-			 * transfer, if there's some other (nonperiodic) rx urb
+-			 * that could use this fifo.  (dma complicates it...)
++			DBG(6, "RX end %d NAK timeout\n", epnum);
++
++			/* NOTE: NAKing is *NOT* an error, so we want to
++			 * continue.  Except ... if there's a request for
++			 * another QH, use that instead of starving it.
+ 			 *
+-			 * if (bulk && qh->ring.next != &musb->in_bulk), then
+-			 * we have a candidate... NAKing is *NOT* an error
++			 * Devices like Ethernet and serial adapters keep
++			 * reads posted at all times, which will starve
++			 * other devices without this logic.
+ 			 */
+-			DBG(6, "RX end %d NAK timeout\n", epnum);
++			if (usb_pipebulk(urb->pipe)
++					&& qh->mux == 1
++					&& !list_is_singular(&musb->in_bulk)) {
++				musb_bulk_rx_nak_timeout(musb, hw_ep);
++				return;
++			}
+ 			musb_ep_select(mbase, epnum);
+-			musb_writew(epio, MUSB_RXCSR,
+-					MUSB_RXCSR_H_WZC_BITS
+-					| MUSB_RXCSR_H_REQPKT);
++			rx_csr |= MUSB_RXCSR_H_WZC_BITS;
++			rx_csr &= ~MUSB_RXCSR_DATAERROR;
++			musb_writew(epio, MUSB_RXCSR, rx_csr);
+ 
+ 			goto finish;
+ 		} else {
+@@ -1756,6 +1805,17 @@ static int musb_schedule(
+ 			head = &musb->in_bulk;
+ 		else
+ 			head = &musb->out_bulk;
++
++		/* Enable bulk RX NAK timeout scheme when bulk requests are
++		 * multiplexed.  This scheme doesn't work in the high- to
++		 * full-speed scenario, as NAK interrupts do not come from
++		 * a full-speed device connected through a high-speed hub.
++		 * The NAK timeout interval is 8 (128 uframes or 16 ms) for
++		 * a HS device and 4 (8 frames or 8 ms) for a FS device.
++		 */
++		if (is_in && qh->dev)
++			qh->intv_reg =
++				(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
+ 		goto success;
+ 	} else if (best_end < 0) {
+ 		return -ENOSPC;
+@@ -1888,13 +1948,11 @@ static int musb_urb_enqueue(
+ 		 *
+ 		 * The downside of disabling this is that transfer scheduling
+ 		 * gets VERY unfair for nonperiodic transfers; a misbehaving
+-		 * peripheral could make that hurt.  Or for reads, one that's
+-		 * perfectly normal:  network and other drivers keep reads
+-		 * posted at all times, having one pending for a week should
+-		 * be perfectly safe.
++		 * peripheral could make that hurt.  That's perfectly normal
++		 * for reads from network or serial adapters ... so we have
++		 * partial NAKlimit support for bulk RX.
+ 		 *
+-		 * The upside of disabling it is avoidng transfer scheduling
+-		 * code to put this aside for while.
++		 * The upside of disabling it is simpler transfer scheduling.
+ 		 */
+ 		interval = 0;
+ 	}
+-- 
+1.6.0.4
+
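For reference, the NAKlimit encoding used in the musb_schedule() hunk
above: a MUSB NAKlimit register value m corresponds to 2^(m-1) frames
(full speed, 1 ms each) or microframes (high speed, 125 us each)
before the NAK timeout interrupt fires.  A minimal sketch of that
arithmetic (hypothetical helper, not part of the patch):

	/* nak_timeout_us(8, 1) == 128 * 125  == 16000 us (16 ms, high speed)
	 * nak_timeout_us(4, 0) ==   8 * 1000 ==  8000 us ( 8 ms, full speed)
	 */
	static unsigned nak_timeout_us(unsigned m, int is_highspeed)
	{
		unsigned frames = 1u << (m - 1);	/* 2^(m-1) (micro)frames */

		return frames * (is_highspeed ? 125 : 1000);
	}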