author    Tudor Florea <tudor.florea@enea.com>  2015-10-08 22:42:49 +0200
committer Tudor Florea <tudor.florea@enea.com>  2015-10-08 22:42:49 +0200
commit    635d320abfa6dc3c0e1d00e3ceae567dd0e55a5b (patch)
tree      dcd42fafb9189d3be13ef3d95f9ce6f4f5cfa267 /recipes-kernel/linux/linux-hierofalcon/319-Hierofalcon-Update-xgbe-drivers-for-B0-board.patch
initial commit for Enea Linux 5.0 arm
Signed-off-by: Tudor Florea <tudor.florea@enea.com>
Diffstat (limited to 'recipes-kernel/linux/linux-hierofalcon/319-Hierofalcon-Update-xgbe-drivers-for-B0-board.patch')
-rw-r--r--  recipes-kernel/linux/linux-hierofalcon/319-Hierofalcon-Update-xgbe-drivers-for-B0-board.patch  3557
1 file changed, 3557 insertions(+), 0 deletions(-)
diff --git a/recipes-kernel/linux/linux-hierofalcon/319-Hierofalcon-Update-xgbe-drivers-for-B0-board.patch b/recipes-kernel/linux/linux-hierofalcon/319-Hierofalcon-Update-xgbe-drivers-for-B0-board.patch
new file mode 100644
index 0000000..6c45b11
--- /dev/null
+++ b/recipes-kernel/linux/linux-hierofalcon/319-Hierofalcon-Update-xgbe-drivers-for-B0-board.patch
@@ -0,0 +1,3557 @@
1From 675ffdbcc905bc44a9fef9a7f6569493a3a8efe1 Mon Sep 17 00:00:00 2001
2From: Adrian Calianu <adrian.calianu@enea.com>
3Date: Fri, 28 Aug 2015 17:35:57 +0200
4Subject: [PATCH] Hierofalcon: Update xgbe drivers for B0 board
5
6Port ethernet drivers for AMD xgbe from 4.1 kernel to 3.19
7in order to have ethernet working on B0 board
8
9Signed-off-by: Adrian Calianu <adrian.calianu@enea.com>
10---
11 drivers/net/ethernet/amd/xgbe/xgbe-common.h | 2 +
12 drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | 2 +-
13 drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 34 +-
14 drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 147 +++-
15 drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 309 +++----
16 drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 29 +-
17 drivers/net/ethernet/amd/xgbe/xgbe-main.c | 207 ++++-
18 drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 29 +-
19 drivers/net/ethernet/amd/xgbe/xgbe-ptp.c | 21 +-
20 drivers/net/ethernet/amd/xgbe/xgbe.h | 46 +-
21 drivers/net/phy/amd-xgbe-phy.c | 1142 ++++++++++++++++++--------
22 include/linux/clocksource.h | 9 +
23 12 files changed, 1284 insertions(+), 693 deletions(-)
24
25diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
26index 29a0927..34c28aa 100644
27--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
28+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
29@@ -365,6 +365,8 @@
30 #define MAC_HWF0R_TXCOESEL_WIDTH 1
31 #define MAC_HWF0R_VLHASH_INDEX 4
32 #define MAC_HWF0R_VLHASH_WIDTH 1
33+#define MAC_HWF1R_ADDR64_INDEX 14
34+#define MAC_HWF1R_ADDR64_WIDTH 2
35 #define MAC_HWF1R_ADVTHWORD_INDEX 13
36 #define MAC_HWF1R_ADVTHWORD_WIDTH 1
37 #define MAC_HWF1R_DBGMEMA_INDEX 19
38diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
39index 76479d0..2c063b6 100644
40--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
41+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
42@@ -328,7 +328,7 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
43
44 buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name);
45 pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL);
46- if (pdata->xgbe_debugfs == NULL) {
47+ if (!pdata->xgbe_debugfs) {
48 netdev_err(pdata->netdev, "debugfs_create_dir failed\n");
49 return;
50 }
51diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
52index a50891f..5c92fb7 100644
53--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
54+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
55@@ -263,7 +263,7 @@ static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
56 int ret;
57
58 /* Try to obtain pages, decreasing order if necessary */
59- gfp |= __GFP_COLD | __GFP_COMP;
60+ gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
61 while (order >= 0) {
62 pages = alloc_pages(gfp, order);
63 if (pages)
64@@ -422,7 +422,6 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
65
66 ring->cur = 0;
67 ring->dirty = 0;
68- memset(&ring->rx, 0, sizeof(ring->rx));
69
70 hw_if->rx_desc_init(channel);
71 }
72@@ -621,35 +620,6 @@ err_out:
73 return 0;
74 }
75
76-static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
77-{
78- struct xgbe_prv_data *pdata = channel->pdata;
79- struct xgbe_hw_if *hw_if = &pdata->hw_if;
80- struct xgbe_ring *ring = channel->rx_ring;
81- struct xgbe_ring_data *rdata;
82- int i;
83-
84- DBGPR("-->xgbe_realloc_rx_buffer: rx_ring->rx.realloc_index = %u\n",
85- ring->rx.realloc_index);
86-
87- for (i = 0; i < ring->dirty; i++) {
88- rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index);
89-
90- /* Reset rdata values */
91- xgbe_unmap_rdata(pdata, rdata);
92-
93- if (xgbe_map_rx_buffer(pdata, ring, rdata))
94- break;
95-
96- hw_if->rx_desc_reset(rdata);
97-
98- ring->rx.realloc_index++;
99- }
100- ring->dirty = 0;
101-
102- DBGPR("<--xgbe_realloc_rx_buffer\n");
103-}
104-
105 void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
106 {
107 DBGPR("-->xgbe_init_function_ptrs_desc\n");
108@@ -657,7 +627,7 @@ void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
109 desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
110 desc_if->free_ring_resources = xgbe_free_ring_resources;
111 desc_if->map_tx_skb = xgbe_map_tx_skb;
112- desc_if->realloc_rx_buffer = xgbe_realloc_rx_buffer;
113+ desc_if->map_rx_buffer = xgbe_map_rx_buffer;
114 desc_if->unmap_rdata = xgbe_unmap_rdata;
115 desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
116 desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
117diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
118index 4c66cd1..21d9497 100644
119--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
120+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
121@@ -115,6 +115,7 @@
122 */
123
124 #include <linux/phy.h>
125+#include <linux/mdio.h>
126 #include <linux/clk.h>
127 #include <linux/bitrev.h>
128 #include <linux/crc32.h>
129@@ -130,7 +131,7 @@ static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
130
131 DBGPR("-->xgbe_usec_to_riwt\n");
132
133- rate = clk_get_rate(pdata->sysclk);
134+ rate = pdata->sysclk_rate;
135
136 /*
137 * Convert the input usec value to the watchdog timer value. Each
138@@ -153,7 +154,7 @@ static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
139
140 DBGPR("-->xgbe_riwt_to_usec\n");
141
142- rate = clk_get_rate(pdata->sysclk);
143+ rate = pdata->sysclk_rate;
144
145 /*
146 * Convert the input watchdog timer value to the usec value. Each
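These two hunks stop querying the clock framework on every conversion and use the cached pdata->sysclk_rate instead, which also works when the rate comes from an ACPI property rather than a clock node (see the xgbe-main.c changes below). A minimal sketch of the conversion the surrounding comments describe, assuming (as this driver does) that one RIWT watchdog unit equals 256 system-clock cycles:

    /* Sketch only; assumes one RIWT unit == 256 SYSCLK cycles. */
    static unsigned int usec_to_riwt(unsigned long rate_hz, unsigned int usec)
    {
            /* usec -> clock cycles -> 256-cycle watchdog units */
            return (usec * (rate_hz / 1000000)) / 256;
    }

    static unsigned int riwt_to_usec(unsigned long rate_hz, unsigned int riwt)
    {
            /* the inverse: watchdog units -> clock cycles -> usec */
            return (riwt * 256) / (rate_hz / 1000000);
    }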
147@@ -673,6 +674,9 @@ static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
148
149 static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
150 {
151+ if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x3)
152+ return 0;
153+
154 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x3);
155
156 return 0;
157@@ -680,6 +684,9 @@ static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
158
159 static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
160 {
161+ if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x2)
162+ return 0;
163+
164 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x2);
165
166 return 0;
167@@ -687,6 +694,9 @@ static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
168
169 static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata)
170 {
171+ if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0)
172+ return 0;
173+
174 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0);
175
176 return 0;
177@@ -843,6 +853,22 @@ static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
178 return 0;
179 }
180
181+static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
182+{
183+ struct net_device *netdev = pdata->netdev;
184+ unsigned int pr_mode, am_mode;
185+
186+ pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
187+ am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
188+
189+ xgbe_set_promiscuous_mode(pdata, pr_mode);
190+ xgbe_set_all_multicast_mode(pdata, am_mode);
191+
192+ xgbe_add_mac_addresses(pdata);
193+
194+ return 0;
195+}
196+
197 static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
198 int mmd_reg)
199 {
200@@ -881,6 +907,23 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
201 else
202 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
203
204+ /* If the PCS is changing modes, match the MAC speed to it */
205+ if (((mmd_address >> 16) == MDIO_MMD_PCS) &&
206+ ((mmd_address & 0xffff) == MDIO_CTRL2)) {
207+ struct phy_device *phydev = pdata->phydev;
208+
209+ if (mmd_data & MDIO_PCS_CTRL2_TYPE) {
210+ /* KX mode */
211+ if (phydev->supported & SUPPORTED_1000baseKX_Full)
212+ xgbe_set_gmii_speed(pdata);
213+ else
214+ xgbe_set_gmii_2500_speed(pdata);
215+ } else {
216+ /* KR mode */
217+ xgbe_set_xgmii_speed(pdata);
218+ }
219+ }
220+
221 /* The PCS registers are accessed using mmio. The underlying APB3
222 * management interface uses indirect addressing to access the MMD
223 * register sets. This requires accessing of the PCS register in two
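The block added above intercepts writes to the PCS MDIO_CTRL2 register so the MAC speed is retuned whenever the PHY driver switches the PCS between KR (10G) and KX (1G/2.5G) modes. The surrounding comment describes the indirect APB3 access scheme; a hedged sketch of that two-phase pattern, with the window-select offset and I/O helper as illustrative assumptions rather than the driver's exact macros:

    /* Illustrative two-phase indirect PCS write; PCS_WINDOW_SELECT is a
     * stand-in name, not the driver's definition. */
    static void xpcs_write(void __iomem *xpcs_regs, unsigned int mmd_address,
                           u16 mmd_data)
    {
            /* phase 1: select the 256-register window holding the target */
            iowrite32(mmd_address >> 8, xpcs_regs + (PCS_WINDOW_SELECT << 2));
            /* phase 2: access the register at its offset inside the window */
            iowrite32(mmd_data, xpcs_regs + ((mmd_address & 0xff) << 2));
    }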
224@@ -1041,7 +1084,7 @@ static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
225 rdesc->desc3 = 0;
226
227 /* Make sure ownership is written to the descriptor */
228- wmb();
229+ dma_wmb();
230 }
231
232 static void xgbe_tx_desc_init(struct xgbe_channel *channel)
233@@ -1074,9 +1117,24 @@ static void xgbe_tx_desc_init(struct xgbe_channel *channel)
234 DBGPR("<--tx_desc_init\n");
235 }
236
237-static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
238+static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
239+ struct xgbe_ring_data *rdata, unsigned int index)
240 {
241 struct xgbe_ring_desc *rdesc = rdata->rdesc;
242+ unsigned int rx_usecs = pdata->rx_usecs;
243+ unsigned int rx_frames = pdata->rx_frames;
244+ unsigned int inte;
245+
246+ if (!rx_usecs && !rx_frames) {
247+ /* No coalescing, interrupt for every descriptor */
248+ inte = 1;
249+ } else {
250+ /* Set interrupt based on Rx frame coalescing setting */
251+ if (rx_frames && !((index + 1) % rx_frames))
252+ inte = 1;
253+ else
254+ inte = 0;
255+ }
256
257 /* Reset the Rx descriptor
258 * Set buffer 1 (lo) address to header dma address (lo)
259@@ -1090,19 +1148,18 @@ static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
260 rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma));
261 rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma));
262
263- XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
264- rdata->interrupt ? 1 : 0);
265+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);
266
267 /* Since the Rx DMA engine is likely running, make sure everything
268 * is written to the descriptor(s) before setting the OWN bit
269 * for the descriptor
270 */
271- wmb();
272+ dma_wmb();
273
274 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
275
276 /* Make sure ownership is written to the descriptor */
277- wmb();
278+ dma_wmb();
279 }
280
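The wmb()-to-dma_wmb() conversions in these hunks switch to the lighter barrier that only orders writes to DMA-coherent memory, which is all descriptor updates need; the full wmb() reappears later in the patch where a memory write must also be ordered against an MMIO doorbell. Condensed from the hunks, the resulting pattern is:

    /* descriptor writes: dma_wmb() orders what the device sees */
    rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma));
    rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma));
    dma_wmb();                     /* fields visible before OWN is set */
    XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
    dma_wmb();                     /* OWN visible before any tail update */

    /* kicking the DMA engine is an MMIO write: keep the full wmb() */
    wmb();
    XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO, lower_32_bits(rdata->rdesc_dma));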
281 static void xgbe_rx_desc_init(struct xgbe_channel *channel)
282@@ -1111,26 +1168,16 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
283 struct xgbe_ring *ring = channel->rx_ring;
284 struct xgbe_ring_data *rdata;
285 unsigned int start_index = ring->cur;
286- unsigned int rx_coalesce, rx_frames;
287 unsigned int i;
288
289 DBGPR("-->rx_desc_init\n");
290
291- rx_coalesce = (pdata->rx_riwt || pdata->rx_frames) ? 1 : 0;
292- rx_frames = pdata->rx_frames;
293-
294 /* Initialize all descriptors */
295 for (i = 0; i < ring->rdesc_count; i++) {
296 rdata = XGBE_GET_DESC_DATA(ring, i);
297
298- /* Set interrupt on completion bit as appropriate */
299- if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames)))
300- rdata->interrupt = 0;
301- else
302- rdata->interrupt = 1;
303-
304 /* Initialize Rx descriptor */
305- xgbe_rx_desc_reset(rdata);
306+ xgbe_rx_desc_reset(pdata, rdata, i);
307 }
308
309 /* Update the total number of Rx descriptors */
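Folding the interrupt decision into xgbe_rx_desc_reset() makes the rx_frames setting take effect on every descriptor reset rather than only at ring-init time, which is why the ethtool restriction on changing rx-frames while the interface is up is dropped later in this patch. The selection logic reduces to:

    /* Sketch: does Rx descriptor 'index' request an interrupt? */
    static int rx_desc_wants_irq(unsigned int index, unsigned int rx_usecs,
                                 unsigned int rx_frames)
    {
            if (!rx_usecs && !rx_frames)
                    return 1;       /* no coalescing: every descriptor */
            /* frame coalescing: every rx_frames-th descriptor; with
             * rx_frames == 4, descriptors 3, 7, 11, ... set INTE */
            return rx_frames && !((index + 1) % rx_frames);
    }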
310@@ -1331,18 +1378,20 @@ static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
311 struct xgbe_prv_data *pdata = channel->pdata;
312 struct xgbe_ring_data *rdata;
313
314+ /* Make sure everything is written before the register write */
315+ wmb();
316+
317 /* Issue a poll command to Tx DMA by writing address
318 * of next immediate free descriptor */
319 rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
320 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
321 lower_32_bits(rdata->rdesc_dma));
322
323- /* Start the Tx coalescing timer */
324+ /* Start the Tx timer */
325 if (pdata->tx_usecs && !channel->tx_timer_active) {
326 channel->tx_timer_active = 1;
327- hrtimer_start(&channel->tx_timer,
328- ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC),
329- HRTIMER_MODE_REL);
330+ mod_timer(&channel->tx_timer,
331+ jiffies + usecs_to_jiffies(pdata->tx_usecs));
332 }
333
334 ring->tx.xmit_more = 0;
335@@ -1359,6 +1408,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
336 unsigned int tso_context, vlan_context;
337 unsigned int tx_set_ic;
338 int start_index = ring->cur;
339+ int cur_index = ring->cur;
340 int i;
341
342 DBGPR("-->xgbe_dev_xmit\n");
343@@ -1401,7 +1451,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
344 else
345 tx_set_ic = 0;
346
347- rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
348+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
349 rdesc = rdata->rdesc;
350
351 /* Create a context descriptor if this is a TSO packet */
352@@ -1444,8 +1494,8 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
353 ring->tx.cur_vlan_ctag = packet->vlan_ctag;
354 }
355
356- ring->cur++;
357- rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
358+ cur_index++;
359+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
360 rdesc = rdata->rdesc;
361 }
362
363@@ -1473,7 +1523,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
364 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
365
366 /* Set OWN bit if not the first descriptor */
367- if (ring->cur != start_index)
368+ if (cur_index != start_index)
369 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
370
371 if (tso) {
372@@ -1497,9 +1547,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
373 packet->length);
374 }
375
376- for (i = ring->cur - start_index + 1; i < packet->rdesc_count; i++) {
377- ring->cur++;
378- rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
379+ for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) {
380+ cur_index++;
381+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
382 rdesc = rdata->rdesc;
383
384 /* Update buffer address */
385@@ -1537,7 +1587,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
386 * is written to the descriptor(s) before setting the OWN bit
387 * for the first descriptor
388 */
389- wmb();
390+ dma_wmb();
391
392 /* Set OWN bit for the first descriptor */
393 rdata = XGBE_GET_DESC_DATA(ring, start_index);
394@@ -1549,9 +1599,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
395 #endif
396
397 /* Make sure ownership is written to the descriptor */
398- wmb();
399+ dma_wmb();
400
401- ring->cur++;
402+ ring->cur = cur_index + 1;
403 if (!packet->skb->xmit_more ||
404 netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
405 channel->queue_index)))
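Working through a local cur_index means ring->cur is published exactly once, after the OWN bit and its barrier, so the lock-free Tx cleanup this patch introduces (the ring spinlock is removed in xgbe-drv.c) can never observe a producer index ahead of visible ownership. Schematically, with hypothetical types standing in for the driver's:

    /* Hypothetical sketch of the publish-last pattern used above */
    static void tx_publish(struct my_ring *ring, struct my_desc *first,
                           unsigned int cur_index)
    {
            dma_wmb();                  /* descriptor fields before OWN */
            first->desc3 |= DESC_OWN;   /* hand the chain to the hardware */
            dma_wmb();                  /* OWN visible before index publish */
            ring->cur = cur_index + 1;  /* seen by Tx cleanup only now */
    }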
406@@ -1585,7 +1635,7 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
407 return 1;
408
409 /* Make sure descriptor fields are read after reading the OWN bit */
410- rmb();
411+ dma_rmb();
412
413 #ifdef XGMAC_ENABLE_RX_DESC_DUMP
414 xgbe_dump_rx_desc(ring, rdesc, ring->cur);
415@@ -1976,7 +2026,8 @@ static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
416 for (i = 0; i < pdata->tx_q_count; i++)
417 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
418
419- netdev_notice(pdata->netdev, "%d Tx queues, %d byte fifo per queue\n",
420+ netdev_notice(pdata->netdev,
421+ "%d Tx hardware queues, %d byte fifo per queue\n",
422 pdata->tx_q_count, ((fifo_size + 1) * 256));
423 }
424
425@@ -1991,7 +2042,8 @@ static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
426 for (i = 0; i < pdata->rx_q_count; i++)
427 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
428
429- netdev_notice(pdata->netdev, "%d Rx queues, %d byte fifo per queue\n",
430+ netdev_notice(pdata->netdev,
431+ "%d Rx hardware queues, %d byte fifo per queue\n",
432 pdata->rx_q_count, ((fifo_size + 1) * 256));
433 }
434
435@@ -2107,6 +2159,23 @@ static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
436 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
437 }
438
439+static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
440+{
441+ switch (pdata->phy_speed) {
442+ case SPEED_10000:
443+ xgbe_set_xgmii_speed(pdata);
444+ break;
445+
446+ case SPEED_2500:
447+ xgbe_set_gmii_2500_speed(pdata);
448+ break;
449+
450+ case SPEED_1000:
451+ xgbe_set_gmii_speed(pdata);
452+ break;
453+ }
454+}
455+
456 static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
457 {
458 if (pdata->netdev->features & NETIF_F_RXCSUM)
459@@ -2755,8 +2824,10 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
460 * Initialize MAC related features
461 */
462 xgbe_config_mac_address(pdata);
463+ xgbe_config_rx_mode(pdata);
464 xgbe_config_jumbo_enable(pdata);
465 xgbe_config_flow_control(pdata);
466+ xgbe_config_mac_speed(pdata);
467 xgbe_config_checksum_offload(pdata);
468 xgbe_config_vlan_support(pdata);
469 xgbe_config_mmc(pdata);
470@@ -2773,10 +2844,8 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
471
472 hw_if->tx_complete = xgbe_tx_complete;
473
474- hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
475- hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
476- hw_if->add_mac_addresses = xgbe_add_mac_addresses;
477 hw_if->set_mac_address = xgbe_set_mac_address;
478+ hw_if->config_rx_mode = xgbe_config_rx_mode;
479
480 hw_if->enable_rx_csum = xgbe_enable_rx_csum;
481 hw_if->disable_rx_csum = xgbe_disable_rx_csum;
482diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
483index e5ffb2c..343bf6a 100644
484--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
485+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
486@@ -129,7 +129,6 @@
487
488 static int xgbe_one_poll(struct napi_struct *, int);
489 static int xgbe_all_poll(struct napi_struct *, int);
490-static void xgbe_set_rx_mode(struct net_device *);
491
492 static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
493 {
494@@ -225,6 +224,11 @@ static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
495 return (ring->rdesc_count - (ring->cur - ring->dirty));
496 }
497
498+static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)
499+{
500+ return (ring->cur - ring->dirty);
501+}
502+
503 static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
504 struct xgbe_ring *ring, unsigned int count)
505 {
506@@ -337,12 +341,13 @@ static irqreturn_t xgbe_isr(int irq, void *data)
507 dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
508 DBGPR(" DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);
509
510- /* If we get a TI or RI interrupt that means per channel DMA
511- * interrupts are not enabled, so we use the private data napi
512- * structure, not the per channel napi structure
513+ /* The TI or RI interrupt bits may still be set even if using
514+ * per channel DMA interrupts. Check to be sure those are not
515+ * enabled before using the private data napi structure.
516 */
517- if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
518- XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI)) {
519+ if (!pdata->per_channel_irq &&
520+ (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
521+ XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
522 if (napi_schedule_prep(&pdata->napi)) {
523 /* Disable Tx and Rx interrupts */
524 xgbe_disable_rx_tx_ints(pdata);
525@@ -405,26 +410,20 @@ static irqreturn_t xgbe_dma_isr(int irq, void *data)
526 return IRQ_HANDLED;
527 }
528
529-static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
530+static void xgbe_tx_timer(unsigned long data)
531 {
532- struct xgbe_channel *channel = container_of(timer,
533- struct xgbe_channel,
534- tx_timer);
535- struct xgbe_ring *ring = channel->tx_ring;
536+ struct xgbe_channel *channel = (struct xgbe_channel *)data;
537 struct xgbe_prv_data *pdata = channel->pdata;
538 struct napi_struct *napi;
539- unsigned long flags;
540
541 DBGPR("-->xgbe_tx_timer\n");
542
543 napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
544
545- spin_lock_irqsave(&ring->lock, flags);
546-
547 if (napi_schedule_prep(napi)) {
548 /* Disable Tx and Rx interrupts */
549 if (pdata->per_channel_irq)
550- disable_irq(channel->dma_irq);
551+ disable_irq_nosync(channel->dma_irq);
552 else
553 xgbe_disable_rx_tx_ints(pdata);
554
555@@ -434,11 +433,7 @@ static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
556
557 channel->tx_timer_active = 0;
558
559- spin_unlock_irqrestore(&ring->lock, flags);
560-
561 DBGPR("<--xgbe_tx_timer\n");
562-
563- return HRTIMER_NORESTART;
564 }
565
566 static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
567@@ -454,9 +449,8 @@ static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
568 break;
569
570 DBGPR(" %s adding tx timer\n", channel->name);
571- hrtimer_init(&channel->tx_timer, CLOCK_MONOTONIC,
572- HRTIMER_MODE_REL);
573- channel->tx_timer.function = xgbe_tx_timer;
574+ setup_timer(&channel->tx_timer, xgbe_tx_timer,
575+ (unsigned long)channel);
576 }
577
578 DBGPR("<--xgbe_init_tx_timers\n");
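Replacing the hrtimer with a plain timer_list trades nanosecond resolution for jiffy resolution, which is sufficient for Tx interrupt coalescing, and the handler now runs as an ordinary softirq timer without the ring lock this patch removes. For reference, the 3.19-era API used here (later kernels replaced setup_timer() with timer_setup()/from_timer()):

    /* init: the handler receives the channel as an unsigned long cookie */
    setup_timer(&channel->tx_timer, xgbe_tx_timer, (unsigned long)channel);

    /* arm (or re-arm) relative to now */
    mod_timer(&channel->tx_timer, jiffies + usecs_to_jiffies(pdata->tx_usecs));

    /* tear down, waiting for a running handler to finish */
    del_timer_sync(&channel->tx_timer);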
579@@ -475,8 +469,7 @@ static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
580 break;
581
582 DBGPR(" %s deleting tx timer\n", channel->name);
583- channel->tx_timer_active = 0;
584- hrtimer_cancel(&channel->tx_timer);
585+ del_timer_sync(&channel->tx_timer);
586 }
587
588 DBGPR("<--xgbe_stop_tx_timers\n");
589@@ -519,6 +512,7 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
590 RXFIFOSIZE);
591 hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
592 TXFIFOSIZE);
593+ hw_feat->dma_width = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
594 hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
595 hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
596 hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
597@@ -553,6 +547,21 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
598 break;
599 }
600
601+ /* Translate the address width setting into actual number */
602+ switch (hw_feat->dma_width) {
603+ case 0:
604+ hw_feat->dma_width = 32;
605+ break;
606+ case 1:
607+ hw_feat->dma_width = 40;
608+ break;
609+ case 2:
610+ hw_feat->dma_width = 48;
611+ break;
612+ default:
613+ hw_feat->dma_width = 32;
614+ }
615+
616 /* The Queue, Channel and TC counts are zero based so increment them
617 * to get the actual number
618 */
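The new ADDR64 feature field plus the translation above let the driver size its DMA mask from what the hardware reports instead of hard-coding 40 bits; the xgbe-main.c hunk below applies it. Since DMA_BIT_MASK(n) expands to ((1ULL << (n)) - 1) for n < 64:

    /* ADDR64 == 1  ->  dma_width == 40  ->  a 40-bit DMA mask */
    ret = dma_set_mask_and_coherent(dev,
                                    DMA_BIT_MASK(pdata->hw_feat.dma_width));
    if (ret)
            dev_err(dev, "dma_set_mask_and_coherent failed\n");
    /* DMA_BIT_MASK(40) == 0x000000ffffffffffULL */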
619@@ -609,6 +618,68 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
620 }
621 }
622
623+static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
624+{
625+ struct xgbe_channel *channel;
626+ struct net_device *netdev = pdata->netdev;
627+ unsigned int i;
628+ int ret;
629+
630+ ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
631+ netdev->name, pdata);
632+ if (ret) {
633+ netdev_alert(netdev, "error requesting irq %d\n",
634+ pdata->dev_irq);
635+ return ret;
636+ }
637+
638+ if (!pdata->per_channel_irq)
639+ return 0;
640+
641+ channel = pdata->channel;
642+ for (i = 0; i < pdata->channel_count; i++, channel++) {
643+ snprintf(channel->dma_irq_name,
644+ sizeof(channel->dma_irq_name) - 1,
645+ "%s-TxRx-%u", netdev_name(netdev),
646+ channel->queue_index);
647+
648+ ret = devm_request_irq(pdata->dev, channel->dma_irq,
649+ xgbe_dma_isr, 0,
650+ channel->dma_irq_name, channel);
651+ if (ret) {
652+ netdev_alert(netdev, "error requesting irq %d\n",
653+ channel->dma_irq);
654+ goto err_irq;
655+ }
656+ }
657+
658+ return 0;
659+
660+err_irq:
661+ /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
662+ for (i--, channel--; i < pdata->channel_count; i--, channel--)
663+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
664+
665+ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
666+
667+ return ret;
668+}
669+
670+static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
671+{
672+ struct xgbe_channel *channel;
673+ unsigned int i;
674+
675+ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
676+
677+ if (!pdata->per_channel_irq)
678+ return;
679+
680+ channel = pdata->channel;
681+ for (i = 0; i < pdata->channel_count; i++, channel++)
682+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
683+}
684+
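Factoring IRQ setup into xgbe_request_irqs()/xgbe_free_irqs() lets xgbe_start()/xgbe_stop() acquire and release interrupts on every interface up/down instead of holding them from open to close. The err_irq unwind above relies on unsigned wraparound: once the entry at index 0 has been freed, i-- wraps to UINT_MAX and the i < pdata->channel_count test ends the loop. The idiom in isolation (request() and release() are placeholders):

    unsigned int i;

    for (i = 0; i < count; i++)
            if (request(i))
                    goto err;
    return 0;

    err:
    /* free only what was requested; i wraps past zero to UINT_MAX */
    for (i--; i < count; i--)
            release(i);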
685 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
686 {
687 struct xgbe_hw_if *hw_if = &pdata->hw_if;
688@@ -630,6 +701,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
689 DBGPR("-->xgbe_init_rx_coalesce\n");
690
691 pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
692+ pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS;
693 pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;
694
695 hw_if->config_rx_coalesce(pdata);
696@@ -694,7 +766,7 @@ static void xgbe_adjust_link(struct net_device *netdev)
697 struct phy_device *phydev = pdata->phydev;
698 int new_state = 0;
699
700- if (phydev == NULL)
701+ if (!phydev)
702 return;
703
704 if (phydev->link) {
705@@ -810,20 +882,20 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
706 return -EINVAL;
707 }
708
709- phy_stop(pdata->phydev);
710-
711 spin_lock_irqsave(&pdata->lock, flags);
712
713 if (caller == XGMAC_DRIVER_CONTEXT)
714 netif_device_detach(netdev);
715
716 netif_tx_stop_all_queues(netdev);
717- xgbe_napi_disable(pdata, 0);
718
719- /* Powerdown Tx/Rx */
720 hw_if->powerdown_tx(pdata);
721 hw_if->powerdown_rx(pdata);
722
723+ xgbe_napi_disable(pdata, 0);
724+
725+ phy_stop(pdata->phydev);
726+
727 pdata->power_down = 1;
728
729 spin_unlock_irqrestore(&pdata->lock, flags);
730@@ -854,14 +926,14 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
731
732 phy_start(pdata->phydev);
733
734- /* Enable Tx/Rx */
735+ xgbe_napi_enable(pdata, 0);
736+
737 hw_if->powerup_tx(pdata);
738 hw_if->powerup_rx(pdata);
739
740 if (caller == XGMAC_DRIVER_CONTEXT)
741 netif_device_attach(netdev);
742
743- xgbe_napi_enable(pdata, 0);
744 netif_tx_start_all_queues(netdev);
745
746 spin_unlock_irqrestore(&pdata->lock, flags);
747@@ -875,26 +947,39 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
748 {
749 struct xgbe_hw_if *hw_if = &pdata->hw_if;
750 struct net_device *netdev = pdata->netdev;
751+ int ret;
752
753 DBGPR("-->xgbe_start\n");
754
755- xgbe_set_rx_mode(netdev);
756-
757 hw_if->init(pdata);
758
759 phy_start(pdata->phydev);
760
761+ xgbe_napi_enable(pdata, 1);
762+
763+ ret = xgbe_request_irqs(pdata);
764+ if (ret)
765+ goto err_napi;
766+
767 hw_if->enable_tx(pdata);
768 hw_if->enable_rx(pdata);
769
770 xgbe_init_tx_timers(pdata);
771
772- xgbe_napi_enable(pdata, 1);
773 netif_tx_start_all_queues(netdev);
774
775 DBGPR("<--xgbe_start\n");
776
777 return 0;
778+
779+err_napi:
780+ xgbe_napi_disable(pdata, 1);
781+
782+ phy_stop(pdata->phydev);
783+
784+ hw_if->exit(pdata);
785+
786+ return ret;
787 }
788
789 static void xgbe_stop(struct xgbe_prv_data *pdata)
790@@ -907,16 +992,21 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
791
792 DBGPR("-->xgbe_stop\n");
793
794- phy_stop(pdata->phydev);
795-
796 netif_tx_stop_all_queues(netdev);
797- xgbe_napi_disable(pdata, 1);
798
799 xgbe_stop_tx_timers(pdata);
800
801 hw_if->disable_tx(pdata);
802 hw_if->disable_rx(pdata);
803
804+ xgbe_free_irqs(pdata);
805+
806+ xgbe_napi_disable(pdata, 1);
807+
808+ phy_stop(pdata->phydev);
809+
810+ hw_if->exit(pdata);
811+
812 channel = pdata->channel;
813 for (i = 0; i < pdata->channel_count; i++, channel++) {
814 if (!channel->tx_ring)
815@@ -929,12 +1019,8 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
816 DBGPR("<--xgbe_stop\n");
817 }
818
819-static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
820+static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
821 {
822- struct xgbe_channel *channel;
823- struct xgbe_hw_if *hw_if = &pdata->hw_if;
824- unsigned int i;
825-
826 DBGPR("-->xgbe_restart_dev\n");
827
828 /* If not running, "restart" will happen on open */
829@@ -942,20 +1028,10 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
830 return;
831
832 xgbe_stop(pdata);
833- synchronize_irq(pdata->dev_irq);
834- if (pdata->per_channel_irq) {
835- channel = pdata->channel;
836- for (i = 0; i < pdata->channel_count; i++, channel++)
837- synchronize_irq(channel->dma_irq);
838- }
839
840 xgbe_free_tx_data(pdata);
841 xgbe_free_rx_data(pdata);
842
843- /* Issue software reset to device if requested */
844- if (reset)
845- hw_if->exit(pdata);
846-
847 xgbe_start(pdata);
848
849 DBGPR("<--xgbe_restart_dev\n");
850@@ -969,7 +1045,7 @@ static void xgbe_restart(struct work_struct *work)
851
852 rtnl_lock();
853
854- xgbe_restart_dev(pdata, 1);
855+ xgbe_restart_dev(pdata);
856
857 rtnl_unlock();
858 }
859@@ -1284,10 +1360,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
860 static int xgbe_open(struct net_device *netdev)
861 {
862 struct xgbe_prv_data *pdata = netdev_priv(netdev);
863- struct xgbe_hw_if *hw_if = &pdata->hw_if;
864 struct xgbe_desc_if *desc_if = &pdata->desc_if;
865- struct xgbe_channel *channel = NULL;
866- unsigned int i = 0;
867 int ret;
868
869 DBGPR("-->xgbe_open\n");
870@@ -1330,55 +1403,14 @@ static int xgbe_open(struct net_device *netdev)
871 INIT_WORK(&pdata->restart_work, xgbe_restart);
872 INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
873
874- /* Request interrupts */
875- ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
876- netdev->name, pdata);
877- if (ret) {
878- netdev_alert(netdev, "error requesting irq %d\n",
879- pdata->dev_irq);
880- goto err_rings;
881- }
882-
883- if (pdata->per_channel_irq) {
884- channel = pdata->channel;
885- for (i = 0; i < pdata->channel_count; i++, channel++) {
886- snprintf(channel->dma_irq_name,
887- sizeof(channel->dma_irq_name) - 1,
888- "%s-TxRx-%u", netdev_name(netdev),
889- channel->queue_index);
890-
891- ret = devm_request_irq(pdata->dev, channel->dma_irq,
892- xgbe_dma_isr, 0,
893- channel->dma_irq_name, channel);
894- if (ret) {
895- netdev_alert(netdev,
896- "error requesting irq %d\n",
897- channel->dma_irq);
898- goto err_irq;
899- }
900- }
901- }
902-
903 ret = xgbe_start(pdata);
904 if (ret)
905- goto err_start;
906+ goto err_rings;
907
908 DBGPR("<--xgbe_open\n");
909
910 return 0;
911
912-err_start:
913- hw_if->exit(pdata);
914-
915-err_irq:
916- if (pdata->per_channel_irq) {
917- /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
918- for (i--, channel--; i < pdata->channel_count; i--, channel--)
919- devm_free_irq(pdata->dev, channel->dma_irq, channel);
920- }
921-
922- devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
923-
924 err_rings:
925 desc_if->free_ring_resources(pdata);
926
927@@ -1400,30 +1432,16 @@ err_phy_init:
928 static int xgbe_close(struct net_device *netdev)
929 {
930 struct xgbe_prv_data *pdata = netdev_priv(netdev);
931- struct xgbe_hw_if *hw_if = &pdata->hw_if;
932 struct xgbe_desc_if *desc_if = &pdata->desc_if;
933- struct xgbe_channel *channel;
934- unsigned int i;
935
936 DBGPR("-->xgbe_close\n");
937
938 /* Stop the device */
939 xgbe_stop(pdata);
940
941- /* Issue software reset to device */
942- hw_if->exit(pdata);
943-
944 /* Free the ring descriptors and buffers */
945 desc_if->free_ring_resources(pdata);
946
947- /* Release the interrupts */
948- devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
949- if (pdata->per_channel_irq) {
950- channel = pdata->channel;
951- for (i = 0; i < pdata->channel_count; i++, channel++)
952- devm_free_irq(pdata->dev, channel->dma_irq, channel);
953- }
954-
955 /* Free the channel and ring structures */
956 xgbe_free_channels(pdata);
957
958@@ -1448,7 +1466,6 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
959 struct xgbe_ring *ring;
960 struct xgbe_packet_data *packet;
961 struct netdev_queue *txq;
962- unsigned long flags;
963 int ret;
964
965 DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
966@@ -1460,8 +1477,6 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
967
968 ret = NETDEV_TX_OK;
969
970- spin_lock_irqsave(&ring->lock, flags);
971-
972 if (skb->len == 0) {
973 netdev_err(netdev, "empty skb received from stack\n");
974 dev_kfree_skb_any(skb);
975@@ -1508,10 +1523,6 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
976 ret = NETDEV_TX_OK;
977
978 tx_netdev_return:
979- spin_unlock_irqrestore(&ring->lock, flags);
980-
981- DBGPR("<--xgbe_xmit\n");
982-
983 return ret;
984 }
985
986@@ -1519,17 +1530,10 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
987 {
988 struct xgbe_prv_data *pdata = netdev_priv(netdev);
989 struct xgbe_hw_if *hw_if = &pdata->hw_if;
990- unsigned int pr_mode, am_mode;
991
992 DBGPR("-->xgbe_set_rx_mode\n");
993
994- pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
995- am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
996-
997- hw_if->set_promiscuous_mode(pdata, pr_mode);
998- hw_if->set_all_multicast_mode(pdata, am_mode);
999-
1000- hw_if->add_mac_addresses(pdata);
1001+ hw_if->config_rx_mode(pdata);
1002
1003 DBGPR("<--xgbe_set_rx_mode\n");
1004 }
1005@@ -1589,13 +1593,21 @@ static int xgbe_change_mtu(struct net_device *netdev, int mtu)
1006 pdata->rx_buf_size = ret;
1007 netdev->mtu = mtu;
1008
1009- xgbe_restart_dev(pdata, 0);
1010+ xgbe_restart_dev(pdata);
1011
1012 DBGPR("<--xgbe_change_mtu\n");
1013
1014 return 0;
1015 }
1016
1017+static void xgbe_tx_timeout(struct net_device *netdev)
1018+{
1019+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
1020+
1021+ netdev_warn(netdev, "tx timeout, device restarting\n");
1022+ schedule_work(&pdata->restart_work);
1023+}
1024+
1025 static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
1026 struct rtnl_link_stats64 *s)
1027 {
1028@@ -1760,6 +1772,7 @@ static const struct net_device_ops xgbe_netdev_ops = {
1029 .ndo_validate_addr = eth_validate_addr,
1030 .ndo_do_ioctl = xgbe_ioctl,
1031 .ndo_change_mtu = xgbe_change_mtu,
1032+ .ndo_tx_timeout = xgbe_tx_timeout,
1033 .ndo_get_stats64 = xgbe_get_stats64,
1034 .ndo_vlan_rx_add_vid = xgbe_vlan_rx_add_vid,
1035 .ndo_vlan_rx_kill_vid = xgbe_vlan_rx_kill_vid,
1036@@ -1778,29 +1791,44 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
1037 static void xgbe_rx_refresh(struct xgbe_channel *channel)
1038 {
1039 struct xgbe_prv_data *pdata = channel->pdata;
1040+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
1041 struct xgbe_desc_if *desc_if = &pdata->desc_if;
1042 struct xgbe_ring *ring = channel->rx_ring;
1043 struct xgbe_ring_data *rdata;
1044
1045- desc_if->realloc_rx_buffer(channel);
1046+ while (ring->dirty != ring->cur) {
1047+ rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
1048+
1049+ /* Reset rdata values */
1050+ desc_if->unmap_rdata(pdata, rdata);
1051+
1052+ if (desc_if->map_rx_buffer(pdata, ring, rdata))
1053+ break;
1054+
1055+ hw_if->rx_desc_reset(pdata, rdata, ring->dirty);
1056+
1057+ ring->dirty++;
1058+ }
1059+
1060+ /* Make sure everything is written before the register write */
1061+ wmb();
1062
1063 /* Update the Rx Tail Pointer Register with address of
1064 * the last cleaned entry */
1065- rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index - 1);
1066+ rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
1067 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
1068 lower_32_bits(rdata->rdesc_dma));
1069 }
1070
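xgbe_rx_refresh() now drives reallocation from the ring's own cur/dirty counters instead of a separate realloc_index. Both counters are free-running unsigned values, and ring sizes are enforced as powers of two (BUILD_BUG_ON_NOT_POWER_OF_2 in xgbe-main.c), so masking maps them into the ring and unsigned subtraction stays correct across wraparound:

    /* cf. the XGBE_GET_DESC_DATA() macro in xgbe.h */
    rdata = &ring->rdata[idx & (ring->rdesc_count - 1)];

    /* if cur has wrapped past dirty, cur - dirty still equals the
     * number of descriptors awaiting reallocation */
    dirty_count = ring->cur - ring->dirty;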
1071-static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
1072+static struct sk_buff *xgbe_create_skb(struct napi_struct *napi,
1073 struct xgbe_ring_data *rdata,
1074 unsigned int *len)
1075 {
1076- struct net_device *netdev = pdata->netdev;
1077 struct sk_buff *skb;
1078 u8 *packet;
1079 unsigned int copy_len;
1080
1081- skb = netdev_alloc_skb_ip_align(netdev, rdata->rx.hdr.dma_len);
1082+ skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
1083 if (!skb)
1084 return NULL;
1085
1086@@ -1826,7 +1854,6 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
1087 struct xgbe_ring_desc *rdesc;
1088 struct net_device *netdev = pdata->netdev;
1089 struct netdev_queue *txq;
1090- unsigned long flags;
1091 int processed = 0;
1092 unsigned int tx_packets = 0, tx_bytes = 0;
1093
1094@@ -1838,8 +1865,6 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
1095
1096 txq = netdev_get_tx_queue(netdev, channel->queue_index);
1097
1098- spin_lock_irqsave(&ring->lock, flags);
1099-
1100 while ((processed < XGBE_TX_DESC_MAX_PROC) &&
1101 (ring->dirty != ring->cur)) {
1102 rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
1103@@ -1850,7 +1875,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
1104
1105 /* Make sure descriptor fields are read after reading the OWN
1106 * bit */
1107- rmb();
1108+ dma_rmb();
1109
1110 #ifdef XGMAC_ENABLE_TX_DESC_DUMP
1111 xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
1112@@ -1870,7 +1895,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
1113 }
1114
1115 if (!processed)
1116- goto unlock;
1117+ return 0;
1118
1119 netdev_tx_completed_queue(txq, tx_packets, tx_bytes);
1120
1121@@ -1882,9 +1907,6 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
1122
1123 DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);
1124
1125-unlock:
1126- spin_unlock_irqrestore(&ring->lock, flags);
1127-
1128 return processed;
1129 }
1130
1131@@ -1936,7 +1958,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
1132 read_again:
1133 rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
1134
1135- if (ring->dirty > (XGBE_RX_DESC_CNT >> 3))
1136+ if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3))
1137 xgbe_rx_refresh(channel);
1138
1139 if (hw_if->dev_read(channel))
1140@@ -1944,7 +1966,6 @@ read_again:
1141
1142 received++;
1143 ring->cur++;
1144- ring->dirty++;
1145
1146 incomplete = XGMAC_GET_BITS(packet->attributes,
1147 RX_PACKET_ATTRIBUTES,
1148@@ -1977,7 +1998,7 @@ read_again:
1149 rdata->rx.hdr.dma_len,
1150 DMA_FROM_DEVICE);
1151
1152- skb = xgbe_create_skb(pdata, rdata, &put_len);
1153+ skb = xgbe_create_skb(napi, rdata, &put_len);
1154 if (!skb) {
1155 error = 1;
1156 goto skip_data;
1157diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
1158index ebf4893..5f149e8 100644
1159--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
1160+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
1161@@ -291,7 +291,6 @@ static int xgbe_get_settings(struct net_device *netdev,
1162 return -ENODEV;
1163
1164 ret = phy_ethtool_gset(pdata->phydev, cmd);
1165- cmd->transceiver = XCVR_EXTERNAL;
1166
1167 DBGPR("<--xgbe_get_settings\n");
1168
1169@@ -378,18 +377,14 @@ static int xgbe_get_coalesce(struct net_device *netdev,
1170 struct ethtool_coalesce *ec)
1171 {
1172 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1173- struct xgbe_hw_if *hw_if = &pdata->hw_if;
1174- unsigned int riwt;
1175
1176 DBGPR("-->xgbe_get_coalesce\n");
1177
1178 memset(ec, 0, sizeof(struct ethtool_coalesce));
1179
1180- riwt = pdata->rx_riwt;
1181- ec->rx_coalesce_usecs = hw_if->riwt_to_usec(pdata, riwt);
1182+ ec->rx_coalesce_usecs = pdata->rx_usecs;
1183 ec->rx_max_coalesced_frames = pdata->rx_frames;
1184
1185- ec->tx_coalesce_usecs = pdata->tx_usecs;
1186 ec->tx_max_coalesced_frames = pdata->tx_frames;
1187
1188 DBGPR("<--xgbe_get_coalesce\n");
1189@@ -403,13 +398,14 @@ static int xgbe_set_coalesce(struct net_device *netdev,
1190 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1191 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1192 unsigned int rx_frames, rx_riwt, rx_usecs;
1193- unsigned int tx_frames, tx_usecs;
1194+ unsigned int tx_frames;
1195
1196 DBGPR("-->xgbe_set_coalesce\n");
1197
1198 /* Check for not supported parameters */
1199 if ((ec->rx_coalesce_usecs_irq) ||
1200 (ec->rx_max_coalesced_frames_irq) ||
1201+ (ec->tx_coalesce_usecs) ||
1202 (ec->tx_coalesce_usecs_irq) ||
1203 (ec->tx_max_coalesced_frames_irq) ||
1204 (ec->stats_block_coalesce_usecs) ||
1205@@ -428,28 +424,18 @@ static int xgbe_set_coalesce(struct net_device *netdev,
1206 (ec->rate_sample_interval))
1207 return -EOPNOTSUPP;
1208
1209- /* Can only change rx-frames when interface is down (see
1210- * rx_descriptor_init in xgbe-dev.c)
1211- */
1212- rx_frames = pdata->rx_frames;
1213- if (rx_frames != ec->rx_max_coalesced_frames && netif_running(netdev)) {
1214- netdev_alert(netdev,
1215- "interface must be down to change rx-frames\n");
1216- return -EINVAL;
1217- }
1218-
1219 rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
1220+ rx_usecs = ec->rx_coalesce_usecs;
1221 rx_frames = ec->rx_max_coalesced_frames;
1222
1223 /* Use smallest possible value if conversion resulted in zero */
1224- if (ec->rx_coalesce_usecs && !rx_riwt)
1225+ if (rx_usecs && !rx_riwt)
1226 rx_riwt = 1;
1227
1228 /* Check the bounds of values for Rx */
1229 if (rx_riwt > XGMAC_MAX_DMA_RIWT) {
1230- rx_usecs = hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT);
1231 netdev_alert(netdev, "rx-usec is limited to %d usecs\n",
1232- rx_usecs);
1233+ hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT));
1234 return -EINVAL;
1235 }
1236 if (rx_frames > pdata->rx_desc_count) {
1237@@ -458,7 +444,6 @@ static int xgbe_set_coalesce(struct net_device *netdev,
1238 return -EINVAL;
1239 }
1240
1241- tx_usecs = ec->tx_coalesce_usecs;
1242 tx_frames = ec->tx_max_coalesced_frames;
1243
1244 /* Check the bounds of values for Tx */
1245@@ -469,10 +454,10 @@ static int xgbe_set_coalesce(struct net_device *netdev,
1246 }
1247
1248 pdata->rx_riwt = rx_riwt;
1249+ pdata->rx_usecs = rx_usecs;
1250 pdata->rx_frames = rx_frames;
1251 hw_if->config_rx_coalesce(pdata);
1252
1253- pdata->tx_usecs = tx_usecs;
1254 pdata->tx_frames = tx_frames;
1255 hw_if->config_tx_coalesce(pdata);
1256
1257diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
1258index dbd3850..7149053 100644
1259--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
1260+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
1261@@ -123,7 +123,10 @@
1262 #include <linux/io.h>
1263 #include <linux/of.h>
1264 #include <linux/of_net.h>
1265+#include <linux/of_address.h>
1266 #include <linux/clk.h>
1267+#include <linux/property.h>
1268+#include <linux/acpi.h>
1269
1270 #include "xgbe.h"
1271 #include "xgbe-common.h"
1272@@ -148,6 +151,7 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
1273 pdata->pause_autoneg = 1;
1274 pdata->tx_pause = 1;
1275 pdata->rx_pause = 1;
1276+ pdata->phy_speed = SPEED_UNKNOWN;
1277 pdata->power_down = 0;
1278 pdata->default_autoneg = AUTONEG_ENABLE;
1279 pdata->default_speed = SPEED_10000;
1280@@ -161,6 +165,96 @@ static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
1281 xgbe_init_function_ptrs_desc(&pdata->desc_if);
1282 }
1283
1284+#ifdef CONFIG_ACPI
1285+static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
1286+{
1287+ struct acpi_device *adev = pdata->adev;
1288+ struct device *dev = pdata->dev;
1289+ u32 property;
1290+ acpi_handle handle;
1291+ acpi_status status;
1292+ unsigned long long data;
1293+ int cca;
1294+ int ret;
1295+
1296+ /* Obtain the system clock setting */
1297+ ret = device_property_read_u32(dev, XGBE_ACPI_DMA_FREQ, &property);
1298+ if (ret) {
1299+ dev_err(dev, "unable to obtain %s property\n",
1300+ XGBE_ACPI_DMA_FREQ);
1301+ return ret;
1302+ }
1303+ pdata->sysclk_rate = property;
1304+
1305+ /* Obtain the PTP clock setting */
1306+ ret = device_property_read_u32(dev, XGBE_ACPI_PTP_FREQ, &property);
1307+ if (ret) {
1308+ dev_err(dev, "unable to obtain %s property\n",
1309+ XGBE_ACPI_PTP_FREQ);
1310+ return ret;
1311+ }
1312+ pdata->ptpclk_rate = property;
1313+
1314+ /* Retrieve the device cache coherency value */
1315+ handle = adev->handle;
1316+ do {
1317+ status = acpi_evaluate_integer(handle, "_CCA", NULL, &data);
1318+ if (!ACPI_FAILURE(status)) {
1319+ cca = data;
1320+ break;
1321+ }
1322+
1323+ status = acpi_get_parent(handle, &handle);
1324+ } while (!ACPI_FAILURE(status));
1325+
1326+ if (ACPI_FAILURE(status)) {
1327+ dev_err(dev, "error obtaining acpi coherency value\n");
1328+ return -EINVAL;
1329+ }
1330+ pdata->coherent = !!cca;
1331+
1332+ return 0;
1333+}
1334+#else /* CONFIG_ACPI */
1335+static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
1336+{
1337+ return -EINVAL;
1338+}
1339+#endif /* CONFIG_ACPI */
1340+
1341+#ifdef CONFIG_OF
1342+static int xgbe_of_support(struct xgbe_prv_data *pdata)
1343+{
1344+ struct device *dev = pdata->dev;
1345+
1346+ /* Obtain the system clock setting */
1347+ pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
1348+ if (IS_ERR(pdata->sysclk)) {
1349+ dev_err(dev, "dma devm_clk_get failed\n");
1350+ return PTR_ERR(pdata->sysclk);
1351+ }
1352+ pdata->sysclk_rate = clk_get_rate(pdata->sysclk);
1353+
1354+ /* Obtain the PTP clock setting */
1355+ pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
1356+ if (IS_ERR(pdata->ptpclk)) {
1357+ dev_err(dev, "ptp devm_clk_get failed\n");
1358+ return PTR_ERR(pdata->ptpclk);
1359+ }
1360+ pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk);
1361+
1362+ /* Retrieve the device cache coherency value */
1363+ pdata->coherent = of_dma_is_coherent(dev->of_node);
1364+
1365+ return 0;
1366+}
1367+#else /* CONFIG_OF */
1368+static int xgbe_of_support(struct xgbe_prv_data *pdata)
1369+{
1370+ return -EINVAL;
1371+}
1372+#endif /*CONFIG_OF */
1373+
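With these two helpers, only clock discovery and cache-coherency detection remain firmware-specific; the rest of probe dispatches once and then uses the unified device-property API (device_property_read_*(), device_property_present()), which resolves against either the OF node or the ACPI _DSD data. Condensed from the probe changes below:

    /* decide once, then share the remainder of probe */
    pdata->use_acpi = (!pdata->adev || acpi_disabled) ? 0 : 1;

    ret = pdata->use_acpi ? xgbe_acpi_support(pdata)
                          : xgbe_of_support(pdata);
    if (ret)
            goto err_io;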
1374 static int xgbe_probe(struct platform_device *pdev)
1375 {
1376 struct xgbe_prv_data *pdata;
1377@@ -169,7 +263,7 @@ static int xgbe_probe(struct platform_device *pdev)
1378 struct net_device *netdev;
1379 struct device *dev = &pdev->dev;
1380 struct resource *res;
1381- const u8 *mac_addr;
1382+ const char *phy_mode;
1383 unsigned int i;
1384 int ret;
1385
1386@@ -186,6 +280,7 @@ static int xgbe_probe(struct platform_device *pdev)
1387 pdata = netdev_priv(netdev);
1388 pdata->netdev = netdev;
1389 pdata->pdev = pdev;
1390+ pdata->adev = ACPI_COMPANION(dev);
1391 pdata->dev = dev;
1392 platform_set_drvdata(pdev, netdev);
1393
1394@@ -194,6 +289,9 @@ static int xgbe_probe(struct platform_device *pdev)
1395 mutex_init(&pdata->rss_mutex);
1396 spin_lock_init(&pdata->tstamp_lock);
1397
1398+ /* Check if we should use ACPI or DT */
1399+ pdata->use_acpi = (!pdata->adev || acpi_disabled) ? 0 : 1;
1400+
1401 /* Set and validate the number of descriptors for a ring */
1402 BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
1403 pdata->tx_desc_count = XGBE_TX_DESC_CNT;
1404@@ -212,22 +310,6 @@ static int xgbe_probe(struct platform_device *pdev)
1405 goto err_io;
1406 }
1407
1408- /* Obtain the system clock setting */
1409- pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
1410- if (IS_ERR(pdata->sysclk)) {
1411- dev_err(dev, "dma devm_clk_get failed\n");
1412- ret = PTR_ERR(pdata->sysclk);
1413- goto err_io;
1414- }
1415-
1416- /* Obtain the PTP clock setting */
1417- pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
1418- if (IS_ERR(pdata->ptpclk)) {
1419- dev_err(dev, "ptp devm_clk_get failed\n");
1420- ret = PTR_ERR(pdata->ptpclk);
1421- goto err_io;
1422- }
1423-
1424 /* Obtain the mmio areas for the device */
1425 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1426 pdata->xgmac_regs = devm_ioremap_resource(dev, res);
1427@@ -247,16 +329,42 @@ static int xgbe_probe(struct platform_device *pdev)
1428 }
1429 DBGPR(" xpcs_regs = %p\n", pdata->xpcs_regs);
1430
1431- /* Set the DMA mask */
1432- if (!dev->dma_mask)
1433- dev->dma_mask = &dev->coherent_dma_mask;
1434- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
1435- if (ret) {
1436- dev_err(dev, "dma_set_mask_and_coherent failed\n");
1437+ /* Retrieve the MAC address */
1438+ ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
1439+ pdata->mac_addr,
1440+ sizeof(pdata->mac_addr));
1441+ if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
1442+ dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
1443+ if (!ret)
1444+ ret = -EINVAL;
1445 goto err_io;
1446 }
1447
1448- if (of_property_read_bool(dev->of_node, "dma-coherent")) {
1449+ /* Retrieve the PHY mode - it must be "xgmii" */
1450+ ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
1451+ &phy_mode);
1452+ if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
1453+ dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
1454+ if (!ret)
1455+ ret = -EINVAL;
1456+ goto err_io;
1457+ }
1458+ pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;
1459+
1460+ /* Check for per channel interrupt support */
1461+ if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
1462+ pdata->per_channel_irq = 1;
1463+
1464+ /* Obtain device settings unique to ACPI/OF */
1465+ if (pdata->use_acpi)
1466+ ret = xgbe_acpi_support(pdata);
1467+ else
1468+ ret = xgbe_of_support(pdata);
1469+ if (ret)
1470+ goto err_io;
1471+
1472+ /* Set the DMA coherency values */
1473+ if (pdata->coherent) {
1474 pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
1475 pdata->arcache = XGBE_DMA_OS_ARCACHE;
1476 pdata->awcache = XGBE_DMA_OS_AWCACHE;
1477@@ -266,10 +374,7 @@ static int xgbe_probe(struct platform_device *pdev)
1478 pdata->awcache = XGBE_DMA_SYS_AWCACHE;
1479 }
1480
1481- /* Check for per channel interrupt support */
1482- if (of_property_read_bool(dev->of_node, XGBE_DMA_IRQS))
1483- pdata->per_channel_irq = 1;
1484-
1485+ /* Get the device interrupt */
1486 ret = platform_get_irq(pdev, 0);
1487 if (ret < 0) {
1488 dev_err(dev, "platform_get_irq 0 failed\n");
1489@@ -279,6 +384,7 @@ static int xgbe_probe(struct platform_device *pdev)
1490
1491 netdev->irq = pdata->dev_irq;
1492 netdev->base_addr = (unsigned long)pdata->xgmac_regs;
1493+ memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
1494
1495 /* Set all the function pointers */
1496 xgbe_init_all_fptrs(pdata);
1497@@ -291,26 +397,19 @@ static int xgbe_probe(struct platform_device *pdev)
1498 /* Populate the hardware features */
1499 xgbe_get_all_hw_features(pdata);
1500
1501- /* Retrieve the MAC address */
1502- mac_addr = of_get_mac_address(dev->of_node);
1503- if (!mac_addr) {
1504- dev_err(dev, "invalid mac address for this device\n");
1505- ret = -EINVAL;
1506- goto err_io;
1507- }
1508- memcpy(netdev->dev_addr, mac_addr, netdev->addr_len);
1509+ /* Set default configuration data */
1510+ xgbe_default_config(pdata);
1511
1512- /* Retrieve the PHY mode - it must be "xgmii" */
1513- pdata->phy_mode = of_get_phy_mode(dev->of_node);
1514- if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
1515- dev_err(dev, "invalid phy-mode specified for this device\n");
1516- ret = -EINVAL;
1517+ /* Set the DMA mask */
1518+ if (!dev->dma_mask)
1519+ dev->dma_mask = &dev->coherent_dma_mask;
1520+ ret = dma_set_mask_and_coherent(dev,
1521+ DMA_BIT_MASK(pdata->hw_feat.dma_width));
1522+ if (ret) {
1523+ dev_err(dev, "dma_set_mask_and_coherent failed\n");
1524 goto err_io;
1525 }
1526
1527- /* Set default configuration data */
1528- xgbe_default_config(pdata);
1529-
1530 /* Calculate the number of Tx and Rx rings to be created
1531 * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
1532 * the number of Tx queues to the number of Tx channels
1533@@ -392,6 +491,9 @@ static int xgbe_probe(struct platform_device *pdev)
1534
1535 netdev->priv_flags |= IFF_UNICAST_FLT;
1536
1537+ /* Use default watchdog timeout */
1538+ netdev->watchdog_timeo = 0;
1539+
1540 xgbe_init_rx_coalesce(pdata);
1541 xgbe_init_tx_coalesce(pdata);
1542
1543@@ -491,18 +593,35 @@ static int xgbe_resume(struct device *dev)
1544 }
1545 #endif /* CONFIG_PM */
1546
1547+#ifdef CONFIG_ACPI
1548+static const struct acpi_device_id xgbe_acpi_match[] = {
1549+ { "AMDI8001", 0 },
1550+ {},
1551+};
1552+
1553+MODULE_DEVICE_TABLE(acpi, xgbe_acpi_match);
1554+#endif
1555+
1556+#ifdef CONFIG_OF
1557 static const struct of_device_id xgbe_of_match[] = {
1558 { .compatible = "amd,xgbe-seattle-v1a", },
1559 {},
1560 };
1561
1562 MODULE_DEVICE_TABLE(of, xgbe_of_match);
1563+#endif
1564+
1565 static SIMPLE_DEV_PM_OPS(xgbe_pm_ops, xgbe_suspend, xgbe_resume);
1566
1567 static struct platform_driver xgbe_driver = {
1568 .driver = {
1569 .name = "amd-xgbe",
1570+#ifdef CONFIG_ACPI
1571+ .acpi_match_table = xgbe_acpi_match,
1572+#endif
1573+#ifdef CONFIG_OF
1574 .of_match_table = xgbe_of_match,
1575+#endif
1576 .pm = &xgbe_pm_ops,
1577 },
1578 .probe = xgbe_probe,
1579diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
1580index 363b210..59e267f 100644
1581--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
1582+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
1583@@ -205,25 +205,16 @@ void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
1584
1585 int xgbe_mdio_register(struct xgbe_prv_data *pdata)
1586 {
1587- struct device_node *phy_node;
1588 struct mii_bus *mii;
1589 struct phy_device *phydev;
1590 int ret = 0;
1591
1592 DBGPR("-->xgbe_mdio_register\n");
1593
1594- /* Retrieve the phy-handle */
1595- phy_node = of_parse_phandle(pdata->dev->of_node, "phy-handle", 0);
1596- if (!phy_node) {
1597- dev_err(pdata->dev, "unable to parse phy-handle\n");
1598- return -EINVAL;
1599- }
1600-
1601 mii = mdiobus_alloc();
1602- if (mii == NULL) {
1603+ if (!mii) {
1604 dev_err(pdata->dev, "mdiobus_alloc failed\n");
1605- ret = -ENOMEM;
1606- goto err_node_get;
1607+ return -ENOMEM;
1608 }
1609
1610 /* Register on the MDIO bus (don't probe any PHYs) */
1611@@ -252,18 +243,19 @@ int xgbe_mdio_register(struct xgbe_prv_data *pdata)
1612 request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
1613 MDIO_ID_ARGS(phydev->c45_ids.device_ids[MDIO_MMD_PCS]));
1614
1615- of_node_get(phy_node);
1616- phydev->dev.of_node = phy_node;
1617 ret = phy_device_register(phydev);
1618 if (ret) {
1619 dev_err(pdata->dev, "phy_device_register failed\n");
1620- of_node_put(phy_node);
1621+ goto err_phy_device;
1622+ }
1623+ if (!phydev->dev.driver) {
1624+ dev_err(pdata->dev, "phy driver probe failed\n");
1625+ ret = -EIO;
1626 goto err_phy_device;
1627 }
1628
1629 /* Add a reference to the PHY driver so it can't be unloaded */
1630- pdata->phy_module = phydev->dev.driver ?
1631- phydev->dev.driver->owner : NULL;
1632+ pdata->phy_module = phydev->dev.driver->owner;
1633 if (!try_module_get(pdata->phy_module)) {
1634 dev_err(pdata->dev, "try_module_get failed\n");
1635 ret = -EIO;
1636@@ -283,8 +275,6 @@ int xgbe_mdio_register(struct xgbe_prv_data *pdata)
1637
1638 pdata->phydev = phydev;
1639
1640- of_node_put(phy_node);
1641-
1642 DBGPHY_REGS(pdata);
1643
1644 DBGPR("<--xgbe_mdio_register\n");
1645@@ -300,9 +290,6 @@ err_mdiobus_register:
1646 err_mdiobus_alloc:
1647 mdiobus_free(mii);
1648
1649-err_node_get:
1650- of_node_put(phy_node);
1651-
1652 return ret;
1653 }
1654
1655diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
1656index a1bf9d1c..f0d0ac6 100644
1657--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
1658+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
1659@@ -171,21 +171,15 @@ static int xgbe_adjtime(struct ptp_clock_info *info, s64 delta)
1660 struct xgbe_prv_data,
1661 ptp_clock_info);
1662 unsigned long flags;
1663- u64 nsec;
1664
1665 spin_lock_irqsave(&pdata->tstamp_lock, flags);
1666-
1667- nsec = timecounter_read(&pdata->tstamp_tc);
1668-
1669- nsec += delta;
1670- timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec);
1671-
1672+ timecounter_adjtime(&pdata->tstamp_tc, delta);
1673 spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
1674
1675 return 0;
1676 }
1677
1678-static int xgbe_gettime(struct ptp_clock_info *info, struct timespec *ts)
1679+static int xgbe_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
1680 {
1681 struct xgbe_prv_data *pdata = container_of(info,
1682 struct xgbe_prv_data,
1683@@ -199,12 +193,13 @@ static int xgbe_gettime(struct ptp_clock_info *info, struct timespec *ts)
1684
1685 spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
1686
1687- *ts = ns_to_timespec(nsec);
1688+ *ts = ns_to_timespec64(nsec);
1689
1690 return 0;
1691 }
1692
1693-static int xgbe_settime(struct ptp_clock_info *info, const struct timespec *ts)
1694+static int xgbe_settime(struct ptp_clock_info *info,
1695+ const struct timespec64 *ts)
1696 {
1697 struct xgbe_prv_data *pdata = container_of(info,
1698 struct xgbe_prv_data,
1699@@ -212,7 +207,7 @@ static int xgbe_settime(struct ptp_clock_info *info, const struct timespec *ts)
1700 unsigned long flags;
1701 u64 nsec;
1702
1703- nsec = timespec_to_ns(ts);
1704+ nsec = timespec64_to_ns(ts);
1705
1706 spin_lock_irqsave(&pdata->tstamp_lock, flags);
1707
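The PTP callbacks move to timespec64, keeping gettime/settime year-2038 safe on 32-bit builds, and the open-coded read/add/re-init sequence in xgbe_adjtime() collapses into timecounter_adjtime(). The include/linux/clocksource.h hunk listed in the diffstat backports that helper; upstream it is essentially this one-liner:

    /* assumed shape of the backported helper, matching upstream:
     * advance the timecounter's notion of "now" by delta ns */
    static inline void timecounter_adjtime(struct timecounter *tc, s64 delta)
    {
            tc->nsec += delta;
    }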
1708@@ -239,7 +234,7 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
1709 snprintf(info->name, sizeof(info->name), "%s",
1710 netdev_name(pdata->netdev));
1711 info->owner = THIS_MODULE;
1712- info->max_adj = clk_get_rate(pdata->ptpclk);
1713+ info->max_adj = pdata->ptpclk_rate;
1714 info->adjfreq = xgbe_adjfreq;
1715 info->adjtime = xgbe_adjtime;
1716 info->gettime = xgbe_gettime;
1717@@ -260,7 +255,7 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
1718 */
1719 dividend = 50000000;
1720 dividend <<= 32;
1721- pdata->tstamp_addend = div_u64(dividend, clk_get_rate(pdata->ptpclk));
1722+ pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate);
1723
1724 /* Setup the timecounter */
1725 cc->read = xgbe_cc_read;
1726diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
1727index f9ec762..2ef3ffb 100644
1728--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
1729+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
1730@@ -182,10 +182,18 @@
1731 #define XGBE_PHY_NAME "amd_xgbe_phy"
1732 #define XGBE_PRTAD 0
1733
1734+/* Common property names */
1735+#define XGBE_MAC_ADDR_PROPERTY "mac-address"
1736+#define XGBE_PHY_MODE_PROPERTY "phy-mode"
1737+#define XGBE_DMA_IRQS_PROPERTY "amd,per-channel-interrupt"
1738+
1739 /* Device-tree clock names */
1740 #define XGBE_DMA_CLOCK "dma_clk"
1741 #define XGBE_PTP_CLOCK "ptp_clk"
1742-#define XGBE_DMA_IRQS "amd,per-channel-interrupt"
1743+
1744+/* ACPI property names */
1745+#define XGBE_ACPI_DMA_FREQ "amd,dma-freq"
1746+#define XGBE_ACPI_PTP_FREQ "amd,ptp-freq"
1747
1748 /* Timestamp support - values based on 50MHz PTP clock
1749 * 50MHz => 20 nsec
1750@@ -214,7 +222,7 @@
1751 ((_idx) & ((_ring)->rdesc_count - 1)))
1752
1753 /* Default coalescing parameters */
1754-#define XGMAC_INIT_DMA_TX_USECS 50
1755+#define XGMAC_INIT_DMA_TX_USECS 1000
1756 #define XGMAC_INIT_DMA_TX_FRAMES 25
1757
1758 #define XGMAC_MAX_DMA_RIWT 0xff
1759@@ -317,8 +325,6 @@ struct xgbe_ring_data {
1760 struct xgbe_tx_ring_data tx; /* Tx-related data */
1761 struct xgbe_rx_ring_data rx; /* Rx-related data */
1762
1763- unsigned int interrupt; /* Interrupt indicator */
1764-
1765 unsigned int mapped_as_page;
1766
1767 /* Incomplete receive save location. If the budget is exhausted
1768@@ -361,8 +367,7 @@ struct xgbe_ring {
1769 * cur - Tx: index of descriptor to be used for current transfer
1770 * Rx: index of descriptor to check for packet availability
1771 * dirty - Tx: index of descriptor to check for transfer complete
1772- * Rx: count of descriptors in which a packet has been received
1773- * (used with skb_realloc_index to refresh the ring)
1774+ * Rx: index of descriptor to check for buffer reallocation
1775 */
1776 unsigned int cur;
1777 unsigned int dirty;
1778@@ -377,11 +382,6 @@ struct xgbe_ring {
1779 unsigned short cur_mss;
1780 unsigned short cur_vlan_ctag;
1781 } tx;
1782-
1783- struct {
1784- unsigned int realloc_index;
1785- unsigned int realloc_threshold;
1786- } rx;
1787 };
1788 } ____cacheline_aligned;
1789
1790@@ -408,7 +408,7 @@ struct xgbe_channel {
1791 unsigned int saved_ier;
1792
1793 unsigned int tx_timer_active;
1794- struct hrtimer tx_timer;
1795+ struct timer_list tx_timer;
1796
1797 struct xgbe_ring *tx_ring;
1798 struct xgbe_ring *rx_ring;
1799@@ -495,10 +495,8 @@ struct xgbe_mmc_stats {
1800 struct xgbe_hw_if {
1801 int (*tx_complete)(struct xgbe_ring_desc *);
1802
1803- int (*set_promiscuous_mode)(struct xgbe_prv_data *, unsigned int);
1804- int (*set_all_multicast_mode)(struct xgbe_prv_data *, unsigned int);
1805- int (*add_mac_addresses)(struct xgbe_prv_data *);
1806 int (*set_mac_address)(struct xgbe_prv_data *, u8 *addr);
1807+ int (*config_rx_mode)(struct xgbe_prv_data *);
1808
1809 int (*enable_rx_csum)(struct xgbe_prv_data *);
1810 int (*disable_rx_csum)(struct xgbe_prv_data *);
1811@@ -534,8 +532,9 @@ struct xgbe_hw_if {
1812 int (*dev_read)(struct xgbe_channel *);
1813 void (*tx_desc_init)(struct xgbe_channel *);
1814 void (*rx_desc_init)(struct xgbe_channel *);
1815- void (*rx_desc_reset)(struct xgbe_ring_data *);
1816 void (*tx_desc_reset)(struct xgbe_ring_data *);
1817+ void (*rx_desc_reset)(struct xgbe_prv_data *, struct xgbe_ring_data *,
1818+ unsigned int);
1819 int (*is_last_desc)(struct xgbe_ring_desc *);
1820 int (*is_context_desc)(struct xgbe_ring_desc *);
1821 void (*tx_start_xmit)(struct xgbe_channel *, struct xgbe_ring *);
1822@@ -596,7 +595,8 @@ struct xgbe_desc_if {
1823 int (*alloc_ring_resources)(struct xgbe_prv_data *);
1824 void (*free_ring_resources)(struct xgbe_prv_data *);
1825 int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *);
1826- void (*realloc_rx_buffer)(struct xgbe_channel *);
1827+ int (*map_rx_buffer)(struct xgbe_prv_data *, struct xgbe_ring *,
1828+ struct xgbe_ring_data *);
1829 void (*unmap_rdata)(struct xgbe_prv_data *, struct xgbe_ring_data *);
1830 void (*wrapper_tx_desc_init)(struct xgbe_prv_data *);
1831 void (*wrapper_rx_desc_init)(struct xgbe_prv_data *);
1832@@ -617,7 +617,7 @@ struct xgbe_hw_features {
1833 unsigned int mgk; /* PMT magic packet */
1834 unsigned int mmc; /* RMON module */
1835 unsigned int aoe; /* ARP Offload */
1836- unsigned int ts; /* IEEE 1588-2008 Adavanced Timestamp */
1837+ unsigned int ts; /* IEEE 1588-2008 Advanced Timestamp */
1838 unsigned int eee; /* Energy Efficient Ethernet */
1839 unsigned int tx_coe; /* Tx Checksum Offload */
1840 unsigned int rx_coe; /* Rx Checksum Offload */
1841@@ -629,6 +629,7 @@ struct xgbe_hw_features {
1842 unsigned int rx_fifo_size; /* MTL Receive FIFO Size */
1843 unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */
1844 unsigned int adv_ts_hi; /* Advance Timestamping High Word */
1845+ unsigned int dma_width; /* DMA width */
1846 unsigned int dcb; /* DCB Feature */
1847 unsigned int sph; /* Split Header Feature */
1848 unsigned int tso; /* TCP Segmentation Offload */
1849@@ -650,8 +651,12 @@ struct xgbe_hw_features {
1850 struct xgbe_prv_data {
1851 struct net_device *netdev;
1852 struct platform_device *pdev;
1853+ struct acpi_device *adev;
1854 struct device *dev;
1855
1856+ /* ACPI or DT flag */
1857+ unsigned int use_acpi;
1858+
1859 /* XGMAC/XPCS related mmio registers */
1860 void __iomem *xgmac_regs; /* XGMAC CSRs */
1861 void __iomem *xpcs_regs; /* XPCS MMD registers */
1862@@ -672,6 +677,7 @@ struct xgbe_prv_data {
1863 struct xgbe_desc_if desc_if;
1864
1865 /* AXI DMA settings */
1866+ unsigned int coherent;
1867 unsigned int axdomain;
1868 unsigned int arcache;
1869 unsigned int awcache;
1870@@ -707,6 +713,7 @@ struct xgbe_prv_data {
1871
1872 /* Rx coalescing settings */
1873 unsigned int rx_riwt;
1874+ unsigned int rx_usecs;
1875 unsigned int rx_frames;
1876
1877 /* Current Rx buffer size */
1878@@ -739,6 +746,7 @@ struct xgbe_prv_data {
1879 unsigned int phy_rx_pause;
1880
1881 /* Netdev related settings */
1882+ unsigned char mac_addr[ETH_ALEN];
1883 netdev_features_t netdev_features;
1884 struct napi_struct napi;
1885 struct xgbe_mmc_stats mmc_stats;
1886@@ -748,7 +756,9 @@ struct xgbe_prv_data {
1887
1888 /* Device clocks */
1889 struct clk *sysclk;
1890+ unsigned long sysclk_rate;
1891 struct clk *ptpclk;
1892+ unsigned long ptpclk_rate;
1893
1894 /* Timestamp support */
1895 spinlock_t tstamp_lock;
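
The new adev/use_acpi fields let the rest of the driver pick between ACPI and
device-tree configuration with a single test. A sketch of how such a flag can
be derived, assuming only the two struct xgbe_prv_data fields added above
(example_detect_fw_interface is a hypothetical helper):

#include <linux/acpi.h>
#include <linux/device.h>

static void example_detect_fw_interface(struct xgbe_prv_data *pdata,
					struct device *dev)
{
	/* ACPI_COMPANION() is NULL when the device was not enumerated
	 * via ACPI; acpi_disabled covers acpi=off on an ACPI system.
	 */
	pdata->adev = ACPI_COMPANION(dev);
	pdata->use_acpi = (!acpi_disabled && pdata->adev) ? 1 : 0;
}
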
1896diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
1897index 903dc3d..34a75cb 100644
1898--- a/drivers/net/phy/amd-xgbe-phy.c
1899+++ b/drivers/net/phy/amd-xgbe-phy.c
1900@@ -60,6 +60,7 @@
1901 #include <linux/interrupt.h>
1902 #include <linux/init.h>
1903 #include <linux/delay.h>
1904+#include <linux/workqueue.h>
1905 #include <linux/netdevice.h>
1906 #include <linux/etherdevice.h>
1907 #include <linux/skbuff.h>
1908@@ -74,6 +75,10 @@
1909 #include <linux/of_platform.h>
1910 #include <linux/of_device.h>
1911 #include <linux/uaccess.h>
1912+#include <linux/bitops.h>
1913+#include <linux/property.h>
1914+#include <linux/acpi.h>
1915+#include <linux/jiffies.h>
1916
1917 MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
1918 MODULE_LICENSE("Dual BSD/GPL");
1919@@ -84,22 +89,47 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
1920 #define XGBE_PHY_MASK 0xfffffff0
1921
1922 #define XGBE_PHY_SPEEDSET_PROPERTY "amd,speed-set"
1923+#define XGBE_PHY_BLWC_PROPERTY "amd,serdes-blwc"
1924+#define XGBE_PHY_CDR_RATE_PROPERTY "amd,serdes-cdr-rate"
1925+#define XGBE_PHY_PQ_SKEW_PROPERTY "amd,serdes-pq-skew"
1926+#define XGBE_PHY_TX_AMP_PROPERTY "amd,serdes-tx-amp"
1927+#define XGBE_PHY_DFE_CFG_PROPERTY "amd,serdes-dfe-tap-config"
1928+#define XGBE_PHY_DFE_ENA_PROPERTY "amd,serdes-dfe-tap-enable"
1929+
1930+#define XGBE_PHY_SPEEDS 3
1931+#define XGBE_PHY_SPEED_1000 0
1932+#define XGBE_PHY_SPEED_2500 1
1933+#define XGBE_PHY_SPEED_10000 2
1934+
1935+#define XGBE_AN_MS_TIMEOUT 500
1936
1937 #define XGBE_AN_INT_CMPLT 0x01
1938 #define XGBE_AN_INC_LINK 0x02
1939 #define XGBE_AN_PG_RCV 0x04
1940+#define XGBE_AN_INT_MASK 0x07
1941
1942 #define XNP_MCF_NULL_MESSAGE 0x001
1943-#define XNP_ACK_PROCESSED (1 << 12)
1944-#define XNP_MP_FORMATTED (1 << 13)
1945-#define XNP_NP_EXCHANGE (1 << 15)
1946+#define XNP_ACK_PROCESSED BIT(12)
1947+#define XNP_MP_FORMATTED BIT(13)
1948+#define XNP_NP_EXCHANGE BIT(15)
1949
1950 #define XGBE_PHY_RATECHANGE_COUNT 500
1951
1952+#define XGBE_PHY_KR_TRAINING_START 0x01
1953+#define XGBE_PHY_KR_TRAINING_ENABLE 0x02
1954+
1955+#define XGBE_PHY_FEC_ENABLE 0x01
1956+#define XGBE_PHY_FEC_FORWARD 0x02
1957+#define XGBE_PHY_FEC_MASK 0x03
1958+
1959 #ifndef MDIO_PMA_10GBR_PMD_CTRL
1960 #define MDIO_PMA_10GBR_PMD_CTRL 0x0096
1961 #endif
1962
1963+#ifndef MDIO_PMA_10GBR_FEC_ABILITY
1964+#define MDIO_PMA_10GBR_FEC_ABILITY 0x00aa
1965+#endif
1966+
1967 #ifndef MDIO_PMA_10GBR_FEC_CTRL
1968 #define MDIO_PMA_10GBR_FEC_CTRL 0x00ab
1969 #endif
1970@@ -108,6 +138,10 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
1971 #define MDIO_AN_XNP 0x0016
1972 #endif
1973
1974+#ifndef MDIO_AN_LPX
1975+#define MDIO_AN_LPX 0x0019
1976+#endif
1977+
1978 #ifndef MDIO_AN_INTMASK
1979 #define MDIO_AN_INTMASK 0x8001
1980 #endif
1981@@ -116,18 +150,10 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
1982 #define MDIO_AN_INT 0x8002
1983 #endif
1984
1985-#ifndef MDIO_AN_KR_CTRL
1986-#define MDIO_AN_KR_CTRL 0x8003
1987-#endif
1988-
1989 #ifndef MDIO_CTRL1_SPEED1G
1990 #define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
1991 #endif
1992
1993-#ifndef MDIO_KR_CTRL_PDETECT
1994-#define MDIO_KR_CTRL_PDETECT 0x01
1995-#endif
1996-
1997 /* SerDes integration register offsets */
1998 #define SIR0_KR_RT_1 0x002c
1999 #define SIR0_STATUS 0x0040
2000@@ -140,10 +166,10 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
2001 #define SIR0_STATUS_RX_READY_WIDTH 1
2002 #define SIR0_STATUS_TX_READY_INDEX 8
2003 #define SIR0_STATUS_TX_READY_WIDTH 1
2004+#define SIR1_SPEED_CDR_RATE_INDEX 12
2005+#define SIR1_SPEED_CDR_RATE_WIDTH 4
2006 #define SIR1_SPEED_DATARATE_INDEX 4
2007 #define SIR1_SPEED_DATARATE_WIDTH 2
2008-#define SIR1_SPEED_PI_SPD_SEL_INDEX 12
2009-#define SIR1_SPEED_PI_SPD_SEL_WIDTH 4
2010 #define SIR1_SPEED_PLLSEL_INDEX 3
2011 #define SIR1_SPEED_PLLSEL_WIDTH 1
2012 #define SIR1_SPEED_RATECHANGE_INDEX 6
2013@@ -153,42 +179,52 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
2014 #define SIR1_SPEED_WORDMODE_INDEX 0
2015 #define SIR1_SPEED_WORDMODE_WIDTH 3
2016
2017+#define SPEED_10000_BLWC 0
2018 #define SPEED_10000_CDR 0x7
2019 #define SPEED_10000_PLL 0x1
2020+#define SPEED_10000_PQ 0x12
2021 #define SPEED_10000_RATE 0x0
2022 #define SPEED_10000_TXAMP 0xa
2023 #define SPEED_10000_WORD 0x7
2024+#define SPEED_10000_DFE_TAP_CONFIG 0x1
2025+#define SPEED_10000_DFE_TAP_ENABLE 0x7f
2026
2027+#define SPEED_2500_BLWC 1
2028 #define SPEED_2500_CDR 0x2
2029 #define SPEED_2500_PLL 0x0
2030+#define SPEED_2500_PQ 0xa
2031 #define SPEED_2500_RATE 0x1
2032 #define SPEED_2500_TXAMP 0xf
2033 #define SPEED_2500_WORD 0x1
2034+#define SPEED_2500_DFE_TAP_CONFIG 0x3
2035+#define SPEED_2500_DFE_TAP_ENABLE 0x0
2036
2037+#define SPEED_1000_BLWC 1
2038 #define SPEED_1000_CDR 0x2
2039 #define SPEED_1000_PLL 0x0
2040+#define SPEED_1000_PQ 0xa
2041 #define SPEED_1000_RATE 0x3
2042 #define SPEED_1000_TXAMP 0xf
2043 #define SPEED_1000_WORD 0x1
2044+#define SPEED_1000_DFE_TAP_CONFIG 0x3
2045+#define SPEED_1000_DFE_TAP_ENABLE 0x0
2046
2047 /* SerDes RxTx register offsets */
2048+#define RXTX_REG6 0x0018
2049 #define RXTX_REG20 0x0050
2050+#define RXTX_REG22 0x0058
2051 #define RXTX_REG114 0x01c8
2052+#define RXTX_REG129 0x0204
2053
2054 /* SerDes RxTx register entry bit positions and sizes */
2055+#define RXTX_REG6_RESETB_RXD_INDEX 8
2056+#define RXTX_REG6_RESETB_RXD_WIDTH 1
2057 #define RXTX_REG20_BLWC_ENA_INDEX 2
2058 #define RXTX_REG20_BLWC_ENA_WIDTH 1
2059 #define RXTX_REG114_PQ_REG_INDEX 9
2060 #define RXTX_REG114_PQ_REG_WIDTH 7
2061-
2062-#define RXTX_10000_BLWC 0
2063-#define RXTX_10000_PQ 0x1e
2064-
2065-#define RXTX_2500_BLWC 1
2066-#define RXTX_2500_PQ 0xa
2067-
2068-#define RXTX_1000_BLWC 1
2069-#define RXTX_1000_PQ 0xa
2070+#define RXTX_REG129_RXDFE_CONFIG_INDEX 14
2071+#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2
2072
2073 /* Bit setting and getting macros
2074 * The get macro will extract the current bit field value from within
2075@@ -291,23 +327,56 @@ do { \
2076 XRXTX_IOWRITE((_priv), _reg, reg_val); \
2077 } while (0)
2078
2079+static const u32 amd_xgbe_phy_serdes_blwc[] = {
2080+ SPEED_1000_BLWC,
2081+ SPEED_2500_BLWC,
2082+ SPEED_10000_BLWC,
2083+};
2084+
2085+static const u32 amd_xgbe_phy_serdes_cdr_rate[] = {
2086+ SPEED_1000_CDR,
2087+ SPEED_2500_CDR,
2088+ SPEED_10000_CDR,
2089+};
2090+
2091+static const u32 amd_xgbe_phy_serdes_pq_skew[] = {
2092+ SPEED_1000_PQ,
2093+ SPEED_2500_PQ,
2094+ SPEED_10000_PQ,
2095+};
2096+
2097+static const u32 amd_xgbe_phy_serdes_tx_amp[] = {
2098+ SPEED_1000_TXAMP,
2099+ SPEED_2500_TXAMP,
2100+ SPEED_10000_TXAMP,
2101+};
2102+
2103+static const u32 amd_xgbe_phy_serdes_dfe_tap_cfg[] = {
2104+ SPEED_1000_DFE_TAP_CONFIG,
2105+ SPEED_2500_DFE_TAP_CONFIG,
2106+ SPEED_10000_DFE_TAP_CONFIG,
2107+};
2108+
2109+static const u32 amd_xgbe_phy_serdes_dfe_tap_ena[] = {
2110+ SPEED_1000_DFE_TAP_ENABLE,
2111+ SPEED_2500_DFE_TAP_ENABLE,
2112+ SPEED_10000_DFE_TAP_ENABLE,
2113+};
2114+
2115 enum amd_xgbe_phy_an {
2116 AMD_XGBE_AN_READY = 0,
2117- AMD_XGBE_AN_START,
2118- AMD_XGBE_AN_EVENT,
2119 AMD_XGBE_AN_PAGE_RECEIVED,
2120 AMD_XGBE_AN_INCOMPAT_LINK,
2121 AMD_XGBE_AN_COMPLETE,
2122 AMD_XGBE_AN_NO_LINK,
2123- AMD_XGBE_AN_EXIT,
2124 AMD_XGBE_AN_ERROR,
2125 };
2126
2127 enum amd_xgbe_phy_rx {
2128- AMD_XGBE_RX_READY = 0,
2129- AMD_XGBE_RX_BPA,
2130+ AMD_XGBE_RX_BPA = 0,
2131 AMD_XGBE_RX_XNP,
2132 AMD_XGBE_RX_COMPLETE,
2133+ AMD_XGBE_RX_ERROR,
2134 };
2135
2136 enum amd_xgbe_phy_mode {
2137@@ -316,12 +385,13 @@ enum amd_xgbe_phy_mode {
2138 };
2139
2140 enum amd_xgbe_phy_speedset {
2141- AMD_XGBE_PHY_SPEEDSET_1000_10000,
2142+ AMD_XGBE_PHY_SPEEDSET_1000_10000 = 0,
2143 AMD_XGBE_PHY_SPEEDSET_2500_10000,
2144 };
2145
2146 struct amd_xgbe_phy_priv {
2147 struct platform_device *pdev;
2148+ struct acpi_device *adev;
2149 struct device *dev;
2150
2151 struct phy_device *phydev;
2152@@ -336,10 +406,26 @@ struct amd_xgbe_phy_priv {
2153 void __iomem *sir0_regs; /* SerDes integration registers (1/2) */
2154 void __iomem *sir1_regs; /* SerDes integration registers (2/2) */
2155
2156- /* Maintain link status for re-starting auto-negotiation */
2157- unsigned int link;
2158+ int an_irq;
2159+ char an_irq_name[IFNAMSIZ + 32];
2160+ struct work_struct an_irq_work;
2161+ unsigned int an_irq_allocated;
2162+
2163 unsigned int speed_set;
2164
2165+ /* SerDes UEFI-configurable settings.
2166+ * Switching between modes/speeds requires new values for some
2167+ * SerDes settings. The values can be supplied as device
2168+ * properties in array format. The first array entry is for
2169+ * 1GbE, the second for 2.5GbE and the third for 10GbE.
2170+ */
2171+ u32 serdes_blwc[XGBE_PHY_SPEEDS];
2172+ u32 serdes_cdr_rate[XGBE_PHY_SPEEDS];
2173+ u32 serdes_pq_skew[XGBE_PHY_SPEEDS];
2174+ u32 serdes_tx_amp[XGBE_PHY_SPEEDS];
2175+ u32 serdes_dfe_tap_cfg[XGBE_PHY_SPEEDS];
2176+ u32 serdes_dfe_tap_ena[XGBE_PHY_SPEEDS];
2177+
2178 /* Auto-negotiation state machine support */
2179 struct mutex an_mutex;
2180 enum amd_xgbe_phy_an an_result;
2181@@ -348,7 +434,12 @@ struct amd_xgbe_phy_priv {
2182 enum amd_xgbe_phy_rx kx_state;
2183 struct work_struct an_work;
2184 struct workqueue_struct *an_workqueue;
2185+ unsigned int an_supported;
2186 unsigned int parallel_detect;
2187+ unsigned int fec_ability;
2188+ unsigned long an_start;
2189+
2190+ unsigned int lpm_ctrl; /* CTRL1 for resume */
2191 };
2192
2193 static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev)
2194@@ -359,7 +450,7 @@ static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev)
2195 if (ret < 0)
2196 return ret;
2197
2198- ret |= 0x02;
2199+ ret |= XGBE_PHY_KR_TRAINING_ENABLE;
2200 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
2201
2202 return 0;
2203@@ -373,7 +464,7 @@ static int amd_xgbe_an_disable_kr_training(struct phy_device *phydev)
2204 if (ret < 0)
2205 return ret;
2206
2207- ret &= ~0x02;
2208+ ret &= ~XGBE_PHY_KR_TRAINING_ENABLE;
2209 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
2210
2211 return 0;
2212@@ -423,11 +514,16 @@ static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
2213 status = XSIR0_IOREAD(priv, SIR0_STATUS);
2214 if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
2215 XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
2216- return;
2217+ goto rx_reset;
2218 }
2219
2220 netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n",
2221 status);
2222+
2223+rx_reset:
2224+ /* Perform Rx reset for the DFE changes */
2225+ XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 0);
2226+ XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 1);
2227 }
2228
2229 static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
2230@@ -466,12 +562,20 @@ static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
2231
2232 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_10000_RATE);
2233 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_10000_WORD);
2234- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_10000_TXAMP);
2235 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_10000_PLL);
2236- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_10000_CDR);
2237
2238- XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_10000_BLWC);
2239- XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_10000_PQ);
2240+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
2241+ priv->serdes_cdr_rate[XGBE_PHY_SPEED_10000]);
2242+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
2243+ priv->serdes_tx_amp[XGBE_PHY_SPEED_10000]);
2244+ XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
2245+ priv->serdes_blwc[XGBE_PHY_SPEED_10000]);
2246+ XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
2247+ priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]);
2248+ XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
2249+ priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_10000]);
2250+ XRXTX_IOWRITE(priv, RXTX_REG22,
2251+ priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_10000]);
2252
2253 amd_xgbe_phy_serdes_complete_ratechange(phydev);
2254
2255@@ -514,12 +618,20 @@ static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
2256
2257 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_2500_RATE);
2258 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_2500_WORD);
2259- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_2500_TXAMP);
2260 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_2500_PLL);
2261- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_2500_CDR);
2262
2263- XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_2500_BLWC);
2264- XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_2500_PQ);
2265+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
2266+ priv->serdes_cdr_rate[XGBE_PHY_SPEED_2500]);
2267+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
2268+ priv->serdes_tx_amp[XGBE_PHY_SPEED_2500]);
2269+ XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
2270+ priv->serdes_blwc[XGBE_PHY_SPEED_2500]);
2271+ XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
2272+ priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]);
2273+ XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
2274+ priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_2500]);
2275+ XRXTX_IOWRITE(priv, RXTX_REG22,
2276+ priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_2500]);
2277
2278 amd_xgbe_phy_serdes_complete_ratechange(phydev);
2279
2280@@ -562,12 +674,20 @@ static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
2281
2282 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_1000_RATE);
2283 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_1000_WORD);
2284- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_1000_TXAMP);
2285 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_1000_PLL);
2286- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_1000_CDR);
2287
2288- XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_1000_BLWC);
2289- XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_1000_PQ);
2290+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
2291+ priv->serdes_cdr_rate[XGBE_PHY_SPEED_1000]);
2292+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
2293+ priv->serdes_tx_amp[XGBE_PHY_SPEED_1000]);
2294+ XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
2295+ priv->serdes_blwc[XGBE_PHY_SPEED_1000]);
2296+ XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
2297+ priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]);
2298+ XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
2299+ priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_1000]);
2300+ XRXTX_IOWRITE(priv, RXTX_REG22,
2301+ priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_1000]);
2302
2303 amd_xgbe_phy_serdes_complete_ratechange(phydev);
2304
2305@@ -635,6 +755,77 @@ static int amd_xgbe_phy_set_mode(struct phy_device *phydev,
2306 return ret;
2307 }
2308
2309+static bool amd_xgbe_phy_use_xgmii_mode(struct phy_device *phydev)
2310+{
2311+ if (phydev->autoneg == AUTONEG_ENABLE) {
2312+ if (phydev->advertising & ADVERTISED_10000baseKR_Full)
2313+ return true;
2314+ } else {
2315+ if (phydev->speed == SPEED_10000)
2316+ return true;
2317+ }
2318+
2319+ return false;
2320+}
2321+
2322+static bool amd_xgbe_phy_use_gmii_2500_mode(struct phy_device *phydev)
2323+{
2324+ if (phydev->autoneg == AUTONEG_ENABLE) {
2325+ if (phydev->advertising & ADVERTISED_2500baseX_Full)
2326+ return true;
2327+ } else {
2328+ if (phydev->speed == SPEED_2500)
2329+ return true;
2330+ }
2331+
2332+ return false;
2333+}
2334+
2335+static bool amd_xgbe_phy_use_gmii_mode(struct phy_device *phydev)
2336+{
2337+ if (phydev->autoneg == AUTONEG_ENABLE) {
2338+ if (phydev->advertising & ADVERTISED_1000baseKX_Full)
2339+ return true;
2340+ } else {
2341+ if (phydev->speed == SPEED_1000)
2342+ return true;
2343+ }
2344+
2345+ return false;
2346+}
2347+
2348+static int amd_xgbe_phy_set_an(struct phy_device *phydev, bool enable,
2349+ bool restart)
2350+{
2351+ int ret;
2352+
2353+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
2354+ if (ret < 0)
2355+ return ret;
2356+
2357+ ret &= ~MDIO_AN_CTRL1_ENABLE;
2358+
2359+ if (enable)
2360+ ret |= MDIO_AN_CTRL1_ENABLE;
2361+
2362+ if (restart)
2363+ ret |= MDIO_AN_CTRL1_RESTART;
2364+
2365+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
2366+
2367+ return 0;
2368+}
2369+
2370+static int amd_xgbe_phy_restart_an(struct phy_device *phydev)
2371+{
2372+ return amd_xgbe_phy_set_an(phydev, true, true);
2373+}
2374+
2375+static int amd_xgbe_phy_disable_an(struct phy_device *phydev)
2376+{
2377+ return amd_xgbe_phy_set_an(phydev, false, false);
2378+}
2379+
2380 static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
2381 enum amd_xgbe_phy_rx *state)
2382 {
2383@@ -645,7 +836,7 @@ static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
2384
2385 /* If we're not in KR mode then we're done */
2386 if (!amd_xgbe_phy_in_kr_mode(phydev))
2387- return AMD_XGBE_AN_EVENT;
2388+ return AMD_XGBE_AN_PAGE_RECEIVED;
2389
2390 /* Enable/Disable FEC */
2391 ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
2392@@ -660,10 +851,9 @@ static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
2393 if (ret < 0)
2394 return AMD_XGBE_AN_ERROR;
2395
2396+ ret &= ~XGBE_PHY_FEC_MASK;
2397 if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
2398- ret |= 0x01;
2399- else
2400- ret &= ~0x01;
2401+ ret |= priv->fec_ability;
2402
2403 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL, ret);
2404
2405@@ -672,14 +862,17 @@ static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
2406 if (ret < 0)
2407 return AMD_XGBE_AN_ERROR;
2408
2409- XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 1);
2410+ if (ret & XGBE_PHY_KR_TRAINING_ENABLE) {
2411+ XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 1);
2412
2413- ret |= 0x01;
2414- phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
2415+ ret |= XGBE_PHY_KR_TRAINING_START;
2416+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
2417+ ret);
2418
2419- XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 0);
2420+ XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 0);
2421+ }
2422
2423- return AMD_XGBE_AN_EVENT;
2424+ return AMD_XGBE_AN_PAGE_RECEIVED;
2425 }
2426
2427 static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev,
2428@@ -696,7 +889,7 @@ static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev,
2429 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
2430 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP, msg);
2431
2432- return AMD_XGBE_AN_EVENT;
2433+ return AMD_XGBE_AN_PAGE_RECEIVED;
2434 }
2435
2436 static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev,
2437@@ -735,11 +928,11 @@ static enum amd_xgbe_phy_an amd_xgbe_an_rx_xnp(struct phy_device *phydev,
2438 int ad_reg, lp_reg;
2439
2440 /* Check Extended Next Page support */
2441- ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
2442+ ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP);
2443 if (ad_reg < 0)
2444 return AMD_XGBE_AN_ERROR;
2445
2446- lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
2447+ lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPX);
2448 if (lp_reg < 0)
2449 return AMD_XGBE_AN_ERROR;
2450
2451@@ -748,226 +941,271 @@ static enum amd_xgbe_phy_an amd_xgbe_an_rx_xnp(struct phy_device *phydev,
2452 amd_xgbe_an_tx_training(phydev, state);
2453 }
2454
2455-static enum amd_xgbe_phy_an amd_xgbe_an_start(struct phy_device *phydev)
2456+static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
2457 {
2458 struct amd_xgbe_phy_priv *priv = phydev->priv;
2459+ enum amd_xgbe_phy_rx *state;
2460+ unsigned long an_timeout;
2461 int ret;
2462
2463- /* Be sure we aren't looping trying to negotiate */
2464- if (amd_xgbe_phy_in_kr_mode(phydev)) {
2465- if (priv->kr_state != AMD_XGBE_RX_READY)
2466- return AMD_XGBE_AN_NO_LINK;
2467- priv->kr_state = AMD_XGBE_RX_BPA;
2468+ if (!priv->an_start) {
2469+ priv->an_start = jiffies;
2470 } else {
2471- if (priv->kx_state != AMD_XGBE_RX_READY)
2472- return AMD_XGBE_AN_NO_LINK;
2473- priv->kx_state = AMD_XGBE_RX_BPA;
2474+ an_timeout = priv->an_start +
2475+ msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
2476+ if (time_after(jiffies, an_timeout)) {
2477+ /* Auto-negotiation timed out, reset state */
2478+ priv->kr_state = AMD_XGBE_RX_BPA;
2479+ priv->kx_state = AMD_XGBE_RX_BPA;
2480+
2481+ priv->an_start = jiffies;
2482+ }
2483 }
2484
2485- /* Set up Advertisement register 3 first */
2486- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
2487- if (ret < 0)
2488- return AMD_XGBE_AN_ERROR;
2489-
2490- if (phydev->supported & SUPPORTED_10000baseR_FEC)
2491- ret |= 0xc000;
2492- else
2493- ret &= ~0xc000;
2494-
2495- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, ret);
2496+ state = amd_xgbe_phy_in_kr_mode(phydev) ? &priv->kr_state
2497+ : &priv->kx_state;
2498
2499- /* Set up Advertisement register 2 next */
2500- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
2501- if (ret < 0)
2502- return AMD_XGBE_AN_ERROR;
2503+ switch (*state) {
2504+ case AMD_XGBE_RX_BPA:
2505+ ret = amd_xgbe_an_rx_bpa(phydev, state);
2506+ break;
2507
2508- if (phydev->supported & SUPPORTED_10000baseKR_Full)
2509- ret |= 0x80;
2510- else
2511- ret &= ~0x80;
2512+ case AMD_XGBE_RX_XNP:
2513+ ret = amd_xgbe_an_rx_xnp(phydev, state);
2514+ break;
2515
2516- if ((phydev->supported & SUPPORTED_1000baseKX_Full) ||
2517- (phydev->supported & SUPPORTED_2500baseX_Full))
2518- ret |= 0x20;
2519- else
2520- ret &= ~0x20;
2521+ default:
2522+ ret = AMD_XGBE_AN_ERROR;
2523+ }
2524
2525- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, ret);
2526+ return ret;
2527+}
2528
2529- /* Set up Advertisement register 1 last */
2530- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
2531- if (ret < 0)
2532- return AMD_XGBE_AN_ERROR;
2533+static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
2534+{
2535+ struct amd_xgbe_phy_priv *priv = phydev->priv;
2536+ int ret;
2537
2538- if (phydev->supported & SUPPORTED_Pause)
2539- ret |= 0x400;
2540- else
2541- ret &= ~0x400;
2542+ /* Be sure we aren't looping trying to negotiate */
2543+ if (amd_xgbe_phy_in_kr_mode(phydev)) {
2544+ priv->kr_state = AMD_XGBE_RX_ERROR;
2545
2546- if (phydev->supported & SUPPORTED_Asym_Pause)
2547- ret |= 0x800;
2548- else
2549- ret &= ~0x800;
2550+ if (!(phydev->advertising & SUPPORTED_1000baseKX_Full) &&
2551+ !(phydev->advertising & SUPPORTED_2500baseX_Full))
2552+ return AMD_XGBE_AN_NO_LINK;
2553
2554- /* We don't intend to perform XNP */
2555- ret &= ~XNP_NP_EXCHANGE;
2556+ if (priv->kx_state != AMD_XGBE_RX_BPA)
2557+ return AMD_XGBE_AN_NO_LINK;
2558+ } else {
2559+ priv->kx_state = AMD_XGBE_RX_ERROR;
2560
2561- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, ret);
2562+ if (!(phydev->advertising & SUPPORTED_10000baseKR_Full))
2563+ return AMD_XGBE_AN_NO_LINK;
2564
2565- /* Enable and start auto-negotiation */
2566- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
2567+ if (priv->kr_state != AMD_XGBE_RX_BPA)
2568+ return AMD_XGBE_AN_NO_LINK;
2569+ }
2570
2571- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_KR_CTRL);
2572- if (ret < 0)
2573+ ret = amd_xgbe_phy_disable_an(phydev);
2574+ if (ret)
2575 return AMD_XGBE_AN_ERROR;
2576
2577- ret |= MDIO_KR_CTRL_PDETECT;
2578- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_KR_CTRL, ret);
2579-
2580- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
2581- if (ret < 0)
2582+ ret = amd_xgbe_phy_switch_mode(phydev);
2583+ if (ret)
2584 return AMD_XGBE_AN_ERROR;
2585
2586- ret |= MDIO_AN_CTRL1_ENABLE;
2587- ret |= MDIO_AN_CTRL1_RESTART;
2588- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
2589+ ret = amd_xgbe_phy_restart_an(phydev);
2590+ if (ret)
2591+ return AMD_XGBE_AN_ERROR;
2592
2593- return AMD_XGBE_AN_EVENT;
2594+ return AMD_XGBE_AN_INCOMPAT_LINK;
2595 }
2596
2597-static enum amd_xgbe_phy_an amd_xgbe_an_event(struct phy_device *phydev)
2598+static irqreturn_t amd_xgbe_an_isr(int irq, void *data)
2599 {
2600- enum amd_xgbe_phy_an new_state;
2601- int ret;
2602+ struct amd_xgbe_phy_priv *priv = (struct amd_xgbe_phy_priv *)data;
2603
2604- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT);
2605- if (ret < 0)
2606- return AMD_XGBE_AN_ERROR;
2607+ /* Interrupt reason must be read and cleared outside of IRQ context */
2608+ disable_irq_nosync(priv->an_irq);
2609
2610- new_state = AMD_XGBE_AN_EVENT;
2611- if (ret & XGBE_AN_PG_RCV)
2612- new_state = AMD_XGBE_AN_PAGE_RECEIVED;
2613- else if (ret & XGBE_AN_INC_LINK)
2614- new_state = AMD_XGBE_AN_INCOMPAT_LINK;
2615- else if (ret & XGBE_AN_INT_CMPLT)
2616- new_state = AMD_XGBE_AN_COMPLETE;
2617+ queue_work(priv->an_workqueue, &priv->an_irq_work);
2618
2619- if (new_state != AMD_XGBE_AN_EVENT)
2620- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
2621+ return IRQ_HANDLED;
2622+}
2623+
2624+static void amd_xgbe_an_irq_work(struct work_struct *work)
2625+{
2626+ struct amd_xgbe_phy_priv *priv = container_of(work,
2627+ struct amd_xgbe_phy_priv,
2628+ an_irq_work);
2629
2630- return new_state;
2631+ /* Avoid a race between enabling the IRQ and exiting the work by
2632+ * waiting for the work to finish and then queueing it
2633+ */
2634+ flush_work(&priv->an_work);
2635+ queue_work(priv->an_workqueue, &priv->an_work);
2636 }
2637
2638-static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
2639+static void amd_xgbe_an_state_machine(struct work_struct *work)
2640 {
2641- struct amd_xgbe_phy_priv *priv = phydev->priv;
2642- enum amd_xgbe_phy_rx *state;
2643- int ret;
2644+ struct amd_xgbe_phy_priv *priv = container_of(work,
2645+ struct amd_xgbe_phy_priv,
2646+ an_work);
2647+ struct phy_device *phydev = priv->phydev;
2648+ enum amd_xgbe_phy_an cur_state = priv->an_state;
2649+ int int_reg, int_mask;
2650
2651- state = amd_xgbe_phy_in_kr_mode(phydev) ? &priv->kr_state
2652- : &priv->kx_state;
2653+ mutex_lock(&priv->an_mutex);
2654
2655- switch (*state) {
2656- case AMD_XGBE_RX_BPA:
2657- ret = amd_xgbe_an_rx_bpa(phydev, state);
2658+ /* Read the interrupt */
2659+ int_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT);
2660+ if (!int_reg)
2661+ goto out;
2662+
2663+next_int:
2664+ if (int_reg < 0) {
2665+ priv->an_state = AMD_XGBE_AN_ERROR;
2666+ int_mask = XGBE_AN_INT_MASK;
2667+ } else if (int_reg & XGBE_AN_PG_RCV) {
2668+ priv->an_state = AMD_XGBE_AN_PAGE_RECEIVED;
2669+ int_mask = XGBE_AN_PG_RCV;
2670+ } else if (int_reg & XGBE_AN_INC_LINK) {
2671+ priv->an_state = AMD_XGBE_AN_INCOMPAT_LINK;
2672+ int_mask = XGBE_AN_INC_LINK;
2673+ } else if (int_reg & XGBE_AN_INT_CMPLT) {
2674+ priv->an_state = AMD_XGBE_AN_COMPLETE;
2675+ int_mask = XGBE_AN_INT_CMPLT;
2676+ } else {
2677+ priv->an_state = AMD_XGBE_AN_ERROR;
2678+ int_mask = 0;
2679+ }
2680+
2681+ /* Clear the interrupt to be processed */
2682+ int_reg &= ~int_mask;
2683+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, int_reg);
2684+
2685+ priv->an_result = priv->an_state;
2686+
2687+again:
2688+ cur_state = priv->an_state;
2689+
2690+ switch (priv->an_state) {
2691+ case AMD_XGBE_AN_READY:
2692+ priv->an_supported = 0;
2693 break;
2694
2695- case AMD_XGBE_RX_XNP:
2696- ret = amd_xgbe_an_rx_xnp(phydev, state);
2697+ case AMD_XGBE_AN_PAGE_RECEIVED:
2698+ priv->an_state = amd_xgbe_an_page_received(phydev);
2699+ priv->an_supported++;
2700+ break;
2701+
2702+ case AMD_XGBE_AN_INCOMPAT_LINK:
2703+ priv->an_supported = 0;
2704+ priv->parallel_detect = 0;
2705+ priv->an_state = amd_xgbe_an_incompat_link(phydev);
2706+ break;
2707+
2708+ case AMD_XGBE_AN_COMPLETE:
2709+ priv->parallel_detect = priv->an_supported ? 0 : 1;
2710+ netdev_dbg(phydev->attached_dev, "%s successful\n",
2711+ priv->an_supported ? "Auto negotiation"
2712+ : "Parallel detection");
2713+ break;
2714+
2715+ case AMD_XGBE_AN_NO_LINK:
2716 break;
2717
2718 default:
2719- ret = AMD_XGBE_AN_ERROR;
2720+ priv->an_state = AMD_XGBE_AN_ERROR;
2721 }
2722
2723- return ret;
2724-}
2725+ if (priv->an_state == AMD_XGBE_AN_NO_LINK) {
2726+ int_reg = 0;
2727+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
2728+ } else if (priv->an_state == AMD_XGBE_AN_ERROR) {
2729+ netdev_err(phydev->attached_dev,
2730+ "error during auto-negotiation, state=%u\n",
2731+ cur_state);
2732
2733-static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
2734-{
2735- int ret;
2736+ int_reg = 0;
2737+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
2738+ }
2739
2740- ret = amd_xgbe_phy_switch_mode(phydev);
2741- if (ret)
2742- return AMD_XGBE_AN_ERROR;
2743+ if (priv->an_state >= AMD_XGBE_AN_COMPLETE) {
2744+ priv->an_result = priv->an_state;
2745+ priv->an_state = AMD_XGBE_AN_READY;
2746+ priv->kr_state = AMD_XGBE_RX_BPA;
2747+ priv->kx_state = AMD_XGBE_RX_BPA;
2748+ priv->an_start = 0;
2749+ }
2750
2751- return AMD_XGBE_AN_START;
2752-}
2753+ if (cur_state != priv->an_state)
2754+ goto again;
2755
2756-static void amd_xgbe_an_state_machine(struct work_struct *work)
2757-{
2758- struct amd_xgbe_phy_priv *priv = container_of(work,
2759- struct amd_xgbe_phy_priv,
2760- an_work);
2761- struct phy_device *phydev = priv->phydev;
2762- enum amd_xgbe_phy_an cur_state;
2763- int sleep;
2764- unsigned int an_supported = 0;
2765+ if (int_reg)
2766+ goto next_int;
2767
2768- /* Start in KX mode */
2769- if (amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX))
2770- priv->an_state = AMD_XGBE_AN_ERROR;
2771+out:
2772+ enable_irq(priv->an_irq);
2773
2774- while (1) {
2775- mutex_lock(&priv->an_mutex);
2776+ mutex_unlock(&priv->an_mutex);
2777+}
2778
2779- cur_state = priv->an_state;
2780+static int amd_xgbe_an_init(struct phy_device *phydev)
2781+{
2782+ int ret;
2783
2784- switch (priv->an_state) {
2785- case AMD_XGBE_AN_START:
2786- an_supported = 0;
2787- priv->parallel_detect = 0;
2788- priv->an_state = amd_xgbe_an_start(phydev);
2789- break;
2790+ /* Set up Advertisement register 3 first */
2791+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
2792+ if (ret < 0)
2793+ return ret;
2794
2795- case AMD_XGBE_AN_EVENT:
2796- priv->an_state = amd_xgbe_an_event(phydev);
2797- break;
2798+ if (phydev->advertising & SUPPORTED_10000baseR_FEC)
2799+ ret |= 0xc000;
2800+ else
2801+ ret &= ~0xc000;
2802
2803- case AMD_XGBE_AN_PAGE_RECEIVED:
2804- priv->an_state = amd_xgbe_an_page_received(phydev);
2805- an_supported++;
2806- break;
2807+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, ret);
2808
2809- case AMD_XGBE_AN_INCOMPAT_LINK:
2810- priv->an_state = amd_xgbe_an_incompat_link(phydev);
2811- break;
2812+ /* Set up Advertisement register 2 next */
2813+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
2814+ if (ret < 0)
2815+ return ret;
2816
2817- case AMD_XGBE_AN_COMPLETE:
2818- priv->parallel_detect = an_supported ? 0 : 1;
2819- netdev_info(phydev->attached_dev, "%s successful\n",
2820- an_supported ? "Auto negotiation"
2821- : "Parallel detection");
2822- /* fall through */
2823+ if (phydev->advertising & SUPPORTED_10000baseKR_Full)
2824+ ret |= 0x80;
2825+ else
2826+ ret &= ~0x80;
2827
2828- case AMD_XGBE_AN_NO_LINK:
2829- case AMD_XGBE_AN_EXIT:
2830- goto exit_unlock;
2831+ if ((phydev->advertising & SUPPORTED_1000baseKX_Full) ||
2832+ (phydev->advertising & SUPPORTED_2500baseX_Full))
2833+ ret |= 0x20;
2834+ else
2835+ ret &= ~0x20;
2836
2837- default:
2838- priv->an_state = AMD_XGBE_AN_ERROR;
2839- }
2840+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, ret);
2841
2842- if (priv->an_state == AMD_XGBE_AN_ERROR) {
2843- netdev_err(phydev->attached_dev,
2844- "error during auto-negotiation, state=%u\n",
2845- cur_state);
2846- goto exit_unlock;
2847- }
2848+ /* Set up Advertisement register 1 last */
2849+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
2850+ if (ret < 0)
2851+ return ret;
2852
2853- sleep = (priv->an_state == AMD_XGBE_AN_EVENT) ? 1 : 0;
2854+ if (phydev->advertising & SUPPORTED_Pause)
2855+ ret |= 0x400;
2856+ else
2857+ ret &= ~0x400;
2858
2859- mutex_unlock(&priv->an_mutex);
2860+ if (phydev->advertising & SUPPORTED_Asym_Pause)
2861+ ret |= 0x800;
2862+ else
2863+ ret &= ~0x800;
2864
2865- if (sleep)
2866- usleep_range(20, 50);
2867- }
2868+ /* We don't intend to perform XNP */
2869+ ret &= ~XNP_NP_EXCHANGE;
2870
2871-exit_unlock:
2872- priv->an_result = priv->an_state;
2873- priv->an_state = AMD_XGBE_AN_READY;
2874+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, ret);
2875
2876- mutex_unlock(&priv->an_mutex);
2877+ return 0;
2878 }
2879
2880 static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
2881@@ -992,33 +1230,68 @@ static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
2882 if (ret & MDIO_CTRL1_RESET)
2883 return -ETIMEDOUT;
2884
2885- /* Make sure the XPCS and SerDes are in compatible states */
2886- return amd_xgbe_phy_xgmii_mode(phydev);
2887+ /* Disable auto-negotiation for now */
2888+ ret = amd_xgbe_phy_disable_an(phydev);
2889+ if (ret < 0)
2890+ return ret;
2891+
2892+ /* Clear auto-negotiation interrupts */
2893+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
2894+
2895+ return 0;
2896 }
2897
2898 static int amd_xgbe_phy_config_init(struct phy_device *phydev)
2899 {
2900 struct amd_xgbe_phy_priv *priv = phydev->priv;
2901+ struct net_device *netdev = phydev->attached_dev;
2902+ int ret;
2903
2904- /* Initialize supported features */
2905- phydev->supported = SUPPORTED_Autoneg;
2906- phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
2907- phydev->supported |= SUPPORTED_Backplane;
2908- phydev->supported |= SUPPORTED_10000baseKR_Full |
2909- SUPPORTED_10000baseR_FEC;
2910- switch (priv->speed_set) {
2911- case AMD_XGBE_PHY_SPEEDSET_1000_10000:
2912- phydev->supported |= SUPPORTED_1000baseKX_Full;
2913- break;
2914- case AMD_XGBE_PHY_SPEEDSET_2500_10000:
2915- phydev->supported |= SUPPORTED_2500baseX_Full;
2916- break;
2917+ if (!priv->an_irq_allocated) {
2918+ /* Allocate the auto-negotiation workqueue and interrupt */
2919+ snprintf(priv->an_irq_name, sizeof(priv->an_irq_name) - 1,
2920+ "%s-pcs", netdev_name(netdev));
2921+
2922+ priv->an_workqueue =
2923+ create_singlethread_workqueue(priv->an_irq_name);
2924+ if (!priv->an_workqueue) {
2925+ netdev_err(netdev, "phy workqueue creation failed\n");
2926+ return -ENOMEM;
2927+ }
2928+
2929+ ret = devm_request_irq(priv->dev, priv->an_irq,
2930+ amd_xgbe_an_isr, 0, priv->an_irq_name,
2931+ priv);
2932+ if (ret) {
2933+ netdev_err(netdev, "phy irq request failed\n");
2934+ destroy_workqueue(priv->an_workqueue);
2935+ return ret;
2936+ }
2937+
2938+ priv->an_irq_allocated = 1;
2939 }
2940- phydev->advertising = phydev->supported;
2941
2942- /* Turn off and clear interrupts */
2943- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
2944- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
2945+ /* Set initial mode - call the mode setting routines
2947+ * directly to ensure we are properly configured
2947+ */
2948+ if (amd_xgbe_phy_use_xgmii_mode(phydev))
2949+ ret = amd_xgbe_phy_xgmii_mode(phydev);
2950+ else if (amd_xgbe_phy_use_gmii_mode(phydev))
2951+ ret = amd_xgbe_phy_gmii_mode(phydev);
2952+ else if (amd_xgbe_phy_use_gmii_2500_mode(phydev))
2953+ ret = amd_xgbe_phy_gmii_2500_mode(phydev);
2954+ else
2955+ ret = -EINVAL;
2956+ if (ret < 0)
2957+ return ret;
2958+
2959+ /* Set up advertisement registers based on current settings */
2960+ ret = amd_xgbe_an_init(phydev);
2961+ if (ret)
2962+ return ret;
2963+
2964+ /* Enable auto-negotiation interrupts */
2965+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);
2966
2967 return 0;
2968 }
2969@@ -1028,25 +1301,19 @@ static int amd_xgbe_phy_setup_forced(struct phy_device *phydev)
2970 int ret;
2971
2972 /* Disable auto-negotiation */
2973- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
2974+ ret = amd_xgbe_phy_disable_an(phydev);
2975 if (ret < 0)
2976 return ret;
2977
2978- ret &= ~MDIO_AN_CTRL1_ENABLE;
2979- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
2980-
2981 /* Validate/Set specified speed */
2982 switch (phydev->speed) {
2983 case SPEED_10000:
2984- ret = amd_xgbe_phy_xgmii_mode(phydev);
2985+ ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
2986 break;
2987
2988 case SPEED_2500:
2989- ret = amd_xgbe_phy_gmii_2500_mode(phydev);
2990- break;
2991-
2992 case SPEED_1000:
2993- ret = amd_xgbe_phy_gmii_mode(phydev);
2994+ ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
2995 break;
2996
2997 default:
2998@@ -1066,10 +1333,11 @@ static int amd_xgbe_phy_setup_forced(struct phy_device *phydev)
2999 return 0;
3000 }
3001
3002-static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
3003+static int __amd_xgbe_phy_config_aneg(struct phy_device *phydev)
3004 {
3005 struct amd_xgbe_phy_priv *priv = phydev->priv;
3006 u32 mmd_mask = phydev->c45_ids.devices_in_package;
3007+ int ret;
3008
3009 if (phydev->autoneg != AUTONEG_ENABLE)
3010 return amd_xgbe_phy_setup_forced(phydev);
3011@@ -1078,56 +1346,79 @@ static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
3012 if (!(mmd_mask & MDIO_DEVS_AN))
3013 return -EINVAL;
3014
3015- /* Start/Restart the auto-negotiation state machine */
3016- mutex_lock(&priv->an_mutex);
3017+ /* Disable auto-negotiation interrupt */
3018+ disable_irq(priv->an_irq);
3019+
3020+ /* Start auto-negotiation in a supported mode */
3021+ if (phydev->advertising & SUPPORTED_10000baseKR_Full)
3022+ ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
3023+ else if ((phydev->advertising & SUPPORTED_1000baseKX_Full) ||
3024+ (phydev->advertising & SUPPORTED_2500baseX_Full))
3025+ ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
3026+ else
3027+ ret = -EINVAL;
3028+ if (ret < 0) {
3029+ enable_irq(priv->an_irq);
3030+ return ret;
3031+ }
3032+
3033+ /* Disable and stop any in-progress auto-negotiation */
3034+ ret = amd_xgbe_phy_disable_an(phydev);
3035+ if (ret < 0)
3036+ return ret;
3037+
3039+ /* Clear any auto-negotiation interrupts */
3039+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
3040+
3041 priv->an_result = AMD_XGBE_AN_READY;
3042- priv->an_state = AMD_XGBE_AN_START;
3043- priv->kr_state = AMD_XGBE_RX_READY;
3044- priv->kx_state = AMD_XGBE_RX_READY;
3045- mutex_unlock(&priv->an_mutex);
3046+ priv->an_state = AMD_XGBE_AN_READY;
3047+ priv->kr_state = AMD_XGBE_RX_BPA;
3048+ priv->kx_state = AMD_XGBE_RX_BPA;
3049
3050- queue_work(priv->an_workqueue, &priv->an_work);
3051+ /* Re-enable auto-negotiation interrupt */
3052+ enable_irq(priv->an_irq);
3053
3054- return 0;
3055+ /* Set up advertisement registers based on current settings */
3056+ ret = amd_xgbe_an_init(phydev);
3057+ if (ret)
3058+ return ret;
3059+
3060+ /* Enable and start auto-negotiation */
3061+ return amd_xgbe_phy_restart_an(phydev);
3062 }
3063
3064-static int amd_xgbe_phy_aneg_done(struct phy_device *phydev)
3065+static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
3066 {
3067 struct amd_xgbe_phy_priv *priv = phydev->priv;
3068- enum amd_xgbe_phy_an state;
3069+ int ret;
3070
3071 mutex_lock(&priv->an_mutex);
3072- state = priv->an_result;
3073+
3074+ ret = __amd_xgbe_phy_config_aneg(phydev);
3075+
3076 mutex_unlock(&priv->an_mutex);
3077
3078- return (state == AMD_XGBE_AN_COMPLETE);
3079+ return ret;
3080+}
3081+
3082+static int amd_xgbe_phy_aneg_done(struct phy_device *phydev)
3083+{
3084+ struct amd_xgbe_phy_priv *priv = phydev->priv;
3085+
3086+ return (priv->an_result == AMD_XGBE_AN_COMPLETE);
3087 }
3088
3089 static int amd_xgbe_phy_update_link(struct phy_device *phydev)
3090 {
3091 struct amd_xgbe_phy_priv *priv = phydev->priv;
3092- enum amd_xgbe_phy_an state;
3093- unsigned int check_again, autoneg;
3094 int ret;
3095
3096 /* If we're doing auto-negotiation don't report link down */
3097- mutex_lock(&priv->an_mutex);
3098- state = priv->an_state;
3099- mutex_unlock(&priv->an_mutex);
3100-
3101- if (state != AMD_XGBE_AN_READY) {
3102+ if (priv->an_state != AMD_XGBE_AN_READY) {
3103 phydev->link = 1;
3104 return 0;
3105 }
3106
3107- /* Since the device can be in the wrong mode when a link is
3108- * (re-)established (cable connected after the interface is
3109- * up, etc.), the link status may report no link. If there
3110- * is no link, try switching modes and checking the status
3111- * again if auto negotiation is enabled.
3112- */
3113- check_again = (phydev->autoneg == AUTONEG_ENABLE) ? 1 : 0;
3114-again:
3115 /* Link status is latched low, so read once to clear
3116 * and then read again to get current state
3117 */
3118@@ -1141,25 +1432,6 @@ again:
3119
3120 phydev->link = (ret & MDIO_STAT1_LSTATUS) ? 1 : 0;
3121
3122- if (!phydev->link) {
3123- if (check_again) {
3124- ret = amd_xgbe_phy_switch_mode(phydev);
3125- if (ret < 0)
3126- return ret;
3127- check_again = 0;
3128- goto again;
3129- }
3130- }
3131-
3132- autoneg = (phydev->link && !priv->link) ? 1 : 0;
3133- priv->link = phydev->link;
3134- if (autoneg) {
3135- /* Link is (back) up, re-start auto-negotiation */
3136- ret = amd_xgbe_phy_config_aneg(phydev);
3137- if (ret < 0)
3138- return ret;
3139- }
3140-
3141 return 0;
3142 }
3143
3144@@ -1249,6 +1521,7 @@ static int amd_xgbe_phy_read_status(struct phy_device *phydev)
3145
3146 static int amd_xgbe_phy_suspend(struct phy_device *phydev)
3147 {
3148+ struct amd_xgbe_phy_priv *priv = phydev->priv;
3149 int ret;
3150
3151 mutex_lock(&phydev->lock);
3152@@ -1257,6 +1530,8 @@ static int amd_xgbe_phy_suspend(struct phy_device *phydev)
3153 if (ret < 0)
3154 goto unlock;
3155
3156+ priv->lpm_ctrl = ret;
3157+
3158 ret |= MDIO_CTRL1_LPOWER;
3159 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
3160
3161@@ -1270,69 +1545,106 @@ unlock:
3162
3163 static int amd_xgbe_phy_resume(struct phy_device *phydev)
3164 {
3165- int ret;
3166+ struct amd_xgbe_phy_priv *priv = phydev->priv;
3167
3168 mutex_lock(&phydev->lock);
3169
3170- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
3171- if (ret < 0)
3172- goto unlock;
3173+ priv->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
3174+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, priv->lpm_ctrl);
3175
3176- ret &= ~MDIO_CTRL1_LPOWER;
3177- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
3178+ mutex_unlock(&phydev->lock);
3179
3180- ret = 0;
3181+ return 0;
3182+}
3183
3184-unlock:
3185- mutex_unlock(&phydev->lock);
3186+static unsigned int amd_xgbe_phy_resource_count(struct platform_device *pdev,
3187+ unsigned int type)
3188+{
3189+ unsigned int count;
3190+ int i;
3191
3192- return ret;
3193+ for (i = 0, count = 0; i < pdev->num_resources; i++) {
3194+ struct resource *r = &pdev->resource[i];
3195+
3196+ if (type == resource_type(r))
3197+ count++;
3198+ }
3199+
3200+ return count;
3201 }
3202
3203 static int amd_xgbe_phy_probe(struct phy_device *phydev)
3204 {
3205 struct amd_xgbe_phy_priv *priv;
3206- struct platform_device *pdev;
3207- struct device *dev;
3208- char *wq_name;
3209- const __be32 *property;
3210- unsigned int speed_set;
3211+ struct platform_device *phy_pdev;
3212+ struct device *dev, *phy_dev;
3213+ unsigned int phy_resnum, phy_irqnum;
3214 int ret;
3215
3216- if (!phydev->dev.of_node)
3217+ if (!phydev->bus || !phydev->bus->parent)
3218 return -EINVAL;
3219
3220- pdev = of_find_device_by_node(phydev->dev.of_node);
3221- if (!pdev)
3222- return -EINVAL;
3223- dev = &pdev->dev;
3224-
3225- wq_name = kasprintf(GFP_KERNEL, "%s-amd-xgbe-phy", phydev->bus->name);
3226- if (!wq_name) {
3227- ret = -ENOMEM;
3228- goto err_pdev;
3229- }
3230+ dev = phydev->bus->parent;
3231
3232 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
3233- if (!priv) {
3234- ret = -ENOMEM;
3235- goto err_name;
3236- }
3237+ if (!priv)
3238+ return -ENOMEM;
3239
3240- priv->pdev = pdev;
3241+ priv->pdev = to_platform_device(dev);
3242+ priv->adev = ACPI_COMPANION(dev);
3243 priv->dev = dev;
3244 priv->phydev = phydev;
3245+ mutex_init(&priv->an_mutex);
3246+ INIT_WORK(&priv->an_irq_work, amd_xgbe_an_irq_work);
3247+ INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine);
3248+
3249+ if (!priv->adev || acpi_disabled) {
3250+ struct device_node *bus_node;
3251+ struct device_node *phy_node;
3252+
3253+ bus_node = priv->dev->of_node;
3254+ phy_node = of_parse_phandle(bus_node, "phy-handle", 0);
3255+ if (!phy_node) {
3256+ dev_err(dev, "unable to parse phy-handle\n");
3257+ ret = -EINVAL;
3258+ goto err_priv;
3259+ }
3260+
3261+ phy_pdev = of_find_device_by_node(phy_node);
3262+ of_node_put(phy_node);
3263+
3264+ if (!phy_pdev) {
3265+ dev_err(dev, "unable to obtain phy device\n");
3266+ ret = -EINVAL;
3267+ goto err_priv;
3268+ }
3269+
3270+ phy_resnum = 0;
3271+ phy_irqnum = 0;
3272+ } else {
3273+ /* In ACPI, the XGBE and PHY resources are grouped
3274+ * together, with the PHY resources at the end
3275+ */
3276+ phy_pdev = priv->pdev;
3277+ phy_resnum = amd_xgbe_phy_resource_count(phy_pdev,
3278+ IORESOURCE_MEM) - 3;
3279+ phy_irqnum = amd_xgbe_phy_resource_count(phy_pdev,
3280+ IORESOURCE_IRQ) - 1;
3281+ }
3282+ phy_dev = &phy_pdev->dev;
3283
3284 /* Get the device mmio areas */
3285- priv->rxtx_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3286+ priv->rxtx_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
3287+ phy_resnum++);
3288 priv->rxtx_regs = devm_ioremap_resource(dev, priv->rxtx_res);
3289 if (IS_ERR(priv->rxtx_regs)) {
3290 dev_err(dev, "rxtx ioremap failed\n");
3291 ret = PTR_ERR(priv->rxtx_regs);
3292- goto err_priv;
3293+ goto err_put;
3294 }
3295
3296- priv->sir0_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
3297+ priv->sir0_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
3298+ phy_resnum++);
3299 priv->sir0_regs = devm_ioremap_resource(dev, priv->sir0_res);
3300 if (IS_ERR(priv->sir0_regs)) {
3301 dev_err(dev, "sir0 ioremap failed\n");
3302@@ -1340,7 +1652,8 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev)
3303 goto err_rxtx;
3304 }
3305
3306- priv->sir1_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
3307+ priv->sir1_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
3308+ phy_resnum++);
3309 priv->sir1_regs = devm_ioremap_resource(dev, priv->sir1_res);
3310 if (IS_ERR(priv->sir1_regs)) {
3311 dev_err(dev, "sir1 ioremap failed\n");
3312@@ -1348,40 +1661,153 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev)
3313 goto err_sir0;
3314 }
3315
3316+ /* Get the auto-negotiation interrupt */
3317+ ret = platform_get_irq(phy_pdev, phy_irqnum);
3318+ if (ret < 0) {
3319+ dev_err(dev, "platform_get_irq failed\n");
3320+ goto err_sir1;
3321+ }
3322+ priv->an_irq = ret;
3323+
3324 /* Get the device speed set property */
3325- speed_set = 0;
3326- property = of_get_property(dev->of_node, XGBE_PHY_SPEEDSET_PROPERTY,
3327- NULL);
3328- if (property)
3329- speed_set = be32_to_cpu(*property);
3330-
3331- switch (speed_set) {
3332- case 0:
3333- priv->speed_set = AMD_XGBE_PHY_SPEEDSET_1000_10000;
3334- break;
3335- case 1:
3336- priv->speed_set = AMD_XGBE_PHY_SPEEDSET_2500_10000;
3337+ ret = device_property_read_u32(phy_dev, XGBE_PHY_SPEEDSET_PROPERTY,
3338+ &priv->speed_set);
3339+ if (ret) {
3340+ dev_err(dev, "invalid %s property\n",
3341+ XGBE_PHY_SPEEDSET_PROPERTY);
3342+ goto err_sir1;
3343+ }
3344+
3345+ switch (priv->speed_set) {
3346+ case AMD_XGBE_PHY_SPEEDSET_1000_10000:
3347+ case AMD_XGBE_PHY_SPEEDSET_2500_10000:
3348 break;
3349 default:
3350- dev_err(dev, "invalid amd,speed-set property\n");
3351+ dev_err(dev, "invalid %s property\n",
3352+ XGBE_PHY_SPEEDSET_PROPERTY);
3353 ret = -EINVAL;
3354 goto err_sir1;
3355 }
3356
3357- priv->link = 1;
3358+ if (device_property_present(phy_dev, XGBE_PHY_BLWC_PROPERTY)) {
3359+ ret = device_property_read_u32_array(phy_dev,
3360+ XGBE_PHY_BLWC_PROPERTY,
3361+ priv->serdes_blwc,
3362+ XGBE_PHY_SPEEDS);
3363+ if (ret) {
3364+ dev_err(dev, "invalid %s property\n",
3365+ XGBE_PHY_BLWC_PROPERTY);
3366+ goto err_sir1;
3367+ }
3368+ } else {
3369+ memcpy(priv->serdes_blwc, amd_xgbe_phy_serdes_blwc,
3370+ sizeof(priv->serdes_blwc));
3371+ }
3372
3373- mutex_init(&priv->an_mutex);
3374- INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine);
3375- priv->an_workqueue = create_singlethread_workqueue(wq_name);
3376- if (!priv->an_workqueue) {
3377- ret = -ENOMEM;
3378- goto err_sir1;
3379+ if (device_property_present(phy_dev, XGBE_PHY_CDR_RATE_PROPERTY)) {
3380+ ret = device_property_read_u32_array(phy_dev,
3381+ XGBE_PHY_CDR_RATE_PROPERTY,
3382+ priv->serdes_cdr_rate,
3383+ XGBE_PHY_SPEEDS);
3384+ if (ret) {
3385+ dev_err(dev, "invalid %s property\n",
3386+ XGBE_PHY_CDR_RATE_PROPERTY);
3387+ goto err_sir1;
3388+ }
3389+ } else {
3390+ memcpy(priv->serdes_cdr_rate, amd_xgbe_phy_serdes_cdr_rate,
3391+ sizeof(priv->serdes_cdr_rate));
3392+ }
3393+
3394+ if (device_property_present(phy_dev, XGBE_PHY_PQ_SKEW_PROPERTY)) {
3395+ ret = device_property_read_u32_array(phy_dev,
3396+ XGBE_PHY_PQ_SKEW_PROPERTY,
3397+ priv->serdes_pq_skew,
3398+ XGBE_PHY_SPEEDS);
3399+ if (ret) {
3400+ dev_err(dev, "invalid %s property\n",
3401+ XGBE_PHY_PQ_SKEW_PROPERTY);
3402+ goto err_sir1;
3403+ }
3404+ } else {
3405+ memcpy(priv->serdes_pq_skew, amd_xgbe_phy_serdes_pq_skew,
3406+ sizeof(priv->serdes_pq_skew));
3407+ }
3408+
3409+ if (device_property_present(phy_dev, XGBE_PHY_TX_AMP_PROPERTY)) {
3410+ ret = device_property_read_u32_array(phy_dev,
3411+ XGBE_PHY_TX_AMP_PROPERTY,
3412+ priv->serdes_tx_amp,
3413+ XGBE_PHY_SPEEDS);
3414+ if (ret) {
3415+ dev_err(dev, "invalid %s property\n",
3416+ XGBE_PHY_TX_AMP_PROPERTY);
3417+ goto err_sir1;
3418+ }
3419+ } else {
3420+ memcpy(priv->serdes_tx_amp, amd_xgbe_phy_serdes_tx_amp,
3421+ sizeof(priv->serdes_tx_amp));
3422+ }
3423+
3424+ if (device_property_present(phy_dev, XGBE_PHY_DFE_CFG_PROPERTY)) {
3425+ ret = device_property_read_u32_array(phy_dev,
3426+ XGBE_PHY_DFE_CFG_PROPERTY,
3427+ priv->serdes_dfe_tap_cfg,
3428+ XGBE_PHY_SPEEDS);
3429+ if (ret) {
3430+ dev_err(dev, "invalid %s property\n",
3431+ XGBE_PHY_DFE_CFG_PROPERTY);
3432+ goto err_sir1;
3433+ }
3434+ } else {
3435+ memcpy(priv->serdes_dfe_tap_cfg,
3436+ amd_xgbe_phy_serdes_dfe_tap_cfg,
3437+ sizeof(priv->serdes_dfe_tap_cfg));
3438 }
3439
3440+ if (device_property_present(phy_dev, XGBE_PHY_DFE_ENA_PROPERTY)) {
3441+ ret = device_property_read_u32_array(phy_dev,
3442+ XGBE_PHY_DFE_ENA_PROPERTY,
3443+ priv->serdes_dfe_tap_ena,
3444+ XGBE_PHY_SPEEDS);
3445+ if (ret) {
3446+ dev_err(dev, "invalid %s property\n",
3447+ XGBE_PHY_DFE_ENA_PROPERTY);
3448+ goto err_sir1;
3449+ }
3450+ } else {
3451+ memcpy(priv->serdes_dfe_tap_ena,
3452+ amd_xgbe_phy_serdes_dfe_tap_ena,
3453+ sizeof(priv->serdes_dfe_tap_ena));
3454+ }
3455+
3456+ /* Initialize supported features */
3457+ phydev->supported = SUPPORTED_Autoneg;
3458+ phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
3459+ phydev->supported |= SUPPORTED_Backplane;
3460+ phydev->supported |= SUPPORTED_10000baseKR_Full;
3461+ switch (priv->speed_set) {
3462+ case AMD_XGBE_PHY_SPEEDSET_1000_10000:
3463+ phydev->supported |= SUPPORTED_1000baseKX_Full;
3464+ break;
3465+ case AMD_XGBE_PHY_SPEEDSET_2500_10000:
3466+ phydev->supported |= SUPPORTED_2500baseX_Full;
3467+ break;
3468+ }
3469+
3470+ ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_ABILITY);
3471+ if (ret < 0)
3472+ return ret;
3473+ priv->fec_ability = ret & XGBE_PHY_FEC_MASK;
3474+ if (priv->fec_ability & XGBE_PHY_FEC_ENABLE)
3475+ phydev->supported |= SUPPORTED_10000baseR_FEC;
3476+
3477+ phydev->advertising = phydev->supported;
3478+
3479 phydev->priv = priv;
3480
3481- kfree(wq_name);
3482- of_dev_put(pdev);
3483+ if (!priv->adev || acpi_disabled)
3484+ platform_device_put(phy_pdev);
3485
3486 return 0;
3487
3488@@ -1400,15 +1826,13 @@ err_rxtx:
3489 devm_release_mem_region(dev, priv->rxtx_res->start,
3490 resource_size(priv->rxtx_res));
3491
3492+err_put:
3493+ if (!priv->adev || acpi_disabled)
3494+ platform_device_put(phy_pdev);
3495+
3496 err_priv:
3497 devm_kfree(dev, priv);
3498
3499-err_name:
3500- kfree(wq_name);
3501-
3502-err_pdev:
3503- of_dev_put(pdev);
3504-
3505 return ret;
3506 }
3507
3508@@ -1417,13 +1841,12 @@ static void amd_xgbe_phy_remove(struct phy_device *phydev)
3509 struct amd_xgbe_phy_priv *priv = phydev->priv;
3510 struct device *dev = priv->dev;
3511
3512- /* Stop any in process auto-negotiation */
3513- mutex_lock(&priv->an_mutex);
3514- priv->an_state = AMD_XGBE_AN_EXIT;
3515- mutex_unlock(&priv->an_mutex);
3516+ if (priv->an_irq_allocated) {
3517+ devm_free_irq(dev, priv->an_irq, priv);
3518
3519- flush_workqueue(priv->an_workqueue);
3520- destroy_workqueue(priv->an_workqueue);
3521+ flush_workqueue(priv->an_workqueue);
3522+ destroy_workqueue(priv->an_workqueue);
3523+ }
3524
3525 /* Release resources */
3526 devm_iounmap(dev, priv->sir1_regs);
3527@@ -1452,6 +1875,7 @@ static struct phy_driver amd_xgbe_phy_driver[] = {
3528 .phy_id_mask = XGBE_PHY_MASK,
3529 .name = "AMD XGBE PHY",
3530 .features = 0,
3531+ .flags = PHY_IS_INTERNAL,
3532 .probe = amd_xgbe_phy_probe,
3533 .remove = amd_xgbe_phy_remove,
3534 .soft_reset = amd_xgbe_phy_soft_reset,
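
The PHY rework above replaces the polling loop with an interrupt-driven state
machine: the AN interrupt cannot be serviced in hard-IRQ context because the
reason register sits behind MDIO, so the ISR masks itself and defers to a
workqueue. A condensed sketch of that handoff (struct an_ctx is a hypothetical
stand-in for the fields added to amd_xgbe_phy_priv):

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct an_ctx {
	int irq;
	struct workqueue_struct *wq;	/* single-threaded */
	struct work_struct irq_work;
	struct work_struct sm_work;	/* re-enables irq when done */
};

static irqreturn_t an_isr(int irq, void *data)
{
	struct an_ctx *ctx = data;

	/* Reading MDIO would sleep; mask the line and defer */
	disable_irq_nosync(ctx->irq);
	queue_work(ctx->wq, &ctx->irq_work);

	return IRQ_HANDLED;
}

static void an_irq_work(struct work_struct *work)
{
	struct an_ctx *ctx = container_of(work, struct an_ctx, irq_work);

	/* sm_work is only queued from here, so flushing cannot deadlock
	 * on the single-threaded queue; it closes the race between
	 * sm_work re-enabling the IRQ and a new queue_work() call.
	 */
	flush_work(&ctx->sm_work);
	queue_work(ctx->wq, &ctx->sm_work);
}
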
3535diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
3536index abcafaa..6bdf476 100644
3537--- a/include/linux/clocksource.h
3538+++ b/include/linux/clocksource.h
3539@@ -87,6 +87,15 @@ static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
3540 }
3541
3542 /**
3543+ * timecounter_adjtime - Shifts the time of the clock.
3544+ * @delta: Desired change in nanoseconds.
3545+ */
3546+static inline void timecounter_adjtime(struct timecounter *tc, s64 delta)
3547+{
3548+ tc->nsec += delta;
3549+}
3550+
3551+/**
3552 * timecounter_init - initialize a time counter
3553 * @tc: Pointer to time counter which is to be initialized/reset
3554 * @cc: A cycle counter, ready to be used.
3555--
35561.9.1
3557
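
For reference, the timecounter_adjtime() helper added in the final hunk is all
a PTP adjtime callback needs once the offset is applied under the timestamp
lock. A minimal sketch mirroring xgbe_adjtime(), with the timecounter and
spinlock passed in rather than taken from the driver's private data:

#include <linux/clocksource.h>
#include <linux/spinlock.h>

static int example_ptp_adjtime(struct timecounter *tc,
			       spinlock_t *lock, s64 delta)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	timecounter_adjtime(tc, delta);		/* tc->nsec += delta */
	spin_unlock_irqrestore(lock, flags);

	return 0;
}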