path: root/recipes-kernel/linux/linux-ampere/0001-Upgrade-i40e-drivers-to-2.11.29.patch
blob: 88c41827e7e85cf840660473daa9f3e372693f23 (plain)
11897
11898
11899
11900
11901
11902
11903
11904
11905
11906
11907
11908
11909
11910
11911
11912
11913
11914
11915
11916
11917
11918
11919
11920
11921
11922
11923
11924
11925
11926
11927
11928
11929
11930
11931
11932
11933
11934
11935
11936
11937
11938
11939
11940
11941
11942
11943
11944
11945
11946
11947
11948
11949
11950
11951
11952
11953
11954
11955
11956
11957
11958
11959
11960
11961
11962
11963
11964
11965
11966
11967
11968
11969
11970
11971
11972
11973
11974
11975
11976
11977
11978
11979
11980
11981
11982
11983
11984
11985
11986
11987
11988
11989
11990
11991
11992
11993
11994
11995
11996
11997
11998
11999
12000
12001
12002
12003
12004
12005
12006
12007
12008
12009
12010
12011
12012
12013
12014
12015
12016
12017
12018
12019
12020
12021
12022
12023
12024
12025
12026
12027
12028
12029
12030
12031
12032
12033
12034
12035
12036
12037
12038
12039
12040
12041
12042
12043
12044
12045
12046
12047
12048
12049
12050
12051
12052
12053
12054
12055
12056
12057
12058
12059
12060
12061
12062
12063
12064
12065
12066
12067
12068
12069
12070
12071
12072
12073
12074
12075
12076
12077
12078
12079
12080
12081
12082
12083
12084
12085
12086
12087
12088
12089
12090
12091
12092
12093
12094
12095
12096
12097
12098
12099
12100
12101
12102
12103
12104
12105
12106
12107
12108
12109
12110
12111
12112
12113
12114
12115
12116
12117
12118
12119
12120
12121
12122
12123
12124
12125
12126
12127
12128
12129
12130
12131
12132
12133
12134
12135
12136
12137
12138
12139
12140
12141
12142
12143
12144
12145
12146
12147
12148
12149
12150
12151
12152
12153
12154
12155
12156
12157
12158
12159
12160
12161
12162
12163
12164
12165
12166
12167
12168
12169
12170
12171
12172
12173
12174
12175
12176
12177
12178
12179
12180
12181
12182
12183
12184
12185
12186
12187
12188
12189
12190
12191
12192
12193
12194
12195
12196
12197
12198
12199
12200
12201
12202
12203
12204
12205
12206
12207
12208
12209
12210
12211
12212
12213
12214
12215
12216
12217
12218
12219
12220
12221
12222
12223
12224
12225
12226
12227
12228
12229
12230
12231
12232
12233
12234
12235
12236
12237
12238
12239
12240
12241
12242
12243
12244
12245
12246
12247
12248
12249
12250
12251
12252
12253
12254
12255
12256
12257
12258
12259
12260
12261
12262
12263
12264
12265
12266
12267
12268
12269
12270
12271
12272
12273
12274
12275
12276
12277
12278
12279
12280
12281
12282
12283
12284
12285
12286
12287
12288
12289
12290
12291
12292
12293
12294
12295
12296
12297
12298
12299
12300
12301
12302
12303
12304
12305
12306
12307
12308
12309
12310
12311
12312
12313
12314
12315
12316
12317
12318
12319
12320
12321
12322
12323
12324
12325
12326
12327
12328
12329
12330
12331
12332
12333
12334
12335
12336
12337
12338
12339
12340
12341
12342
12343
12344
12345
12346
12347
12348
12349
12350
12351
12352
12353
12354
12355
12356
12357
12358
12359
12360
12361
12362
12363
12364
12365
12366
12367
12368
12369
12370
12371
12372
12373
12374
12375
12376
12377
12378
12379
12380
12381
12382
12383
12384
12385
12386
12387
12388
12389
12390
12391
12392
12393
12394
12395
12396
12397
12398
12399
12400
12401
12402
12403
12404
12405
12406
12407
12408
12409
12410
12411
12412
12413
12414
12415
12416
12417
12418
12419
12420
12421
12422
12423
12424
12425
12426
12427
12428
12429
12430
12431
12432
12433
12434
12435
12436
12437
12438
12439
12440
12441
12442
12443
12444
12445
12446
12447
12448
12449
12450
12451
12452
12453
12454
12455
12456
12457
12458
12459
12460
12461
12462
12463
12464
12465
12466
12467
12468
12469
12470
12471
12472
12473
12474
12475
12476
12477
12478
12479
12480
12481
12482
12483
12484
12485
12486
12487
12488
12489
12490
12491
12492
12493
12494
12495
12496
12497
12498
12499
12500
12501
12502
12503
12504
12505
12506
12507
12508
12509
12510
12511
12512
12513
12514
12515
12516
12517
12518
12519
12520
12521
12522
12523
12524
12525
12526
12527
12528
12529
12530
12531
12532
12533
12534
12535
12536
12537
12538
12539
12540
12541
12542
12543
12544
12545
12546
12547
12548
12549
12550
12551
12552
12553
12554
12555
12556
12557
12558
12559
12560
12561
12562
12563
12564
12565
12566
12567
12568
12569
12570
12571
12572
12573
12574
12575
12576
12577
12578
12579
12580
12581
12582
12583
12584
12585
12586
12587
12588
12589
12590
12591
12592
12593
12594
12595
12596
12597
12598
12599
12600
12601
12602
12603
12604
12605
12606
12607
12608
12609
12610
12611
12612
12613
12614
12615
12616
12617
12618
12619
12620
12621
12622
12623
12624
12625
12626
12627
12628
12629
12630
12631
12632
12633
12634
12635
12636
12637
12638
12639
12640
12641
12642
12643
12644
12645
12646
12647
12648
12649
12650
12651
12652
12653
12654
12655
12656
12657
12658
12659
12660
12661
12662
12663
12664
12665
12666
12667
12668
12669
12670
12671
12672
12673
12674
12675
12676
12677
12678
12679
12680
12681
12682
12683
12684
12685
12686
12687
12688
12689
12690
12691
12692
12693
12694
12695
12696
12697
12698
12699
12700
12701
12702
12703
12704
12705
12706
12707
12708
12709
12710
12711
12712
12713
12714
12715
12716
12717
12718
12719
12720
12721
12722
12723
12724
12725
12726
12727
12728
12729
12730
12731
12732
12733
12734
12735
12736
12737
12738
12739
12740
12741
12742
12743
12744
12745
12746
12747
12748
12749
12750
12751
12752
12753
12754
12755
12756
12757
12758
12759
12760
12761
12762
12763
12764
12765
12766
12767
12768
12769
12770
12771
12772
12773
12774
12775
12776
12777
12778
12779
12780
12781
12782
12783
12784
12785
12786
12787
12788
12789
12790
12791
12792
12793
12794
12795
12796
12797
12798
12799
12800
12801
12802
12803
12804
12805
12806
12807
12808
12809
12810
12811
12812
12813
12814
12815
12816
12817
12818
12819
12820
12821
12822
12823
12824
12825
12826
12827
12828
12829
12830
12831
12832
12833
12834
12835
12836
12837
12838
12839
12840
12841
12842
12843
12844
12845
12846
12847
12848
12849
12850
12851
12852
12853
12854
12855
12856
12857
12858
12859
12860
12861
12862
12863
12864
12865
12866
12867
12868
12869
12870
12871
12872
12873
12874
12875
12876
12877
12878
12879
12880
12881
12882
12883
12884
12885
12886
12887
12888
12889
12890
12891
12892
12893
12894
12895
12896
12897
12898
12899
12900
12901
12902
12903
12904
12905
12906
12907
12908
12909
12910
12911
12912
12913
12914
12915
12916
12917
12918
12919
12920
12921
12922
12923
12924
12925
12926
12927
12928
12929
12930
12931
12932
12933
12934
12935
12936
12937
12938
12939
12940
12941
12942
12943
12944
12945
12946
12947
12948
12949
12950
12951
12952
12953
12954
12955
12956
12957
12958
12959
12960
12961
12962
12963
12964
12965
12966
12967
12968
12969
12970
12971
12972
12973
12974
12975
12976
12977
12978
12979
12980
12981
12982
12983
12984
12985
12986
12987
12988
12989
12990
12991
12992
12993
12994
12995
12996
12997
12998
12999
13000
13001
13002
13003
13004
13005
13006
13007
13008
13009
13010
13011
13012
13013
13014
13015
13016
13017
13018
13019
13020
13021
13022
13023
13024
13025
13026
13027
13028
13029
13030
13031
13032
13033
13034
13035
13036
13037
13038
13039
13040
13041
13042
13043
13044
13045
13046
13047
13048
13049
13050
13051
13052
13053
13054
13055
13056
13057
13058
13059
13060
13061
13062
13063
13064
13065
13066
13067
13068
13069
13070
13071
13072
13073
13074
13075
13076
13077
13078
13079
13080
13081
13082
13083
13084
13085
13086
13087
13088
13089
13090
13091
13092
13093
13094
13095
13096
13097
13098
13099
13100
13101
13102
13103
13104
13105
13106
13107
13108
13109
13110
13111
13112
13113
13114
13115
13116
13117
13118
13119
13120
13121
13122
13123
13124
13125
13126
13127
13128
13129
13130
13131
13132
13133
13134
13135
13136
13137
13138
13139
13140
13141
13142
13143
13144
13145
13146
13147
13148
13149
13150
13151
13152
13153
13154
13155
13156
13157
13158
13159
13160
13161
13162
13163
13164
13165
13166
13167
13168
13169
13170
13171
13172
13173
13174
13175
13176
13177
13178
13179
13180
13181
13182
13183
13184
13185
13186
13187
13188
13189
13190
13191
13192
13193
13194
13195
13196
13197
13198
13199
13200
13201
13202
13203
13204
13205
13206
13207
13208
13209
13210
13211
13212
13213
13214
13215
13216
13217
13218
13219
13220
13221
13222
13223
13224
13225
13226
13227
13228
13229
13230
13231
13232
13233
13234
13235
13236
13237
13238
13239
13240
13241
13242
13243
13244
13245
13246
13247
13248
13249
13250
13251
13252
13253
13254
13255
13256
13257
13258
13259
13260
13261
13262
13263
13264
13265
13266
13267
13268
13269
13270
13271
13272
13273
13274
13275
13276
13277
13278
13279
13280
13281
13282
13283
13284
13285
13286
13287
13288
13289
13290
13291
13292
13293
13294
13295
13296
13297
13298
13299
13300
13301
13302
13303
13304
13305
13306
13307
13308
13309
13310
13311
13312
13313
13314
13315
13316
13317
13318
13319
13320
13321
13322
13323
13324
13325
13326
13327
13328
13329
13330
13331
13332
13333
13334
13335
13336
13337
13338
13339
13340
13341
13342
13343
13344
13345
13346
13347
13348
13349
13350
13351
13352
13353
13354
13355
13356
13357
13358
13359
13360
13361
13362
13363
13364
13365
13366
13367
13368
13369
13370
13371
13372
13373
13374
13375
13376
13377
13378
13379
13380
13381
13382
13383
13384
13385
13386
13387
13388
13389
13390
13391
13392
13393
13394
13395
13396
13397
13398
13399
13400
13401
13402
13403
13404
13405
13406
13407
13408
13409
13410
13411
13412
13413
13414
13415
13416
13417
13418
13419
13420
13421
13422
13423
13424
13425
13426
13427
13428
13429
13430
13431
13432
13433
13434
13435
13436
13437
13438
13439
13440
13441
13442
13443
13444
13445
13446
13447
13448
13449
13450
13451
13452
13453
13454
13455
13456
13457
13458
13459
13460
13461
13462
13463
13464
13465
13466
13467
13468
13469
13470
13471
13472
13473
13474
13475
13476
13477
13478
13479
13480
13481
13482
13483
13484
13485
13486
13487
13488
13489
13490
13491
13492
13493
13494
13495
13496
13497
13498
13499
13500
13501
13502
13503
13504
13505
13506
13507
13508
13509
13510
13511
13512
13513
13514
13515
13516
13517
13518
13519
13520
13521
13522
13523
13524
13525
13526
13527
13528
13529
13530
13531
13532
13533
13534
13535
13536
13537
13538
13539
13540
13541
13542
13543
13544
13545
13546
13547
13548
13549
13550
13551
13552
13553
13554
13555
13556
13557
13558
13559
13560
13561
13562
13563
13564
13565
13566
13567
13568
13569
13570
13571
13572
13573
13574
13575
13576
13577
13578
13579
13580
13581
13582
13583
13584
13585
13586
13587
13588
13589
13590
13591
13592
13593
13594
13595
13596
13597
13598
13599
13600
13601
13602
13603
13604
13605
13606
13607
13608
13609
13610
13611
13612
13613
13614
13615
13616
13617
13618
13619
13620
13621
13622
13623
13624
13625
13626
13627
13628
13629
13630
13631
13632
13633
13634
13635
13636
13637
13638
13639
13640
13641
13642
13643
13644
13645
13646
13647
13648
13649
13650
13651
13652
13653
13654
13655
13656
13657
13658
13659
13660
13661
13662
13663
13664
13665
13666
13667
13668
13669
13670
13671
13672
13673
13674
13675
13676
13677
13678
13679
13680
13681
13682
13683
13684
13685
13686
13687
13688
13689
13690
13691
13692
13693
13694
13695
13696
13697
13698
13699
13700
13701
13702
13703
13704
13705
13706
13707
13708
13709
13710
13711
13712
13713
13714
13715
13716
13717
13718
13719
13720
13721
13722
13723
13724
13725
13726
13727
13728
13729
13730
13731
13732
13733
13734
13735
13736
13737
13738
13739
13740
13741
13742
13743
13744
13745
13746
13747
13748
13749
13750
13751
13752
13753
13754
13755
13756
13757
13758
13759
13760
13761
13762
13763
13764
13765
13766
13767
13768
13769
13770
13771
13772
13773
13774
13775
13776
13777
13778
13779
13780
13781
13782
13783
13784
13785
13786
13787
13788
13789
13790
13791
13792
13793
13794
13795
13796
13797
13798
13799
13800
13801
13802
13803
13804
13805
13806
13807
13808
13809
13810
13811
13812
13813
13814
13815
13816
13817
13818
13819
13820
13821
13822
13823
13824
13825
13826
13827
13828
13829
13830
13831
13832
13833
13834
13835
13836
13837
13838
13839
13840
13841
13842
13843
13844
13845
13846
13847
13848
13849
13850
13851
13852
13853
13854
13855
13856
13857
13858
13859
13860
13861
13862
13863
13864
13865
13866
13867
13868
13869
13870
13871
13872
13873
13874
13875
13876
13877
13878
13879
13880
13881
13882
13883
13884
13885
13886
13887
13888
13889
13890
13891
13892
13893
13894
13895
13896
13897
13898
13899
13900
13901
13902
13903
13904
13905
13906
13907
13908
13909
13910
13911
13912
13913
13914
13915
13916
13917
13918
13919
13920
13921
13922
13923
13924
13925
13926
13927
13928
13929
13930
13931
13932
13933
13934
13935
13936
13937
13938
13939
13940
13941
13942
13943
13944
13945
13946
13947
13948
13949
13950
13951
13952
13953
13954
13955
13956
13957
13958
13959
13960
13961
13962
13963
13964
13965
13966
13967
13968
13969
13970
13971
13972
13973
13974
13975
13976
13977
13978
13979
13980
13981
13982
13983
13984
13985
13986
13987
13988
13989
13990
13991
13992
13993
13994
13995
13996
13997
13998
13999
14000
14001
14002
14003
14004
14005
14006
14007
14008
14009
14010
14011
14012
14013
14014
14015
14016
14017
14018
14019
14020
14021
14022
14023
14024
14025
14026
14027
14028
14029
14030
14031
14032
14033
14034
14035
14036
14037
14038
14039
14040
14041
14042
14043
14044
14045
14046
14047
14048
14049
14050
14051
14052
14053
14054
14055
14056
14057
14058
14059
14060
14061
14062
14063
14064
14065
14066
14067
14068
14069
14070
14071
14072
14073
14074
14075
14076
14077
14078
14079
14080
14081
14082
14083
14084
14085
14086
14087
14088
14089
14090
14091
14092
14093
14094
14095
14096
14097
14098
14099
14100
14101
14102
14103
14104
14105
14106
14107
14108
14109
14110
14111
14112
14113
14114
14115
14116
14117
14118
14119
14120
14121
14122
14123
14124
14125
14126
14127
14128
14129
14130
14131
14132
14133
14134
14135
14136
14137
14138
14139
14140
14141
14142
14143
14144
14145
14146
14147
14148
14149
14150
14151
14152
14153
14154
14155
14156
14157
14158
14159
14160
14161
14162
14163
14164
14165
14166
14167
14168
14169
14170
14171
14172
14173
14174
14175
14176
14177
14178
14179
14180
14181
14182
14183
14184
14185
14186
14187
14188
14189
14190
14191
14192
14193
14194
14195
14196
14197
14198
14199
14200
14201
14202
14203
14204
14205
14206
14207
14208
14209
14210
14211
14212
14213
14214
14215
14216
14217
14218
14219
14220
14221
14222
14223
14224
14225
14226
14227
14228
14229
14230
14231
14232
14233
14234
14235
14236
14237
14238
14239
14240
14241
14242
14243
14244
14245
14246
14247
14248
14249
14250
14251
14252
14253
14254
14255
14256
14257
14258
14259
14260
14261
14262
14263
14264
14265
14266
14267
14268
14269
14270
14271
14272
14273
14274
14275
14276
14277
14278
14279
14280
14281
14282
14283
14284
14285
14286
14287
14288
14289
14290
14291
14292
14293
14294
14295
14296
14297
14298
14299
14300
14301
14302
14303
14304
14305
14306
14307
14308
14309
14310
14311
14312
14313
14314
14315
14316
14317
14318
14319
14320
14321
14322
14323
14324
14325
14326
14327
14328
14329
14330
14331
14332
14333
14334
14335
14336
14337
14338
14339
14340
14341
14342
14343
14344
14345
14346
14347
14348
14349
14350
14351
14352
14353
14354
14355
14356
14357
14358
14359
14360
14361
14362
14363
14364
14365
14366
14367
14368
14369
14370
14371
14372
14373
14374
14375
14376
14377
14378
14379
14380
14381
14382
14383
14384
14385
14386
14387
14388
14389
14390
14391
14392
14393
14394
14395
14396
14397
14398
14399
14400
14401
14402
14403
14404
14405
14406
14407
14408
14409
14410
14411
14412
14413
14414
14415
14416
14417
14418
14419
14420
14421
14422
14423
14424
14425
14426
14427
14428
14429
14430
14431
14432
14433
14434
14435
14436
14437
14438
14439
14440
14441
14442
14443
14444
14445
14446
14447
14448
14449
14450
14451
14452
14453
14454
14455
14456
14457
14458
14459
14460
14461
14462
14463
14464
14465
14466
14467
14468
14469
14470
14471
14472
14473
14474
14475
14476
14477
14478
14479
14480
14481
14482
14483
14484
14485
14486
14487
14488
14489
14490
14491
14492
14493
14494
14495
14496
14497
14498
14499
14500
14501
14502
14503
14504
14505
14506
14507
14508
14509
14510
14511
14512
14513
14514
14515
14516
14517
14518
14519
14520
14521
14522
14523
14524
14525
14526
14527
14528
14529
14530
14531
14532
14533
14534
14535
14536
14537
14538
14539
14540
14541
14542
14543
14544
14545
14546
14547
14548
14549
14550
14551
14552
14553
14554
14555
14556
14557
14558
14559
14560
14561
14562
14563
14564
14565
14566
14567
14568
14569
14570
14571
14572
14573
14574
14575
14576
14577
14578
14579
14580
14581
14582
14583
14584
14585
14586
14587
14588
14589
14590
14591
14592
14593
14594
14595
14596
14597
14598
14599
14600
14601
14602
14603
14604
14605
14606
14607
14608
14609
14610
14611
14612
14613
14614
14615
14616
14617
14618
14619
14620
14621
14622
14623
14624
14625
14626
14627
14628
14629
14630
14631
14632
14633
14634
14635
14636
14637
14638
14639
14640
14641
14642
14643
14644
14645
14646
14647
14648
14649
14650
14651
14652
14653
14654
14655
14656
14657
14658
14659
14660
14661
14662
14663
14664
14665
14666
14667
14668
14669
14670
14671
14672
14673
14674
14675
14676
14677
14678
14679
14680
14681
14682
14683
14684
14685
14686
14687
14688
14689
14690
14691
14692
14693
14694
14695
14696
14697
14698
14699
14700
14701
14702
14703
14704
14705
14706
14707
14708
14709
14710
14711
14712
14713
14714
14715
14716
14717
14718
14719
14720
14721
14722
14723
14724
14725
14726
14727
14728
14729
14730
14731
14732
14733
14734
14735
14736
14737
14738
14739
14740
14741
14742
14743
14744
14745
14746
14747
14748
14749
14750
14751
14752
14753
14754
14755
14756
14757
14758
14759
14760
14761
14762
14763
14764
14765
14766
14767
14768
14769
14770
14771
14772
14773
14774
14775
14776
14777
14778
14779
14780
14781
14782
14783
14784
14785
14786
14787
14788
14789
14790
14791
14792
14793
14794
14795
14796
14797
14798
14799
14800
14801
14802
14803
14804
14805
14806
14807
14808
14809
14810
14811
14812
14813
14814
14815
14816
14817
14818
14819
14820
14821
14822
14823
14824
14825
14826
14827
14828
14829
14830
14831
14832
14833
14834
14835
14836
14837
14838
14839
14840
14841
14842
14843
14844
14845
14846
14847
14848
14849
14850
14851
14852
14853
14854
14855
14856
14857
14858
14859
14860
14861
14862
14863
14864
14865
14866
14867
14868
14869
14870
14871
14872
14873
14874
14875
14876
14877
14878
14879
14880
14881
14882
14883
14884
14885
14886
14887
14888
14889
14890
14891
14892
14893
14894
14895
14896
14897
14898
14899
14900
14901
14902
14903
14904
14905
14906
14907
14908
14909
14910
14911
14912
14913
14914
14915
14916
14917
14918
14919
14920
14921
14922
14923
14924
14925
14926
14927
14928
14929
14930
14931
14932
14933
14934
14935
14936
14937
14938
14939
14940
14941
14942
14943
14944
14945
14946
14947
14948
14949
14950
14951
14952
14953
14954
14955
14956
14957
14958
14959
14960
14961
14962
14963
14964
14965
14966
14967
14968
14969
14970
14971
14972
14973
14974
14975
14976
14977
14978
14979
14980
14981
14982
14983
14984
14985
14986
14987
14988
14989
14990
14991
14992
14993
14994
14995
14996
14997
14998
14999
15000
15001
15002
15003
15004
15005
15006
15007
15008
15009
15010
15011
15012
15013
15014
15015
15016
15017
15018
15019
15020
15021
15022
15023
15024
15025
15026
15027
15028
15029
15030
15031
15032
15033
15034
15035
15036
15037
15038
15039
15040
15041
15042
15043
15044
15045
15046
15047
15048
15049
15050
15051
15052
15053
15054
15055
15056
15057
15058
15059
15060
15061
15062
15063
15064
15065
15066
15067
15068
15069
15070
15071
15072
15073
15074
15075
15076
15077
15078
15079
15080
15081
15082
15083
15084
15085
15086
15087
15088
15089
15090
15091
15092
15093
15094
15095
15096
15097
15098
15099
15100
15101
15102
15103
15104
15105
15106
15107
15108
15109
15110
15111
15112
15113
15114
15115
15116
15117
15118
15119
15120
15121
15122
15123
15124
15125
15126
15127
15128
15129
15130
15131
15132
15133
15134
15135
15136
15137
15138
15139
15140
15141
15142
15143
15144
15145
15146
15147
15148
15149
15150
15151
15152
15153
15154
15155
15156
15157
15158
15159
15160
15161
15162
15163
15164
15165
15166
15167
15168
15169
15170
15171
15172
15173
15174
15175
15176
15177
15178
15179
15180
15181
15182
15183
15184
15185
15186
15187
15188
15189
15190
15191
15192
15193
15194
15195
15196
15197
15198
15199
15200
15201
15202
15203
15204
15205
15206
15207
15208
15209
15210
15211
15212
15213
15214
15215
15216
15217
15218
15219
15220
15221
15222
15223
15224
15225
15226
15227
15228
15229
15230
15231
15232
15233
15234
15235
15236
15237
15238
15239
15240
15241
15242
15243
15244
15245
15246
15247
15248
15249
15250
15251
15252
15253
15254
15255
15256
15257
15258
15259
15260
15261
15262
15263
15264
15265
15266
15267
15268
15269
15270
15271
15272
15273
15274
15275
15276
15277
15278
15279
15280
15281
15282
15283
15284
15285
15286
15287
15288
15289
15290
15291
15292
15293
15294
15295
15296
15297
15298
15299
15300
15301
15302
15303
15304
15305
15306
15307
15308
15309
15310
15311
15312
15313
15314
15315
15316
15317
15318
15319
15320
15321
15322
15323
15324
15325
15326
15327
15328
15329
15330
15331
15332
15333
15334
15335
15336
15337
15338
15339
15340
15341
15342
15343
15344
15345
15346
15347
15348
15349
15350
15351
15352
15353
15354
15355
15356
15357
15358
15359
15360
15361
15362
15363
15364
15365
15366
15367
15368
15369
15370
15371
15372
15373
15374
15375
15376
15377
15378
15379
15380
15381
15382
15383
15384
15385
15386
15387
15388
15389
15390
15391
15392
15393
15394
15395
15396
15397
15398
15399
15400
15401
15402
15403
15404
15405
15406
15407
15408
15409
15410
15411
15412
15413
15414
15415
15416
15417
15418
15419
15420
15421
15422
15423
15424
15425
15426
15427
15428
15429
15430
15431
15432
15433
15434
15435
15436
15437
15438
15439
15440
15441
15442
15443
15444
15445
15446
15447
15448
15449
15450
15451
15452
15453
15454
15455
15456
15457
15458
15459
15460
15461
15462
15463
15464
15465
15466
15467
15468
15469
15470
15471
15472
15473
15474
15475
15476
15477
15478
15479
15480
15481
15482
15483
15484
15485
15486
15487
15488
15489
15490
15491
15492
15493
15494
15495
15496
15497
15498
15499
15500
15501
15502
15503
15504
15505
15506
15507
15508
15509
15510
15511
15512
15513
15514
15515
15516
15517
15518
15519
15520
15521
15522
15523
15524
15525
15526
15527
15528
15529
15530
15531
15532
15533
15534
15535
15536
15537
15538
15539
15540
15541
15542
15543
15544
15545
15546
15547
15548
15549
15550
15551
15552
15553
15554
15555
15556
15557
15558
15559
15560
15561
15562
15563
15564
15565
15566
15567
15568
15569
15570
15571
15572
15573
15574
15575
15576
15577
15578
15579
15580
15581
15582
15583
15584
15585
15586
15587
15588
15589
15590
15591
15592
15593
15594
15595
15596
15597
15598
15599
15600
15601
15602
15603
15604
15605
15606
15607
15608
15609
15610
15611
15612
15613
15614
15615
15616
15617
15618
15619
15620
15621
15622
15623
15624
15625
15626
15627
15628
15629
15630
15631
15632
15633
15634
15635
15636
15637
15638
15639
15640
15641
15642
15643
15644
15645
15646
15647
15648
15649
15650
15651
15652
15653
15654
15655
15656
15657
15658
15659
15660
15661
15662
15663
15664
15665
15666
15667
15668
15669
15670
15671
15672
15673
15674
15675
15676
15677
15678
15679
15680
15681
15682
15683
15684
15685
15686
15687
15688
15689
15690
15691
15692
15693
15694
15695
15696
15697
15698
15699
15700
15701
15702
15703
15704
15705
15706
15707
15708
15709
15710
15711
15712
15713
15714
15715
15716
15717
15718
15719
15720
15721
15722
15723
15724
15725
15726
15727
15728
15729
15730
15731
15732
15733
15734
15735
15736
15737
15738
15739
15740
15741
15742
15743
15744
15745
15746
15747
15748
15749
15750
15751
15752
15753
15754
15755
15756
15757
15758
15759
15760
15761
15762
15763
15764
15765
15766
15767
15768
15769
15770
15771
15772
15773
15774
15775
15776
15777
15778
15779
15780
15781
15782
15783
15784
15785
15786
15787
15788
15789
15790
15791
15792
15793
15794
15795
15796
15797
15798
15799
15800
15801
15802
15803
15804
15805
15806
15807
15808
15809
15810
15811
15812
15813
15814
15815
15816
15817
15818
15819
15820
15821
15822
15823
15824
15825
15826
15827
15828
15829
15830
15831
15832
15833
15834
15835
15836
15837
15838
15839
15840
15841
15842
15843
15844
15845
15846
15847
15848
15849
15850
15851
15852
15853
15854
15855
15856
15857
15858
15859
15860
15861
15862
15863
15864
15865
15866
15867
15868
15869
15870
15871
15872
15873
15874
15875
15876
15877
15878
15879
15880
15881
15882
15883
15884
15885
15886
15887
15888
15889
15890
15891
15892
15893
15894
15895
15896
15897
15898
15899
15900
15901
15902
15903
15904
15905
15906
15907
15908
15909
15910
15911
15912
15913
15914
15915
15916
15917
15918
15919
15920
15921
15922
15923
15924
15925
15926
15927
15928
15929
15930
15931
15932
15933
15934
15935
15936
15937
15938
15939
15940
15941
15942
15943
15944
15945
15946
15947
15948
15949
15950
15951
15952
15953
15954
15955
15956
15957
15958
15959
15960
15961
15962
15963
15964
15965
15966
15967
15968
15969
15970
15971
15972
15973
15974
15975
15976
15977
15978
15979
15980
15981
15982
15983
15984
15985
15986
15987
15988
15989
15990
15991
15992
15993
15994
15995
15996
15997
15998
15999
16000
16001
16002
16003
16004
16005
16006
16007
16008
16009
16010
16011
16012
16013
16014
16015
16016
16017
16018
16019
16020
16021
16022
16023
16024
16025
16026
16027
16028
16029
16030
16031
16032
16033
16034
16035
16036
16037
16038
16039
16040
16041
16042
16043
16044
16045
16046
16047
16048
16049
16050
16051
16052
16053
16054
16055
16056
16057
16058
16059
16060
16061
16062
16063
16064
16065
16066
16067
16068
16069
16070
16071
16072
16073
16074
16075
16076
16077
16078
16079
16080
16081
16082
16083
16084
16085
16086
16087
16088
16089
16090
16091
16092
16093
16094
16095
16096
16097
16098
16099
16100
16101
16102
16103
16104
16105
16106
16107
16108
16109
16110
16111
16112
16113
16114
16115
16116
16117
16118
16119
16120
16121
16122
16123
16124
16125
16126
16127
16128
16129
16130
16131
16132
16133
16134
16135
16136
16137
16138
16139
16140
16141
16142
16143
16144
16145
16146
16147
16148
16149
16150
16151
16152
16153
16154
16155
16156
16157
16158
16159
16160
16161
16162
16163
16164
16165
16166
16167
16168
16169
16170
16171
16172
16173
16174
16175
16176
16177
16178
16179
16180
16181
16182
16183
16184
16185
16186
16187
16188
16189
16190
16191
16192
16193
16194
16195
16196
16197
16198
16199
16200
16201
16202
16203
16204
16205
16206
16207
16208
16209
16210
16211
16212
16213
16214
16215
16216
16217
16218
16219
16220
16221
16222
16223
16224
16225
16226
16227
16228
16229
16230
16231
16232
16233
16234
16235
16236
16237
16238
16239
16240
16241
16242
16243
16244
16245
16246
16247
16248
16249
16250
16251
16252
16253
16254
16255
16256
16257
16258
16259
16260
16261
16262
16263
16264
16265
16266
16267
16268
16269
16270
16271
16272
16273
16274
16275
16276
16277
16278
16279
16280
16281
16282
16283
16284
16285
16286
16287
16288
16289
16290
16291
16292
16293
16294
16295
16296
16297
16298
16299
16300
16301
16302
16303
16304
16305
16306
16307
16308
16309
16310
16311
16312
16313
16314
16315
16316
16317
16318
16319
16320
16321
16322
16323
16324
16325
16326
16327
16328
16329
16330
16331
16332
16333
16334
16335
16336
16337
16338
16339
16340
16341
16342
16343
16344
16345
16346
16347
16348
16349
16350
16351
16352
16353
16354
16355
16356
16357
16358
16359
16360
16361
16362
16363
16364
16365
16366
16367
16368
16369
16370
16371
16372
16373
16374
16375
16376
16377
16378
16379
16380
16381
16382
16383
16384
16385
16386
16387
16388
16389
16390
16391
16392
16393
16394
16395
16396
16397
16398
16399
16400
16401
16402
16403
16404
16405
16406
16407
16408
16409
16410
16411
16412
16413
16414
16415
16416
16417
16418
16419
16420
16421
16422
16423
16424
16425
16426
16427
16428
16429
16430
16431
16432
16433
16434
16435
16436
16437
16438
16439
16440
16441
16442
16443
16444
16445
16446
16447
16448
16449
16450
16451
16452
16453
16454
16455
16456
16457
16458
16459
16460
16461
16462
16463
16464
16465
16466
16467
16468
16469
16470
16471
16472
16473
16474
16475
16476
16477
16478
16479
16480
16481
16482
16483
16484
16485
16486
16487
16488
16489
16490
16491
16492
16493
16494
16495
16496
16497
16498
16499
16500
16501
16502
16503
16504
16505
16506
16507
16508
16509
16510
16511
16512
16513
16514
16515
16516
16517
16518
16519
16520
16521
16522
16523
16524
16525
16526
16527
16528
16529
16530
16531
16532
16533
16534
16535
16536
16537
16538
16539
16540
16541
16542
16543
16544
16545
16546
16547
16548
16549
16550
16551
16552
16553
16554
16555
16556
16557
16558
16559
16560
16561
16562
16563
16564
16565
16566
16567
16568
16569
16570
16571
16572
16573
16574
16575
16576
16577
16578
16579
16580
16581
16582
16583
16584
16585
16586
16587
16588
16589
16590
16591
16592
16593
16594
16595
16596
16597
16598
16599
16600
16601
16602
16603
16604
16605
16606
16607
16608
16609
16610
16611
16612
16613
16614
16615
16616
16617
16618
16619
16620
16621
16622
16623
16624
16625
16626
16627
16628
16629
16630
16631
16632
16633
16634
16635
16636
16637
16638
16639
16640
16641
16642
16643
16644
16645
16646
16647
16648
16649
16650
16651
16652
16653
16654
16655
16656
16657
16658
16659
16660
16661
16662
16663
16664
16665
16666
16667
16668
16669
16670
16671
16672
16673
16674
16675
16676
16677
16678
16679
16680
16681
16682
16683
16684
16685
16686
16687
16688
16689
16690
16691
16692
16693
16694
16695
16696
16697
16698
16699
16700
16701
16702
16703
16704
16705
16706
16707
16708
16709
16710
16711
16712
16713
16714
16715
16716
16717
16718
16719
16720
16721
16722
16723
16724
16725
16726
16727
16728
16729
16730
16731
16732
16733
16734
16735
16736
16737
16738
16739
16740
16741
16742
16743
16744
16745
16746
16747
16748
16749
16750
16751
16752
16753
16754
16755
16756
16757
16758
16759
16760
16761
16762
16763
16764
16765
16766
16767
16768
16769
16770
16771
16772
16773
16774
16775
16776
16777
16778
16779
16780
16781
16782
16783
16784
16785
16786
16787
16788
16789
16790
16791
16792
16793
16794
16795
16796
16797
16798
16799
16800
16801
16802
16803
16804
16805
16806
16807
16808
16809
16810
16811
16812
16813
16814
16815
16816
16817
16818
16819
16820
16821
16822
16823
16824
16825
16826
16827
16828
16829
16830
16831
16832
16833
16834
16835
16836
16837
16838
16839
16840
16841
16842
16843
16844
16845
16846
16847
16848
16849
16850
16851
16852
16853
16854
16855
16856
16857
16858
16859
16860
16861
16862
16863
16864
16865
16866
16867
16868
16869
16870
16871
16872
16873
16874
16875
16876
16877
16878
16879
16880
16881
16882
16883
16884
16885
16886
16887
16888
16889
16890
16891
16892
16893
16894
16895
16896
16897
16898
16899
16900
16901
16902
16903
16904
16905
16906
16907
16908
16909
16910
16911
16912
16913
16914
16915
16916
16917
16918
16919
16920
16921
16922
16923
16924
16925
16926
16927
16928
16929
16930
16931
16932
16933
16934
16935
16936
16937
16938
16939
16940
16941
16942
16943
16944
16945
16946
16947
16948
16949
16950
16951
16952
16953
16954
16955
16956
16957
16958
16959
16960
16961
16962
16963
16964
16965
16966
16967
16968
16969
16970
16971
16972
16973
16974
16975
16976
16977
16978
16979
16980
16981
16982
16983
16984
16985
16986
16987
16988
16989
16990
16991
16992
16993
16994
16995
16996
16997
16998
16999
17000
17001
17002
17003
17004
17005
17006
17007
17008
17009
17010
17011
17012
17013
17014
17015
17016
17017
17018
17019
17020
17021
17022
17023
17024
17025
17026
17027
17028
17029
17030
17031
17032
17033
17034
17035
17036
17037
17038
17039
17040
17041
17042
17043
17044
17045
17046
17047
17048
17049
17050
17051
17052
17053
17054
17055
17056
17057
17058
17059
17060
17061
17062
17063
17064
17065
17066
17067
17068
17069
17070
17071
17072
17073
17074
17075
17076
17077
17078
17079
17080
17081
17082
17083
17084
17085
17086
17087
17088
17089
17090
17091
17092
17093
17094
17095
17096
17097
17098
17099
17100
17101
17102
17103
17104
17105
17106
17107
17108
17109
17110
17111
17112
17113
17114
17115
17116
17117
17118
17119
17120
17121
17122
17123
17124
17125
17126
17127
17128
17129
17130
17131
17132
17133
17134
17135
17136
17137
17138
17139
17140
17141
17142
17143
17144
17145
17146
17147
17148
17149
17150
17151
17152
17153
17154
17155
17156
17157
17158
17159
17160
17161
17162
17163
17164
17165
17166
17167
17168
17169
17170
17171
17172
17173
17174
17175
17176
17177
17178
17179
17180
17181
17182
17183
17184
17185
17186
17187
17188
17189
17190
17191
17192
17193
17194
17195
17196
17197
17198
17199
17200
17201
17202
17203
17204
17205
17206
17207
17208
17209
17210
17211
17212
17213
17214
17215
17216
17217
17218
17219
17220
17221
17222
17223
17224
17225
17226
17227
17228
17229
17230
17231
17232
17233
17234
17235
17236
17237
17238
17239
17240
17241
17242
17243
17244
17245
17246
17247
17248
17249
17250
17251
17252
17253
17254
17255
17256
17257
17258
17259
17260
17261
17262
17263
17264
17265
17266
17267
17268
17269
17270
17271
17272
17273
17274
17275
17276
17277
17278
17279
17280
17281
17282
17283
17284
17285
17286
17287
17288
17289
17290
17291
17292
17293
17294
17295
17296
17297
17298
17299
17300
17301
17302
17303
17304
17305
17306
17307
17308
17309
17310
17311
17312
17313
17314
17315
17316
17317
17318
17319
17320
17321
17322
17323
17324
17325
17326
17327
17328
17329
17330
17331
17332
17333
17334
17335
17336
17337
17338
17339
17340
17341
17342
17343
17344
17345
17346
17347
17348
17349
17350
17351
17352
17353
17354
17355
17356
17357
17358
17359
17360
17361
17362
17363
17364
17365
17366
17367
17368
17369
17370
17371
17372
17373
17374
17375
17376
17377
17378
17379
17380
17381
17382
17383
17384
17385
17386
17387
17388
17389
17390
17391
17392
17393
17394
17395
17396
17397
17398
17399
17400
17401
17402
17403
17404
17405
17406
17407
17408
17409
17410
17411
17412
17413
17414
17415
17416
17417
17418
17419
17420
17421
17422
17423
17424
17425
17426
17427
17428
17429
17430
17431
17432
17433
17434
17435
17436
17437
17438
17439
17440
17441
17442
17443
17444
17445
17446
17447
17448
17449
17450
17451
17452
17453
17454
17455
17456
17457
17458
17459
17460
17461
17462
17463
17464
17465
17466
17467
17468
17469
17470
17471
17472
17473
17474
17475
17476
17477
17478
17479
17480
17481
17482
17483
17484
17485
17486
17487
17488
17489
17490
17491
17492
17493
17494
17495
17496
17497
17498
17499
17500
17501
17502
17503
17504
17505
17506
17507
17508
17509
17510
17511
17512
17513
17514
17515
17516
17517
17518
17519
17520
17521
17522
17523
17524
17525
17526
17527
17528
17529
17530
17531
17532
17533
17534
17535
17536
17537
17538
17539
17540
17541
17542
17543
17544
17545
17546
17547
17548
17549
17550
17551
17552
17553
17554
17555
17556
17557
17558
17559
17560
17561
17562
17563
17564
17565
17566
17567
17568
17569
17570
17571
17572
17573
17574
17575
17576
17577
17578
17579
17580
17581
17582
17583
17584
17585
17586
17587
17588
17589
17590
17591
17592
17593
17594
17595
17596
17597
17598
17599
17600
17601
17602
17603
17604
17605
17606
17607
17608
17609
17610
17611
17612
17613
17614
17615
17616
17617
17618
17619
17620
17621
17622
17623
17624
17625
17626
17627
17628
17629
17630
17631
17632
17633
17634
17635
17636
17637
17638
17639
17640
17641
17642
17643
17644
17645
17646
17647
17648
17649
17650
17651
17652
17653
17654
17655
17656
17657
17658
17659
17660
17661
17662
17663
17664
17665
17666
17667
17668
17669
17670
17671
17672
17673
17674
17675
17676
17677
17678
17679
17680
17681
17682
17683
17684
17685
17686
17687
17688
17689
17690
17691
17692
17693
17694
17695
17696
17697
17698
17699
17700
17701
17702
17703
17704
17705
17706
17707
17708
17709
17710
17711
17712
17713
17714
17715
17716
17717
17718
17719
17720
17721
17722
17723
17724
17725
17726
17727
17728
17729
17730
17731
17732
17733
17734
17735
17736
17737
17738
17739
17740
17741
17742
17743
17744
17745
17746
17747
17748
17749
17750
17751
17752
17753
17754
17755
17756
17757
17758
17759
17760
17761
17762
17763
17764
17765
17766
17767
17768
17769
17770
17771
17772
17773
17774
17775
17776
17777
17778
17779
17780
17781
17782
17783
17784
17785
17786
17787
17788
17789
17790
17791
17792
17793
17794
17795
17796
17797
17798
17799
17800
17801
17802
17803
17804
17805
17806
17807
17808
17809
17810
17811
17812
17813
17814
17815
17816
17817
17818
17819
17820
17821
17822
17823
17824
17825
17826
17827
17828
17829
17830
17831
17832
17833
17834
17835
17836
17837
17838
17839
17840
17841
17842
17843
17844
17845
17846
17847
17848
17849
17850
17851
17852
17853
17854
17855
17856
17857
17858
17859
17860
17861
17862
17863
17864
17865
17866
17867
17868
17869
17870
17871
17872
17873
17874
17875
17876
17877
17878
17879
17880
17881
17882
17883
17884
17885
17886
17887
17888
17889
17890
17891
17892
17893
17894
17895
17896
17897
17898
17899
17900
17901
17902
17903
17904
17905
17906
17907
17908
17909
17910
17911
17912
17913
17914
17915
17916
17917
17918
17919
17920
17921
17922
17923
17924
17925
17926
17927
17928
17929
17930
17931
17932
17933
17934
17935
17936
17937
17938
17939
17940
17941
17942
17943
17944
17945
17946
17947
17948
17949
17950
17951
17952
17953
17954
17955
17956
17957
17958
17959
17960
17961
17962
17963
17964
17965
17966
17967
17968
17969
17970
17971
17972
17973
17974
17975
17976
17977
17978
17979
17980
17981
17982
17983
17984
17985
17986
17987
17988
17989
17990
17991
17992
17993
17994
17995
17996
17997
17998
17999
18000
18001
18002
18003
18004
18005
18006
18007
18008
18009
18010
18011
18012
18013
18014
18015
18016
18017
18018
18019
18020
18021
18022
18023
18024
18025
18026
18027
18028
18029
18030
18031
18032
18033
18034
18035
18036
18037
18038
18039
18040
18041
18042
18043
18044
18045
18046
18047
18048
18049
18050
18051
18052
18053
18054
18055
18056
18057
18058
18059
18060
18061
18062
18063
18064
18065
18066
18067
18068
18069
18070
18071
18072
18073
18074
18075
18076
18077
18078
18079
18080
18081
18082
18083
18084
18085
18086
18087
18088
18089
18090
18091
18092
18093
18094
18095
18096
18097
18098
18099
18100
18101
18102
18103
18104
18105
18106
18107
18108
18109
18110
18111
18112
18113
18114
18115
18116
18117
18118
18119
18120
18121
18122
18123
18124
18125
18126
18127
18128
18129
18130
18131
18132
18133
18134
18135
18136
18137
18138
18139
18140
18141
18142
18143
18144
18145
18146
18147
18148
18149
18150
18151
18152
18153
18154
18155
18156
18157
18158
18159
18160
18161
18162
18163
18164
18165
18166
18167
18168
18169
18170
18171
18172
18173
18174
18175
18176
18177
18178
18179
18180
18181
18182
18183
18184
18185
18186
18187
18188
18189
18190
18191
18192
18193
18194
18195
18196
18197
18198
18199
18200
18201
18202
18203
18204
18205
18206
18207
18208
18209
18210
18211
18212
18213
18214
18215
18216
18217
18218
18219
18220
18221
18222
18223
18224
18225
18226
18227
18228
18229
18230
18231
18232
18233
18234
18235
18236
18237
18238
18239
18240
18241
18242
18243
18244
18245
18246
18247
18248
18249
18250
18251
18252
18253
18254
18255
18256
18257
18258
18259
18260
18261
18262
18263
18264
18265
18266
18267
18268
18269
18270
18271
18272
18273
18274
18275
18276
18277
18278
18279
18280
18281
18282
18283
18284
18285
18286
18287
18288
18289
18290
18291
18292
18293
18294
18295
18296
18297
18298
18299
18300
18301
18302
18303
18304
18305
18306
18307
18308
18309
18310
18311
18312
18313
18314
18315
18316
18317
18318
18319
18320
18321
18322
18323
18324
18325
18326
18327
18328
18329
18330
18331
18332
18333
18334
18335
18336
18337
18338
18339
18340
18341
18342
18343
18344
18345
18346
18347
18348
18349
18350
18351
18352
18353
18354
18355
18356
18357
18358
18359
18360
18361
18362
18363
18364
18365
18366
18367
18368
18369
18370
18371
18372
18373
18374
18375
18376
18377
18378
18379
18380
18381
18382
18383
18384
18385
18386
18387
18388
18389
18390
18391
18392
18393
18394
18395
18396
18397
18398
18399
18400
18401
18402
18403
18404
18405
18406
18407
18408
18409
18410
18411
18412
18413
18414
18415
18416
18417
18418
18419
18420
18421
18422
18423
18424
18425
18426
18427
18428
18429
18430
18431
18432
18433
18434
18435
18436
18437
18438
18439
18440
18441
18442
18443
18444
18445
18446
18447
18448
18449
18450
18451
18452
18453
18454
18455
18456
18457
18458
18459
18460
18461
18462
18463
18464
18465
18466
18467
18468
18469
18470
18471
18472
18473
18474
18475
18476
18477
18478
18479
18480
18481
18482
18483
18484
18485
18486
18487
18488
18489
18490
18491
18492
18493
18494
18495
18496
18497
18498
18499
18500
18501
18502
18503
18504
18505
18506
18507
18508
18509
18510
18511
18512
18513
18514
18515
18516
18517
18518
18519
18520
18521
18522
18523
18524
18525
18526
18527
18528
18529
18530
18531
18532
18533
18534
18535
18536
18537
18538
18539
18540
18541
18542
18543
18544
18545
18546
18547
18548
18549
18550
18551
18552
18553
18554
18555
18556
18557
18558
18559
18560
18561
18562
18563
18564
18565
18566
18567
18568
18569
18570
18571
18572
18573
18574
18575
18576
18577
18578
18579
18580
18581
18582
18583
18584
18585
18586
18587
18588
18589
18590
18591
18592
18593
18594
18595
18596
18597
18598
18599
18600
18601
18602
18603
18604
18605
18606
18607
18608
18609
18610
18611
18612
18613
18614
18615
18616
18617
18618
18619
18620
18621
18622
18623
18624
18625
18626
18627
18628
18629
18630
18631
18632
18633
18634
18635
18636
18637
18638
18639
18640
18641
18642
18643
18644
18645
18646
18647
18648
18649
18650
18651
18652
18653
18654
18655
18656
18657
18658
18659
18660
18661
18662
18663
18664
18665
18666
18667
18668
18669
18670
18671
18672
18673
18674
18675
18676
18677
18678
18679
18680
18681
18682
18683
18684
18685
18686
18687
18688
18689
18690
18691
18692
18693
18694
18695
18696
18697
18698
18699
18700
18701
18702
18703
18704
18705
18706
18707
18708
18709
18710
18711
18712
18713
18714
18715
18716
18717
18718
18719
18720
18721
18722
18723
18724
18725
18726
18727
18728
18729
18730
18731
18732
18733
18734
18735
18736
18737
18738
18739
18740
18741
18742
18743
18744
18745
18746
18747
18748
18749
18750
18751
18752
18753
18754
18755
18756
18757
18758
18759
18760
18761
18762
18763
18764
18765
18766
18767
18768
18769
18770
18771
18772
18773
18774
18775
18776
18777
18778
18779
18780
18781
18782
18783
18784
18785
18786
18787
18788
18789
18790
18791
18792
18793
18794
18795
18796
18797
18798
18799
18800
18801
18802
18803
18804
18805
18806
18807
18808
18809
18810
18811
18812
18813
18814
18815
18816
18817
18818
18819
18820
18821
18822
18823
18824
18825
18826
18827
18828
18829
18830
18831
18832
18833
18834
18835
18836
18837
18838
18839
18840
18841
18842
18843
18844
18845
18846
18847
18848
18849
18850
18851
18852
18853
18854
18855
18856
18857
18858
18859
18860
18861
18862
18863
18864
18865
18866
18867
18868
18869
18870
18871
18872
18873
18874
18875
18876
18877
18878
18879
18880
18881
18882
18883
18884
18885
18886
18887
18888
18889
18890
18891
18892
18893
18894
18895
18896
18897
18898
18899
18900
18901
18902
18903
18904
18905
18906
18907
18908
18909
18910
18911
18912
18913
18914
18915
18916
18917
18918
18919
18920
18921
18922
18923
18924
18925
18926
18927
18928
18929
18930
18931
18932
18933
18934
18935
18936
18937
18938
18939
18940
18941
18942
18943
18944
18945
18946
18947
18948
18949
18950
18951
18952
18953
18954
18955
18956
18957
18958
18959
18960
18961
18962
18963
18964
18965
18966
18967
18968
18969
18970
18971
18972
18973
18974
18975
18976
18977
18978
18979
18980
18981
18982
18983
18984
18985
18986
18987
18988
18989
18990
18991
18992
18993
18994
18995
18996
18997
18998
18999
19000
19001
19002
19003
19004
19005
19006
19007
19008
19009
19010
19011
19012
19013
19014
19015
19016
19017
19018
19019
19020
19021
19022
19023
19024
19025
19026
19027
19028
19029
19030
19031
19032
19033
19034
19035
19036
19037
19038
19039
19040
19041
19042
19043
19044
19045
19046
19047
19048
19049
19050
19051
19052
19053
19054
19055
19056
19057
19058
19059
19060
19061
19062
19063
19064
19065
19066
19067
19068
19069
19070
19071
19072
19073
19074
19075
19076
19077
19078
19079
19080
19081
19082
19083
19084
19085
19086
19087
19088
19089
19090
19091
19092
19093
19094
19095
19096
19097
19098
19099
19100
19101
19102
19103
19104
19105
19106
19107
19108
19109
19110
19111
19112
19113
19114
19115
19116
19117
19118
19119
19120
19121
19122
19123
19124
19125
19126
19127
19128
19129
19130
19131
19132
19133
19134
19135
19136
19137
19138
19139
19140
19141
19142
19143
19144
19145
19146
19147
19148
19149
19150
19151
19152
19153
19154
19155
19156
19157
19158
19159
19160
19161
19162
19163
19164
19165
19166
19167
19168
19169
19170
19171
19172
19173
19174
19175
19176
19177
19178
19179
19180
19181
19182
19183
19184
19185
19186
19187
19188
19189
19190
19191
19192
19193
19194
19195
19196
19197
19198
19199
19200
19201
19202
19203
19204
19205
19206
19207
19208
19209
19210
19211
19212
19213
19214
19215
19216
19217
19218
19219
19220
19221
19222
19223
19224
19225
19226
19227
19228
19229
19230
19231
19232
19233
19234
19235
19236
19237
19238
19239
19240
19241
19242
19243
19244
19245
19246
19247
19248
19249
19250
19251
19252
19253
19254
19255
19256
19257
19258
19259
19260
19261
19262
19263
19264
19265
19266
19267
19268
19269
19270
19271
19272
19273
19274
19275
19276
19277
19278
19279
19280
19281
19282
19283
19284
19285
19286
19287
19288
19289
19290
19291
19292
19293
19294
19295
19296
19297
19298
19299
19300
19301
19302
19303
19304
19305
19306
19307
19308
19309
19310
19311
19312
19313
19314
19315
19316
19317
19318
19319
19320
19321
19322
19323
19324
19325
19326
19327
19328
19329
19330
19331
19332
19333
19334
19335
19336
19337
19338
19339
19340
19341
19342
19343
19344
19345
19346
19347
19348
19349
19350
19351
19352
19353
19354
19355
19356
19357
19358
19359
19360
19361
19362
19363
19364
19365
19366
19367
19368
19369
19370
19371
19372
19373
19374
19375
19376
19377
19378
19379
19380
19381
19382
19383
19384
19385
19386
19387
19388
19389
19390
19391
19392
19393
19394
19395
19396
19397
19398
19399
19400
19401
19402
19403
19404
19405
19406
19407
19408
19409
19410
19411
19412
19413
19414
19415
19416
19417
19418
19419
19420
19421
19422
19423
19424
19425
19426
19427
19428
19429
19430
19431
19432
19433
19434
19435
19436
19437
19438
19439
19440
19441
19442
19443
19444
19445
19446
19447
19448
19449
19450
19451
19452
19453
19454
19455
19456
19457
19458
19459
19460
19461
19462
19463
19464
19465
19466
19467
19468
19469
19470
19471
19472
19473
19474
19475
19476
19477
19478
19479
19480
19481
19482
19483
19484
19485
19486
19487
19488
19489
19490
19491
19492
19493
19494
19495
19496
19497
19498
19499
19500
19501
19502
19503
19504
19505
19506
19507
19508
19509
19510
19511
19512
19513
19514
19515
19516
19517
19518
19519
19520
19521
19522
19523
19524
19525
19526
19527
19528
19529
19530
19531
19532
19533
19534
19535
19536
19537
19538
19539
19540
19541
19542
19543
19544
19545
19546
19547
19548
19549
19550
19551
19552
19553
19554
19555
19556
19557
19558
19559
19560
19561
19562
19563
19564
19565
19566
19567
19568
19569
19570
19571
19572
19573
19574
19575
19576
19577
19578
19579
19580
19581
19582
19583
19584
19585
19586
19587
19588
19589
19590
19591
19592
19593
19594
19595
19596
19597
19598
19599
19600
19601
19602
19603
19604
19605
19606
19607
19608
19609
19610
19611
19612
19613
19614
19615
19616
19617
19618
19619
19620
19621
19622
19623
19624
19625
19626
19627
19628
19629
19630
19631
19632
19633
19634
19635
19636
19637
19638
19639
19640
19641
19642
19643
19644
19645
19646
19647
19648
19649
19650
19651
19652
19653
19654
19655
19656
19657
19658
19659
19660
19661
19662
19663
19664
19665
19666
19667
19668
19669
19670
19671
19672
19673
19674
19675
19676
19677
19678
19679
19680
19681
19682
19683
19684
19685
19686
19687
19688
19689
19690
19691
19692
19693
19694
19695
19696
19697
19698
19699
19700
19701
19702
19703
19704
19705
19706
19707
19708
19709
19710
19711
19712
19713
19714
19715
19716
19717
19718
19719
19720
19721
19722
19723
19724
19725
19726
19727
19728
19729
19730
19731
19732
19733
19734
19735
19736
19737
19738
19739
19740
19741
19742
19743
19744
19745
19746
19747
19748
19749
19750
19751
19752
19753
19754
19755
19756
19757
19758
19759
19760
19761
19762
19763
19764
19765
19766
19767
19768
19769
19770
19771
19772
19773
19774
19775
19776
19777
19778
19779
19780
19781
19782
19783
19784
19785
19786
19787
19788
19789
19790
19791
19792
19793
19794
19795
19796
19797
19798
19799
19800
19801
19802
19803
19804
19805
19806
19807
19808
19809
19810
19811
19812
19813
19814
19815
19816
19817
19818
19819
19820
19821
19822
19823
19824
19825
19826
19827
19828
19829
19830
19831
19832
19833
19834
19835
19836
19837
19838
19839
19840
19841
19842
19843
19844
19845
19846
19847
19848
19849
19850
19851
19852
19853
19854
19855
19856
19857
19858
19859
19860
19861
19862
19863
19864
19865
19866
19867
19868
19869
19870
19871
19872
19873
19874
19875
19876
19877
19878
19879
19880
19881
19882
19883
19884
19885
19886
19887
19888
19889
19890
19891
19892
19893
19894
19895
19896
19897
19898
19899
19900
19901
19902
19903
19904
19905
19906
19907
19908
19909
19910
19911
19912
19913
19914
19915
19916
19917
19918
19919
19920
19921
19922
19923
19924
19925
19926
19927
19928
19929
19930
19931
19932
19933
19934
19935
19936
19937
19938
19939
19940
19941
19942
19943
19944
19945
19946
19947
19948
19949
19950
19951
19952
19953
19954
19955
19956
19957
19958
19959
19960
19961
19962
19963
19964
19965
19966
19967
19968
19969
19970
19971
19972
19973
19974
19975
19976
19977
19978
19979
19980
19981
19982
19983
19984
19985
19986
19987
19988
19989
19990
19991
19992
19993
19994
19995
19996
19997
19998
19999
20000
20001
20002
20003
20004
20005
20006
20007
20008
20009
20010
20011
20012
20013
20014
20015
20016
20017
20018
20019
20020
20021
20022
20023
20024
20025
20026
20027
20028
20029
20030
20031
20032
20033
20034
20035
20036
20037
20038
20039
20040
20041
20042
20043
20044
20045
20046
20047
20048
20049
20050
20051
20052
20053
20054
20055
20056
20057
20058
20059
20060
20061
20062
20063
20064
20065
20066
20067
20068
20069
20070
20071
20072
20073
20074
20075
20076
20077
20078
20079
20080
20081
20082
20083
20084
20085
20086
20087
20088
20089
20090
20091
20092
20093
20094
20095
20096
20097
20098
20099
20100
20101
20102
20103
20104
20105
20106
20107
20108
20109
20110
20111
20112
20113
20114
20115
20116
20117
20118
20119
20120
20121
20122
20123
20124
20125
20126
20127
20128
20129
20130
20131
20132
20133
20134
20135
20136
20137
20138
20139
20140
20141
20142
20143
20144
20145
20146
20147
20148
20149
20150
20151
20152
20153
20154
20155
20156
20157
20158
20159
20160
20161
20162
20163
20164
20165
20166
20167
20168
20169
20170
20171
20172
20173
20174
20175
20176
20177
20178
20179
20180
20181
20182
20183
20184
20185
20186
20187
20188
20189
20190
20191
20192
30861
30862
30863
30864
30865
30866
30867
30868
30869
30870
30871
30872
30873
30874
30875
30876
30877
30878
30879
30880
30881
30882
30883
30884
30885
30886
30887
30888
30889
30890
30891
30892
30893
30894
30895
30896
30897
30898
30899
30900
30901
30902
30903
30904
30905
30906
30907
30908
30909
30910
30911
30912
30913
30914
30915
30916
30917
30918
30919
30920
30921
30922
30923
30924
30925
30926
30927
30928
30929
30930
30931
30932
30933
30934
30935
30936
30937
30938
30939
30940
30941
30942
30943
30944
30945
30946
30947
30948
30949
30950
30951
30952
30953
30954
30955
30956
30957
30958
30959
30960
30961
30962
30963
30964
30965
30966
30967
30968
30969
30970
30971
30972
30973
30974
30975
30976
30977
30978
30979
30980
30981
30982
30983
30984
30985
30986
30987
30988
30989
30990
30991
30992
30993
30994
30995
30996
30997
30998
30999
31000
31001
31002
31003
31004
31005
31006
31007
31008
31009
31010
31011
31012
31013
31014
31015
31016
31017
31018
31019
31020
31021
31022
31023
31024
31025
31026
31027
31028
31029
31030
31031
31032
31033
31034
31035
31036
31037
31038
31039
31040
31041
31042
31043
31044
31045
31046
31047
31048
31049
31050
31051
31052
31053
31054
31055
31056
31057
31058
31059
31060
31061
31062
31063
31064
31065
31066
31067
31068
31069
31070
31071
31072
31073
31074
31075
31076
31077
31078
31079
31080
31081
31082
31083
31084
31085
31086
31087
31088
31089
31090
31091
31092
31093
31094
31095
31096
31097
31098
31099
31100
31101
31102
31103
31104
31105
31106
31107
31108
31109
31110
31111
31112
31113
31114
31115
31116
31117
31118
31119
31120
31121
31122
31123
31124
31125
31126
31127
31128
31129
31130
31131
31132
31133
31134
31135
31136
31137
31138
31139
31140
31141
31142
31143
31144
31145
31146
31147
31148
31149
31150
31151
31152
31153
31154
31155
31156
31157
31158
31159
31160
31161
31162
31163
31164
31165
31166
31167
31168
31169
31170
31171
31172
31173
31174
31175
31176
31177
31178
31179
31180
31181
31182
31183
31184
31185
31186
31187
31188
31189
31190
31191
31192
31193
31194
31195
31196
31197
31198
31199
31200
31201
31202
31203
31204
31205
31206
31207
31208
31209
31210
31211
31212
31213
31214
31215
31216
31217
31218
31219
31220
31221
31222
31223
31224
31225
31226
31227
31228
31229
31230
31231
31232
31233
31234
31235
31236
31237
31238
31239
31240
31241
31242
31243
31244
31245
31246
31247
31248
31249
31250
31251
31252
31253
31254
31255
31256
31257
31258
31259
31260
31261
31262
31263
31264
31265
31266
31267
31268
31269
31270
31271
31272
31273
31274
31275
31276
31277
31278
31279
31280
31281
31282
31283
31284
31285
31286
31287
31288
31289
31290
31291
31292
31293
31294
31295
31296
31297
31298
31299
31300
31301
31302
31303
31304
31305
31306
31307
31308
31309
31310
31311
31312
31313
31314
31315
31316
31317
31318
31319
31320
31321
31322
31323
31324
31325
31326
31327
31328
31329
31330
31331
31332
31333
31334
31335
31336
31337
31338
31339
31340
31341
31342
31343
31344
31345
31346
31347
31348
31349
31350
31351
31352
31353
31354
31355
31356
31357
31358
31359
31360
31361
31362
31363
31364
31365
31366
31367
31368
31369
31370
31371
31372
31373
31374
31375
31376
31377
31378
31379
31380
31381
31382
31383
31384
31385
31386
31387
31388
31389
31390
31391
31392
31393
31394
31395
31396
31397
31398
31399
31400
31401
31402
31403
31404
31405
31406
31407
31408
31409
31410
31411
31412
31413
31414
31415
31416
31417
31418
31419
31420
31421
31422
31423
31424
31425
31426
31427
31428
31429
31430
31431
31432
31433
31434
31435
31436
31437
31438
31439
31440
31441
31442
31443
31444
31445
31446
31447
31448
31449
31450
31451
31452
31453
31454
31455
31456
31457
31458
31459
31460
31461
31462
31463
31464
31465
31466
31467
31468
31469
31470
31471
31472
31473
31474
31475
31476
31477
31478
31479
31480
31481
31482
31483
31484
31485
31486
31487
31488
31489
31490
31491
31492
31493
31494
31495
31496
31497
31498
31499
31500
31501
31502
31503
31504
31505
31506
31507
31508
31509
31510
31511
31512
31513
31514
31515
31516
31517
31518
31519
31520
31521
31522
31523
31524
31525
31526
31527
31528
31529
31530
31531
31532
31533
31534
31535
31536
31537
31538
31539
31540
31541
31542
31543
31544
31545
31546
31547
31548
31549
31550
31551
31552
31553
31554
31555
31556
31557
31558
31559
31560
31561
31562
31563
31564
31565
31566
31567
31568
31569
31570
31571
31572
31573
31574
31575
31576
31577
31578
31579
31580
31581
31582
31583
31584
31585
31586
31587
31588
31589
31590
31591
31592
31593
31594
31595
31596
31597
31598
31599
31600
31601
31602
31603
31604
31605
31606
31607
31608
31609
31610
31611
31612
31613
31614
31615
31616
31617
31618
31619
31620
31621
31622
31623
31624
31625
31626
31627
31628
31629
31630
31631
31632
31633
31634
31635
31636
31637
31638
31639
31640
31641
31642
31643
31644
31645
31646
31647
31648
31649
31650
31651
31652
31653
31654
31655
31656
31657
31658
31659
31660
31661
31662
31663
31664
31665
31666
31667
31668
31669
31670
31671
31672
31673
31674
31675
31676
31677
31678
31679
31680
31681
31682
31683
31684
31685
31686
31687
31688
31689
31690
31691
31692
31693
31694
31695
31696
31697
31698
31699
31700
31701
31702
31703
31704
31705
31706
31707
31708
31709
31710
31711
31712
31713
31714
31715
31716
31717
31718
31719
31720
31721
31722
31723
31724
31725
31726
31727
31728
31729
31730
31731
31732
31733
31734
31735
31736
31737
31738
31739
31740
31741
31742
31743
31744
31745
31746
31747
31748
31749
31750
31751
31752
31753
31754
31755
31756
31757
31758
31759
31760
31761
31762
31763
31764
31765
31766
31767
31768
31769
31770
31771
31772
31773
31774
31775
31776
31777
31778
31779
31780
31781
31782
31783
31784
31785
31786
31787
31788
31789
31790
31791
31792
31793
31794
31795
31796
31797
31798
31799
31800
31801
31802
31803
31804
31805
31806
31807
31808
31809
31810
31811
31812
31813
31814
31815
31816
31817
31818
31819
31820
31821
31822
31823
31824
31825
31826
31827
31828
31829
31830
31831
31832
31833
31834
31835
31836
31837
31838
31839
31840
31841
31842
31843
31844
31845
31846
31847
31848
31849
31850
31851
31852
31853
31854
31855
31856
31857
31858
31859
31860
31861
31862
31863
31864
31865
31866
31867
31868
31869
31870
31871
31872
31873
31874
31875
31876
31877
31878
31879
31880
31881
31882
31883
31884
31885
31886
31887
31888
31889
31890
31891
31892
31893
31894
31895
31896
31897
31898
31899
31900
31901
31902
31903
31904
31905
31906
31907
31908
31909
31910
31911
31912
31913
31914
31915
31916
31917
31918
31919
31920
31921
31922
31923
31924
31925
31926
31927
31928
31929
31930
31931
31932
31933
31934
31935
31936
31937
31938
31939
31940
31941
31942
31943
31944
31945
31946
31947
31948
31949
31950
31951
31952
31953
31954
31955
31956
31957
31958
31959
31960
31961
31962
31963
31964
31965
31966
31967
31968
31969
31970
31971
31972
31973
31974
31975
31976
31977
31978
31979
31980
31981
31982
31983
31984
31985
31986
31987
31988
31989
31990
31991
31992
31993
31994
31995
31996
31997
31998
31999
32000
32001
32002
32003
32004
32005
32006
32007
32008
32009
32010
32011
32012
32013
32014
32015
32016
32017
32018
32019
32020
32021
32022
32023
32024
32025
32026
32027
32028
32029
32030
32031
32032
32033
32034
32035
32036
32037
32038
32039
32040
32041
32042
32043
32044
32045
32046
32047
32048
32049
32050
32051
32052
32053
32054
32055
32056
32057
32058
32059
32060
32061
32062
32063
32064
32065
32066
32067
32068
32069
32070
32071
32072
32073
32074
32075
32076
32077
32078
32079
32080
32081
32082
32083
32084
32085
32086
32087
32088
32089
32090
32091
32092
32093
32094
32095
32096
32097
32098
32099
32100
32101
32102
32103
32104
32105
32106
32107
32108
32109
32110
32111
32112
32113
32114
32115
32116
32117
32118
32119
32120
32121
32122
32123
32124
32125
32126
32127
32128
32129
32130
32131
32132
32133
32134
32135
32136
32137
32138
32139
32140
32141
32142
32143
32144
32145
32146
32147
32148
32149
32150
32151
32152
32153
32154
32155
32156
32157
32158
32159
32160
32161
32162
32163
32164
32165
32166
32167
32168
32169
32170
32171
32172
32173
32174
32175
32176
32177
32178
32179
32180
32181
32182
32183
32184
32185
32186
32187
32188
32189
32190
32191
32192
32193
32194
32195
32196
32197
32198
32199
32200
32201
32202
32203
32204
32205
32206
32207
32208
32209
32210
32211
32212
32213
32214
32215
32216
32217
32218
32219
32220
32221
32222
32223
32224
32225
32226
32227
32228
32229
32230
32231
32232
32233
32234
32235
32236
32237
32238
32239
32240
32241
32242
32243
32244
32245
32246
32247
32248
32249
32250
32251
32252
32253
32254
32255
32256
32257
32258
32259
32260
32261
32262
32263
32264
32265
32266
32267
32268
32269
32270
32271
32272
32273
32274
32275
32276
32277
32278
32279
32280
32281
32282
32283
32284
32285
32286
32287
32288
32289
32290
32291
32292
32293
32294
32295
32296
32297
32298
32299
32300
32301
32302
32303
32304
32305
32306
32307
32308
32309
32310
32311
32312
32313
32314
32315
32316
32317
32318
32319
32320
32321
32322
32323
32324
32325
32326
32327
32328
32329
32330
32331
32332
32333
32334
32335
32336
32337
32338
32339
32340
32341
32342
32343
32344
32345
32346
32347
32348
32349
32350
32351
32352
32353
32354
32355
32356
32357
32358
32359
32360
32361
32362
32363
32364
32365
32366
32367
32368
32369
32370
32371
32372
32373
32374
32375
32376
32377
32378
32379
32380
32381
32382
32383
32384
32385
32386
32387
32388
32389
32390
32391
32392
32393
32394
32395
32396
32397
32398
32399
32400
32401
32402
32403
32404
32405
32406
32407
32408
32409
32410
32411
32412
32413
32414
32415
32416
32417
32418
32419
32420
32421
32422
32423
32424
32425
32426
32427
32428
32429
32430
32431
32432
32433
32434
32435
32436
32437
32438
32439
32440
32441
32442
32443
32444
32445
32446
32447
32448
32449
32450
32451
32452
32453
32454
32455
32456
32457
32458
32459
32460
32461
32462
32463
32464
32465
32466
32467
32468
32469
32470
32471
32472
32473
32474
32475
32476
32477
32478
32479
32480
32481
32482
32483
32484
32485
32486
32487
32488
32489
32490
32491
32492
32493
32494
32495
32496
32497
32498
32499
32500
32501
32502
32503
32504
32505
32506
32507
32508
32509
32510
32511
32512
32513
32514
32515
32516
32517
32518
32519
32520
32521
32522
32523
32524
32525
32526
32527
32528
32529
32530
32531
32532
32533
32534
32535
32536
32537
32538
32539
32540
32541
32542
32543
32544
32545
32546
32547
32548
32549
32550
32551
32552
32553
32554
32555
32556
32557
32558
32559
32560
32561
32562
32563
32564
32565
32566
32567
32568
32569
32570
32571
32572
32573
32574
32575
32576
32577
32578
32579
32580
32581
32582
32583
32584
32585
32586
32587
32588
32589
32590
32591
32592
32593
32594
32595
32596
32597
32598
32599
32600
32601
32602
32603
32604
32605
32606
32607
32608
32609
32610
32611
32612
32613
32614
32615
32616
32617
32618
32619
32620
32621
32622
32623
32624
32625
32626
32627
32628
32629
32630
32631
32632
32633
32634
32635
32636
32637
32638
32639
32640
32641
32642
32643
32644
32645
32646
32647
32648
32649
32650
32651
32652
32653
32654
32655
32656
32657
32658
32659
32660
32661
32662
32663
32664
32665
32666
32667
32668
32669
32670
32671
32672
32673
32674
32675
32676
32677
32678
32679
32680
32681
32682
32683
32684
32685
32686
32687
32688
32689
32690
32691
32692
32693
32694
32695
32696
32697
32698
32699
32700
32701
32702
32703
32704
32705
32706
32707
32708
32709
32710
32711
32712
32713
32714
32715
32716
32717
32718
32719
32720
32721
32722
32723
32724
32725
32726
32727
32728
32729
32730
32731
32732
32733
32734
32735
32736
32737
32738
32739
32740
32741
32742
32743
32744
32745
32746
32747
32748
32749
32750
32751
32752
32753
32754
32755
32756
32757
32758
32759
32760
32761
32762
32763
32764
32765
32766
32767
32768
32769
32770
32771
32772
32773
32774
32775
32776
32777
32778
32779
32780
32781
32782
32783
32784
32785
32786
32787
32788
32789
32790
32791
32792
32793
32794
32795
32796
32797
32798
32799
32800
32801
32802
32803
32804
32805
32806
32807
32808
32809
32810
32811
32812
32813
32814
32815
32816
32817
32818
32819
32820
32821
32822
32823
32824
32825
32826
32827
32828
32829
32830
32831
32832
32833
32834
32835
32836
32837
32838
32839
32840
32841
32842
32843
32844
32845
32846
32847
32848
32849
32850
32851
32852
32853
32854
32855
32856
32857
32858
32859
32860
32861
32862
32863
32864
32865
32866
32867
32868
32869
32870
32871
32872
32873
32874
32875
32876
32877
32878
32879
32880
32881
32882
32883
32884
32885
32886
32887
32888
32889
32890
32891
32892
32893
32894
32895
32896
32897
32898
32899
32900
32901
32902
32903
32904
32905
32906
32907
32908
32909
32910
32911
32912
32913
32914
32915
32916
32917
32918
32919
32920
32921
32922
32923
32924
32925
32926
32927
32928
32929
32930
32931
32932
32933
32934
32935
32936
32937
32938
32939
32940
32941
32942
32943
32944
32945
32946
32947
32948
32949
32950
32951
32952
32953
32954
32955
32956
32957
32958
32959
32960
32961
32962
32963
32964
32965
32966
32967
32968
32969
32970
32971
32972
32973
32974
32975
32976
32977
32978
32979
32980
32981
32982
32983
32984
32985
32986
32987
32988
32989
32990
32991
32992
32993
32994
32995
32996
32997
32998
32999
33000
33001
33002
33003
33004
33005
33006
33007
33008
33009
33010
33011
33012
33013
33014
33015
33016
33017
33018
33019
33020
33021
33022
33023
33024
33025
33026
33027
33028
33029
33030
33031
33032
33033
33034
33035
33036
33037
33038
33039
33040
33041
33042
33043
33044
33045
33046
33047
33048
33049
33050
33051
33052
33053
33054
33055
33056
33057
33058
33059
33060
33061
33062
33063
33064
33065
33066
33067
33068
33069
33070
33071
33072
33073
33074
33075
33076
33077
33078
33079
33080
33081
33082
33083
33084
33085
33086
33087
33088
33089
33090
33091
33092
33093
33094
33095
33096
33097
33098
33099
33100
33101
33102
33103
33104
33105
33106
33107
33108
33109
33110
33111
33112
33113
33114
33115
33116
33117
33118
33119
33120
33121
33122
33123
33124
33125
33126
33127
33128
33129
33130
33131
33132
33133
33134
33135
33136
33137
33138
33139
33140
33141
33142
33143
33144
33145
33146
33147
33148
33149
33150
33151
33152
33153
33154
33155
33156
33157
33158
33159
33160
33161
33162
33163
33164
33165
33166
33167
33168
33169
33170
33171
33172
33173
33174
33175
33176
33177
33178
33179
33180
33181
33182
33183
33184
33185
33186
33187
33188
33189
33190
33191
33192
33193
33194
33195
33196
33197
33198
33199
33200
33201
33202
33203
33204
33205
33206
33207
33208
33209
33210
33211
33212
33213
33214
33215
33216
33217
33218
33219
33220
33221
33222
33223
33224
33225
33226
33227
33228
33229
33230
33231
33232
33233
33234
33235
33236
33237
33238
33239
33240
33241
33242
33243
33244
33245
33246
33247
33248
33249
33250
33251
33252
33253
33254
33255
33256
33257
33258
33259
33260
33261
33262
33263
33264
33265
33266
33267
33268
33269
33270
33271
33272
33273
33274
33275
33276
33277
33278
33279
33280
33281
33282
33283
33284
33285
33286
33287
33288
33289
33290
33291
33292
33293
33294
33295
33296
33297
33298
33299
33300
33301
33302
33303
33304
33305
33306
33307
33308
33309
33310
33311
33312
33313
33314
33315
33316
33317
33318
33319
33320
33321
33322
33323
33324
33325
33326
33327
33328
33329
33330
33331
33332
33333
33334
33335
33336
33337
33338
33339
33340
33341
33342
33343
33344
33345
33346
33347
33348
33349
33350
33351
33352
33353
33354
33355
33356
33357
33358
33359
33360
33361
33362
33363
33364
33365
33366
33367
33368
33369
33370
33371
33372
33373
33374
33375
33376
33377
33378
33379
33380
33381
33382
33383
33384
33385
33386
33387
33388
33389
33390
33391
33392
33393
33394
33395
33396
33397
33398
33399
33400
33401
33402
33403
33404
33405
33406
33407
33408
33409
33410
33411
33412
33413
33414
33415
33416
33417
33418
33419
33420
33421
33422
33423
33424
33425
33426
33427
33428
33429
33430
33431
33432
33433
33434
33435
33436
33437
33438
33439
33440
33441
33442
33443
33444
33445
33446
33447
33448
33449
33450
33451
33452
33453
33454
33455
33456
33457
33458
33459
33460
33461
33462
33463
33464
33465
33466
33467
33468
33469
33470
33471
33472
33473
33474
33475
33476
33477
33478
33479
33480
33481
33482
33483
33484
33485
33486
33487
33488
33489
33490
33491
33492
33493
33494
33495
33496
33497
33498
33499
33500
33501
33502
33503
33504
33505
33506
33507
33508
33509
33510
33511
33512
33513
33514
33515
33516
33517
33518
33519
33520
33521
33522
33523
33524
33525
33526
33527
33528
33529
33530
33531
33532
33533
33534
33535
33536
33537
33538
33539
33540
33541
33542
33543
33544
33545
33546
33547
33548
33549
33550
33551
33552
33553
33554
33555
33556
33557
33558
33559
33560
33561
33562
33563
33564
33565
33566
33567
33568
33569
33570
33571
33572
33573
33574
33575
33576
33577
33578
33579
33580
33581
33582
33583
33584
33585
33586
33587
33588
33589
33590
33591
33592
33593
33594
33595
33596
33597
33598
33599
33600
33601
33602
33603
33604
33605
33606
33607
33608
33609
33610
33611
33612
33613
33614
33615
33616
33617
33618
33619
33620
33621
33622
33623
33624
33625
33626
33627
33628
33629
33630
33631
33632
33633
33634
33635
33636
33637
33638
33639
33640
33641
33642
33643
33644
33645
33646
33647
33648
33649
33650
33651
33652
33653
33654
33655
33656
33657
33658
33659
33660
33661
33662
33663
33664
33665
33666
33667
33668
33669
33670
33671
33672
33673
33674
33675
33676
33677
33678
33679
33680
33681
33682
33683
33684
33685
33686
33687
33688
33689
33690
33691
33692
33693
33694
33695
33696
33697
33698
33699
33700
33701
33702
33703
33704
33705
33706
33707
33708
33709
33710
33711
33712
33713
33714
33715
33716
33717
33718
33719
33720
33721
33722
33723
33724
33725
33726
33727
33728
33729
33730
33731
33732
33733
33734
33735
33736
33737
33738
33739
33740
33741
33742
33743
33744
33745
33746
33747
33748
33749
33750
33751
33752
33753
33754
33755
33756
33757
33758
33759
33760
33761
33762
33763
33764
33765
33766
33767
33768
33769
33770
33771
33772
33773
33774
33775
33776
33777
33778
33779
33780
33781
33782
33783
33784
33785
33786
33787
33788
33789
33790
33791
33792
33793
33794
33795
33796
33797
33798
33799
33800
33801
33802
33803
33804
33805
33806
33807
33808
33809
33810
33811
33812
33813
33814
33815
33816
33817
33818
33819
33820
33821
33822
33823
33824
33825
33826
33827
33828
33829
33830
33831
33832
33833
33834
33835
33836
33837
33838
33839
33840
33841
33842
33843
33844
33845
33846
33847
33848
33849
33850
33851
33852
33853
33854
33855
33856
33857
33858
33859
33860
33861
33862
33863
33864
33865
33866
33867
33868
33869
33870
33871
33872
33873
33874
33875
33876
33877
33878
33879
33880
33881
33882
33883
33884
33885
33886
33887
33888
33889
33890
33891
33892
33893
33894
33895
33896
33897
33898
33899
33900
33901
33902
33903
33904
33905
33906
33907
33908
33909
33910
33911
33912
33913
33914
33915
33916
33917
33918
33919
33920
33921
33922
33923
33924
33925
33926
33927
33928
33929
33930
33931
33932
33933
33934
33935
33936
33937
33938
33939
33940
33941
33942
33943
33944
33945
33946
33947
33948
33949
33950
33951
33952
33953
33954
33955
33956
33957
33958
33959
33960
33961
33962
33963
33964
33965
33966
33967
33968
33969
33970
33971
33972
33973
33974
33975
33976
33977
33978
33979
33980
33981
33982
33983
33984
33985
33986
33987
33988
33989
33990
33991
33992
33993
33994
33995
33996
33997
33998
33999
34000
34001
34002
34003
34004
34005
34006
34007
34008
34009
34010
34011
34012
34013
34014
34015
34016
34017
34018
34019
34020
34021
34022
34023
34024
34025
34026
34027
34028
34029
34030
34031
34032
34033
34034
34035
34036
34037
34038
34039
34040
34041
34042
34043
34044
34045
34046
34047
34048
34049
34050
34051
34052
34053
34054
34055
34056
34057
34058
34059
34060
34061
34062
34063
34064
34065
34066
34067
34068
34069
34070
34071
34072
34073
34074
34075
34076
34077
34078
34079
34080
34081
34082
34083
34084
34085
34086
34087
34088
34089
34090
34091
34092
34093
34094
34095
34096
34097
34098
34099
34100
34101
34102
34103
34104
34105
34106
34107
34108
34109
34110
34111
34112
34113
34114
34115
34116
34117
34118
34119
34120
34121
34122
34123
34124
34125
34126
34127
34128
34129
34130
34131
34132
34133
34134
34135
34136
34137
34138
34139
34140
34141
34142
34143
34144
34145
34146
34147
34148
34149
34150
34151
34152
34153
34154
34155
34156
34157
34158
34159
34160
34161
34162
34163
34164
34165
34166
34167
34168
34169
34170
34171
34172
34173
34174
34175
34176
34177
34178
34179
34180
34181
34182
34183
34184
34185
34186
34187
34188
34189
34190
34191
34192
34193
34194
34195
34196
34197
34198
34199
34200
34201
34202
34203
34204
34205
34206
34207
34208
34209
34210
34211
34212
34213
34214
34215
34216
34217
34218
34219
34220
34221
34222
34223
34224
34225
34226
34227
34228
34229
34230
34231
34232
34233
34234
34235
34236
34237
34238
34239
34240
34241
34242
34243
34244
34245
34246
34247
34248
34249
34250
34251
34252
34253
34254
34255
34256
34257
34258
34259
34260
34261
34262
34263
34264
34265
34266
34267
34268
34269
34270
34271
34272
34273
34274
34275
34276
34277
34278
34279
34280
34281
34282
34283
34284
34285
34286
34287
34288
34289
34290
34291
34292
34293
34294
34295
34296
34297
34298
34299
34300
34301
34302
34303
34304
34305
34306
34307
34308
34309
34310
34311
34312
34313
34314
34315
34316
34317
34318
34319
34320
34321
34322
34323
34324
34325
34326
34327
34328
34329
34330
34331
34332
34333
34334
34335
34336
34337
34338
34339
34340
34341
34342
34343
34344
34345
34346
34347
34348
34349
34350
34351
34352
34353
34354
34355
34356
34357
34358
34359
34360
34361
34362
34363
34364
34365
34366
34367
34368
34369
34370
34371
34372
34373
34374
34375
34376
34377
34378
34379
34380
34381
34382
34383
34384
34385
34386
34387
34388
34389
34390
34391
34392
34393
34394
34395
34396
34397
34398
34399
34400
34401
34402
34403
34404
34405
34406
34407
34408
34409
34410
34411
34412
34413
34414
34415
34416
34417
34418
34419
34420
34421
34422
34423
34424
34425
34426
34427
34428
34429
34430
34431
34432
34433
34434
34435
34436
34437
34438
34439
34440
34441
34442
34443
34444
34445
34446
34447
34448
34449
34450
34451
34452
34453
34454
34455
34456
34457
34458
34459
34460
34461
34462
34463
34464
34465
34466
34467
34468
34469
34470
34471
34472
34473
34474
34475
34476
34477
34478
34479
34480
34481
34482
34483
34484
34485
34486
34487
34488
34489
34490
34491
34492
34493
34494
34495
34496
34497
34498
34499
34500
34501
34502
34503
34504
34505
34506
34507
34508
34509
34510
34511
34512
34513
34514
34515
34516
34517
34518
34519
34520
34521
34522
34523
34524
34525
34526
34527
34528
34529
34530
34531
34532
34533
34534
34535
34536
34537
34538
34539
34540
34541
34542
34543
34544
34545
34546
34547
34548
34549
34550
34551
34552
34553
34554
34555
34556
34557
34558
34559
34560
34561
34562
34563
34564
34565
34566
34567
34568
34569
34570
34571
34572
34573
34574
34575
34576
34577
34578
34579
34580
34581
34582
34583
34584
34585
34586
34587
34588
34589
34590
34591
34592
34593
34594
34595
34596
34597
34598
34599
34600
34601
34602
34603
34604
34605
34606
34607
34608
34609
34610
34611
34612
34613
34614
34615
34616
34617
34618
34619
34620
34621
34622
34623
34624
34625
34626
34627
34628
34629
34630
34631
34632
34633
34634
34635
34636
34637
34638
34639
34640
34641
34642
34643
34644
34645
34646
34647
34648
34649
34650
34651
34652
34653
34654
34655
34656
34657
34658
34659
34660
34661
34662
34663
34664
34665
34666
34667
34668
34669
34670
34671
34672
34673
34674
34675
34676
34677
34678
34679
34680
34681
34682
34683
34684
34685
34686
34687
34688
34689
34690
34691
34692
34693
34694
34695
34696
34697
34698
34699
34700
34701
34702
34703
34704
34705
34706
34707
34708
34709
34710
34711
34712
34713
34714
34715
34716
34717
34718
34719
34720
34721
34722
34723
34724
34725
34726
34727
34728
34729
34730
34731
34732
34733
34734
34735
34736
34737
34738
34739
34740
34741
34742
34743
34744
34745
34746
34747
34748
34749
34750
34751
34752
34753
34754
34755
34756
34757
34758
34759
34760
34761
34762
34763
34764
34765
34766
34767
34768
34769
34770
34771
34772
34773
34774
34775
34776
34777
34778
34779
34780
34781
34782
34783
34784
34785
34786
34787
34788
34789
34790
34791
34792
34793
34794
34795
34796
34797
34798
34799
34800
34801
34802
34803
34804
34805
34806
34807
34808
34809
34810
34811
34812
34813
34814
34815
34816
34817
34818
34819
34820
34821
34822
34823
34824
34825
34826
34827
34828
34829
34830
34831
34832
34833
34834
34835
34836
34837
34838
34839
34840
34841
34842
34843
34844
34845
34846
34847
34848
34849
34850
34851
34852
34853
34854
34855
34856
34857
34858
34859
34860
34861
34862
34863
34864
34865
34866
34867
34868
34869
34870
34871
34872
34873
34874
34875
34876
34877
34878
34879
34880
34881
34882
34883
34884
34885
34886
34887
34888
34889
34890
34891
34892
34893
34894
34895
34896
34897
34898
34899
34900
34901
34902
34903
34904
34905
34906
34907
34908
34909
34910
34911
34912
34913
34914
34915
34916
34917
34918
34919
34920
34921
34922
34923
34924
34925
34926
34927
34928
34929
34930
34931
34932
34933
34934
34935
34936
34937
34938
34939
34940
34941
34942
34943
34944
34945
34946
34947
34948
34949
34950
34951
34952
34953
34954
34955
34956
34957
34958
34959
34960
34961
34962
34963
34964
34965
34966
34967
34968
34969
34970
34971
34972
34973
34974
34975
34976
34977
34978
34979
34980
34981
34982
34983
34984
34985
34986
34987
34988
34989
34990
34991
34992
34993
34994
34995
34996
34997
34998
34999
35000
35001
35002
35003
35004
35005
35006
35007
35008
35009
35010
35011
35012
35013
35014
35015
35016
35017
35018
35019
35020
35021
35022
35023
35024
35025
35026
35027
35028
35029
35030
35031
35032
35033
35034
35035
35036
35037
35038
35039
35040
35041
35042
35043
35044
35045
35046
35047
35048
35049
35050
35051
35052
35053
35054
35055
35056
35057
35058
35059
35060
35061
35062
35063
35064
35065
35066
35067
35068
35069
35070
35071
35072
35073
35074
35075
35076
35077
35078
35079
35080
35081
35082
35083
35084
35085
35086
35087
35088
35089
35090
35091
35092
35093
35094
35095
35096
35097
35098
35099
35100
35101
35102
35103
35104
35105
35106
35107
35108
35109
35110
35111
35112
35113
35114
35115
35116
35117
35118
35119
35120
35121
35122
35123
35124
35125
35126
35127
35128
35129
35130
35131
35132
35133
35134
35135
35136
35137
35138
35139
35140
35141
35142
35143
35144
35145
35146
35147
35148
35149
35150
35151
35152
35153
35154
35155
35156
35157
35158
35159
35160
35161
35162
35163
35164
35165
35166
35167
35168
35169
35170
35171
35172
35173
35174
35175
35176
35177
35178
35179
35180
35181
35182
35183
35184
35185
35186
35187
35188
35189
35190
35191
35192
35193
35194
35195
35196
35197
35198
35199
35200
35201
35202
35203
35204
35205
35206
35207
35208
35209
35210
35211
35212
35213
35214
35215
35216
35217
35218
35219
35220
35221
35222
35223
35224
35225
35226
35227
35228
35229
35230
35231
35232
35233
35234
35235
35236
35237
35238
35239
35240
35241
35242
35243
35244
35245
35246
35247
35248
35249
35250
35251
35252
35253
35254
35255
35256
35257
35258
35259
35260
35261
35262
35263
35264
35265
35266
35267
35268
35269
35270
35271
35272
35273
35274
35275
35276
35277
35278
35279
35280
35281
35282
35283
35284
35285
35286
35287
35288
35289
35290
35291
35292
35293
35294
35295
35296
35297
35298
35299
35300
35301
35302
35303
35304
35305
35306
35307
35308
35309
35310
35311
35312
35313
35314
35315
35316
35317
35318
35319
35320
35321
35322
35323
35324
35325
35326
35327
35328
35329
35330
35331
35332
35333
35334
35335
35336
35337
35338
35339
35340
35341
35342
35343
35344
35345
35346
35347
35348
35349
35350
35351
35352
35353
35354
35355
35356
35357
35358
35359
35360
35361
35362
35363
35364
35365
35366
35367
35368
35369
35370
35371
35372
35373
35374
35375
35376
35377
35378
35379
35380
35381
35382
35383
35384
35385
35386
35387
35388
35389
35390
35391
35392
35393
35394
35395
35396
35397
35398
35399
35400
35401
35402
35403
35404
35405
35406
35407
35408
35409
35410
35411
35412
35413
35414
35415
35416
35417
35418
35419
35420
35421
35422
35423
35424
35425
35426
35427
35428
35429
35430
35431
35432
35433
35434
35435
35436
35437
35438
35439
35440
35441
35442
35443
35444
35445
35446
35447
35448
35449
35450
35451
35452
35453
35454
35455
35456
35457
35458
35459
35460
35461
35462
35463
35464
35465
35466
35467
35468
35469
35470
35471
35472
35473
35474
35475
35476
35477
35478
35479
35480
35481
35482
35483
35484
35485
35486
35487
35488
35489
35490
35491
35492
35493
35494
35495
35496
35497
35498
35499
35500
35501
35502
35503
35504
35505
35506
35507
35508
35509
35510
35511
35512
35513
35514
35515
35516
35517
35518
35519
35520
35521
35522
35523
35524
35525
35526
35527
35528
35529
35530
35531
35532
35533
35534
35535
35536
35537
35538
35539
35540
35541
35542
35543
35544
35545
35546
35547
35548
35549
35550
35551
35552
35553
35554
35555
35556
35557
35558
35559
35560
35561
35562
35563
35564
35565
35566
35567
35568
35569
35570
35571
35572
35573
35574
35575
35576
35577
35578
35579
35580
35581
35582
35583
35584
35585
35586
35587
35588
35589
35590
35591
35592
35593
35594
35595
35596
35597
35598
35599
35600
35601
35602
35603
35604
35605
35606
35607
35608
35609
35610
35611
35612
35613
35614
35615
35616
35617
35618
35619
35620
35621
35622
35623
35624
35625
35626
35627
35628
35629
35630
35631
35632
35633
35634
35635
35636
35637
35638
35639
35640
35641
35642
35643
35644
35645
35646
35647
35648
35649
35650
35651
35652
35653
35654
35655
35656
35657
35658
35659
35660
35661
35662
35663
35664
35665
35666
35667
35668
35669
35670
35671
35672
35673
35674
35675
35676
35677
35678
35679
35680
35681
35682
35683
35684
35685
35686
35687
35688
35689
35690
35691
35692
35693
35694
35695
35696
35697
35698
35699
35700
35701
35702
35703
35704
35705
35706
35707
35708
35709
35710
35711
35712
35713
35714
35715
35716
35717
35718
35719
35720
35721
35722
35723
35724
35725
35726
35727
35728
35729
35730
35731
35732
35733
35734
35735
35736
35737
35738
35739
35740
35741
35742
35743
35744
35745
35746
35747
35748
35749
35750
35751
35752
35753
35754
35755
35756
35757
35758
35759
35760
35761
35762
35763
35764
35765
35766
35767
35768
35769
35770
35771
35772
35773
35774
35775
35776
35777
35778
35779
35780
35781
35782
35783
35784
35785
35786
35787
35788
35789
35790
35791
35792
35793
35794
35795
35796
35797
35798
35799
35800
35801
35802
35803
35804
35805
35806
35807
35808
35809
35810
35811
35812
35813
35814
35815
35816
35817
35818
35819
35820
35821
35822
35823
35824
35825
35826
35827
35828
35829
35830
35831
35832
35833
35834
35835
35836
35837
35838
35839
35840
35841
35842
35843
35844
35845
35846
35847
35848
35849
35850
35851
35852
35853
35854
35855
35856
35857
35858
35859
35860
35861
35862
35863
35864
35865
35866
35867
35868
35869
35870
35871
35872
35873
35874
35875
35876
35877
35878
35879
35880
35881
35882
35883
35884
35885
35886
35887
35888
35889
35890
35891
35892
35893
35894
35895
35896
35897
35898
35899
35900
35901
35902
35903
35904
35905
35906
35907
35908
35909
35910
35911
35912
35913
35914
35915
35916
35917
35918
35919
35920
35921
35922
35923
35924
35925
35926
35927
35928
35929
35930
35931
35932
35933
35934
35935
35936
35937
35938
35939
35940
35941
35942
35943
35944
35945
35946
35947
35948
35949
35950
35951
35952
35953
35954
35955
35956
35957
35958
35959
35960
35961
35962
35963
35964
35965
35966
35967
35968
35969
35970
35971
35972
35973
35974
35975
35976
35977
35978
35979
35980
35981
35982
35983
35984
35985
35986
35987
35988
35989
35990
35991
35992
35993
35994
35995
35996
35997
35998
35999
36000
36001
36002
36003
36004
36005
36006
36007
36008
36009
36010
36011
36012
36013
36014
36015
36016
36017
36018
36019
36020
36021
36022
36023
36024
36025
36026
36027
36028
36029
36030
36031
36032
36033
36034
36035
36036
36037
36038
36039
36040
36041
36042
36043
36044
36045
36046
36047
36048
36049
36050
36051
36052
36053
36054
36055
36056
36057
36058
36059
36060
36061
36062
36063
36064
36065
36066
36067
36068
36069
36070
36071
36072
36073
36074
36075
36076
36077
36078
36079
36080
36081
36082
36083
36084
36085
36086
36087
36088
36089
36090
36091
36092
36093
36094
36095
36096
36097
36098
36099
36100
36101
36102
36103
36104
36105
36106
36107
36108
36109
36110
36111
36112
36113
36114
36115
36116
36117
36118
36119
36120
36121
36122
36123
36124
36125
36126
36127
36128
36129
36130
36131
36132
36133
36134
36135
36136
36137
36138
36139
36140
36141
36142
36143
36144
36145
36146
36147
36148
36149
36150
36151
36152
36153
36154
36155
36156
36157
36158
36159
36160
36161
36162
36163
36164
36165
36166
36167
36168
36169
36170
36171
36172
36173
36174
36175
36176
36177
36178
36179
36180
36181
36182
36183
36184
36185
36186
36187
36188
36189
36190
36191
36192
36193
36194
36195
36196
36197
36198
36199
36200
36201
36202
36203
36204
36205
36206
36207
36208
36209
36210
36211
36212
36213
36214
36215
36216
36217
36218
36219
36220
36221
36222
36223
36224
36225
36226
36227
36228
36229
36230
36231
36232
36233
36234
36235
36236
36237
36238
36239
36240
36241
36242
36243
36244
36245
36246
36247
36248
36249
36250
36251
36252
36253
36254
36255
36256
36257
36258
36259
36260
36261
36262
36263
36264
36265
36266
36267
36268
36269
36270
36271
36272
36273
36274
36275
36276
36277
36278
36279
36280
36281
36282
36283
36284
36285
36286
36287
36288
36289
36290
36291
36292
36293
36294
36295
36296
36297
36298
36299
36300
36301
36302
36303
36304
36305
36306
36307
36308
36309
36310
36311
36312
36313
36314
36315
36316
36317
36318
36319
36320
36321
36322
36323
36324
36325
36326
36327
36328
36329
36330
36331
36332
36333
36334
36335
36336
36337
36338
36339
36340
36341
36342
36343
36344
36345
36346
36347
36348
36349
36350
36351
36352
36353
36354
36355
36356
36357
36358
36359
36360
36361
36362
36363
36364
36365
36366
36367
36368
36369
36370
36371
36372
36373
36374
36375
36376
36377
36378
36379
36380
36381
36382
36383
36384
36385
36386
36387
36388
36389
36390
36391
36392
36393
36394
36395
36396
36397
36398
36399
36400
36401
36402
36403
36404
36405
36406
36407
36408
36409
36410
36411
36412
36413
36414
36415
36416
36417
36418
36419
36420
36421
36422
36423
36424
36425
36426
36427
36428
36429
36430
36431
36432
36433
36434
36435
36436
36437
36438
36439
36440
36441
36442
36443
36444
36445
36446
36447
36448
36449
36450
36451
36452
36453
36454
36455
36456
36457
36458
36459
36460
36461
36462
36463
36464
36465
36466
36467
36468
36469
36470
36471
36472
36473
36474
36475
36476
36477
36478
36479
36480
36481
36482
36483
36484
36485
36486
36487
36488
36489
36490
36491
36492
36493
36494
36495
36496
36497
36498
36499
36500
36501
36502
36503
36504
36505
36506
36507
36508
36509
36510
36511
36512
36513
36514
36515
36516
36517
36518
36519
36520
36521
36522
36523
36524
36525
36526
36527
36528
36529
36530
36531
36532
36533
36534
36535
36536
36537
36538
36539
36540
36541
36542
36543
36544
36545
36546
36547
36548
36549
36550
36551
36552
36553
36554
36555
36556
36557
36558
36559
36560
36561
36562
36563
36564
36565
36566
36567
36568
36569
36570
36571
36572
36573
36574
36575
36576
36577
36578
36579
36580
36581
36582
36583
36584
36585
36586
36587
36588
36589
36590
36591
36592
36593
36594
36595
36596
36597
36598
36599
36600
36601
36602
36603
36604
36605
36606
36607
36608
36609
36610
36611
36612
36613
36614
36615
36616
36617
36618
36619
36620
36621
36622
36623
36624
36625
36626
36627
36628
36629
36630
36631
36632
36633
36634
36635
36636
36637
36638
36639
36640
36641
36642
36643
36644
36645
36646
36647
36648
36649
36650
36651
36652
36653
36654
36655
36656
36657
36658
36659
36660
36661
36662
36663
36664
36665
36666
36667
36668
36669
36670
36671
36672
36673
36674
36675
36676
36677
36678
36679
36680
36681
36682
36683
36684
36685
36686
36687
36688
36689
36690
36691
36692
36693
36694
36695
36696
36697
36698
36699
36700
36701
36702
36703
36704
36705
36706
36707
36708
36709
36710
36711
36712
36713
36714
36715
36716
36717
36718
36719
36720
36721
36722
36723
36724
36725
36726
36727
36728
36729
36730
36731
36732
36733
36734
36735
36736
36737
36738
36739
36740
36741
36742
36743
36744
36745
36746
36747
36748
36749
36750
36751
36752
36753
36754
36755
36756
36757
36758
36759
36760
36761
36762
36763
36764
36765
36766
36767
36768
36769
36770
36771
36772
36773
36774
36775
36776
36777
36778
36779
36780
36781
36782
36783
36784
36785
36786
36787
36788
36789
36790
36791
36792
36793
36794
36795
36796
36797
36798
36799
36800
36801
36802
36803
36804
36805
36806
36807
36808
36809
36810
36811
36812
36813
36814
36815
36816
36817
36818
36819
36820
36821
36822
36823
36824
36825
36826
36827
36828
36829
36830
36831
36832
36833
36834
36835
36836
36837
36838
36839
36840
36841
36842
36843
36844
36845
36846
36847
36848
36849
36850
36851
36852
36853
36854
36855
36856
36857
36858
36859
36860
36861
36862
36863
36864
36865
36866
36867
36868
36869
36870
36871
36872
36873
36874
36875
36876
36877
36878
36879
36880
36881
36882
36883
36884
36885
36886
36887
36888
36889
36890
36891
36892
36893
36894
36895
36896
36897
36898
36899
36900
36901
36902
36903
36904
36905
36906
36907
36908
36909
36910
36911
36912
36913
36914
36915
36916
36917
36918
36919
36920
36921
36922
36923
36924
36925
36926
36927
36928
36929
36930
36931
36932
36933
36934
36935
36936
36937
36938
36939
36940
36941
36942
36943
36944
36945
36946
36947
36948
36949
36950
36951
36952
36953
36954
36955
36956
36957
36958
36959
36960
36961
36962
36963
36964
36965
36966
36967
36968
36969
36970
36971
36972
36973
36974
36975
36976
36977
36978
36979
36980
36981
36982
36983
36984
36985
36986
36987
36988
36989
36990
36991
36992
36993
36994
36995
36996
36997
36998
36999
37000
37001
37002
37003
37004
37005
37006
37007
37008
37009
37010
37011
37012
37013
37014
37015
37016
37017
37018
37019
37020
37021
37022
37023
37024
37025
37026
37027
37028
37029
37030
37031
37032
37033
37034
37035
37036
37037
37038
37039
37040
37041
37042
37043
37044
37045
37046
37047
37048
37049
37050
37051
37052
37053
37054
37055
37056
37057
37058
37059
37060
37061
37062
37063
37064
37065
37066
37067
37068
37069
37070
37071
37072
37073
37074
37075
37076
37077
37078
37079
37080
37081
37082
37083
37084
37085
37086
37087
37088
37089
37090
37091
37092
37093
37094
37095
37096
37097
37098
37099
37100
37101
37102
37103
37104
37105
37106
37107
37108
37109
37110
37111
37112
37113
37114
37115
37116
37117
37118
37119
37120
37121
37122
37123
37124
37125
37126
37127
37128
37129
37130
37131
37132
37133
37134
37135
37136
37137
37138
37139
37140
37141
37142
37143
37144
37145
37146
37147
37148
37149
37150
37151
37152
37153
37154
37155
37156
37157
37158
37159
37160
37161
37162
37163
37164
37165
37166
37167
37168
37169
37170
37171
37172
37173
37174
37175
37176
37177
37178
37179
37180
37181
37182
37183
37184
37185
37186
37187
37188
37189
37190
37191
37192
37193
37194
37195
37196
37197
37198
37199
37200
37201
37202
37203
37204
37205
37206
37207
37208
37209
37210
37211
37212
37213
37214
37215
37216
37217
37218
37219
37220
37221
37222
37223
37224
37225
37226
37227
37228
37229
37230
37231
37232
37233
37234
37235
37236
37237
37238
37239
37240
37241
37242
37243
37244
37245
37246
37247
37248
37249
37250
37251
37252
37253
37254
37255
37256
37257
37258
37259
37260
37261
37262
37263
37264
37265
37266
37267
37268
37269
37270
37271
37272
37273
37274
37275
37276
37277
37278
37279
37280
37281
37282
37283
37284
37285
37286
37287
37288
37289
37290
37291
37292
37293
37294
37295
37296
37297
37298
37299
37300
37301
37302
37303
37304
37305
37306
37307
37308
37309
37310
37311
37312
37313
37314
37315
37316
37317
37318
37319
37320
37321
37322
37323
37324
37325
37326
37327
37328
37329
37330
37331
37332
37333
37334
37335
37336
37337
37338
37339
37340
37341
37342
37343
37344
37345
37346
37347
37348
37349
37350
37351
37352
37353
37354
37355
37356
37357
37358
37359
37360
37361
37362
37363
37364
37365
37366
37367
37368
37369
37370
37371
37372
37373
37374
37375
37376
37377
37378
37379
37380
37381
37382
37383
37384
37385
37386
37387
37388
37389
37390
37391
37392
37393
37394
37395
37396
37397
37398
37399
37400
37401
37402
37403
37404
37405
37406
37407
37408
37409
37410
37411
37412
37413
37414
37415
37416
37417
37418
37419
37420
37421
37422
37423
37424
37425
37426
37427
37428
37429
37430
37431
37432
37433
37434
37435
37436
37437
37438
37439
37440
37441
37442
37443
37444
37445
37446
37447
37448
37449
37450
37451
37452
37453
37454
37455
37456
37457
37458
37459
37460
37461
37462
37463
37464
37465
37466
37467
37468
37469
37470
37471
37472
37473
37474
37475
37476
37477
37478
37479
37480
37481
37482
37483
37484
37485
37486
37487
37488
37489
37490
37491
37492
37493
37494
37495
37496
37497
37498
37499
37500
37501
37502
37503
37504
37505
37506
37507
37508
37509
37510
37511
37512
37513
37514
37515
37516
37517
37518
37519
37520
37521
37522
37523
37524
37525
37526
37527
37528
37529
37530
37531
37532
37533
37534
37535
37536
37537
37538
37539
37540
37541
37542
37543
37544
37545
37546
37547
37548
37549
37550
37551
37552
37553
37554
37555
37556
37557
37558
37559
37560
37561
37562
37563
37564
37565
37566
37567
37568
37569
37570
37571
37572
37573
37574
37575
37576
37577
37578
37579
37580
37581
37582
37583
37584
37585
37586
37587
37588
37589
37590
37591
37592
37593
37594
37595
37596
37597
37598
37599
37600
37601
37602
37603
37604
37605
37606
37607
37608
37609
37610
37611
37612
37613
37614
37615
37616
37617
37618
37619
37620
37621
37622
37623
37624
37625
37626
37627
37628
37629
37630
37631
37632
37633
37634
37635
37636
37637
37638
37639
37640
37641
37642
37643
37644
37645
37646
37647
37648
37649
37650
37651
37652
37653
37654
37655
37656
37657
37658
37659
37660
37661
37662
37663
37664
37665
37666
37667
37668
37669
37670
37671
37672
37673
37674
37675
37676
37677
37678
37679
37680
37681
37682
37683
37684
37685
37686
37687
37688
37689
37690
37691
37692
37693
37694
37695
37696
37697
37698
37699
37700
37701
37702
37703
37704
37705
37706
37707
37708
37709
37710
37711
37712
37713
37714
37715
37716
37717
37718
37719
37720
37721
37722
37723
37724
37725
37726
37727
37728
37729
37730
37731
37732
37733
37734
37735
37736
37737
37738
37739
37740
37741
37742
37743
37744
37745
37746
37747
37748
37749
37750
37751
37752
37753
37754
37755
37756
37757
37758
37759
37760
37761
37762
37763
37764
37765
37766
37767
37768
37769
37770
37771
37772
37773
37774
37775
37776
37777
37778
37779
37780
37781
37782
37783
37784
37785
37786
37787
37788
37789
37790
37791
37792
37793
37794
37795
37796
37797
37798
37799
37800
37801
37802
37803
37804
37805
37806
37807
37808
37809
37810
37811
37812
37813
37814
37815
37816
37817
37818
37819
37820
37821
37822
37823
37824
37825
37826
37827
37828
37829
37830
37831
37832
37833
37834
37835
37836
37837
37838
37839
37840
37841
37842
37843
37844
37845
37846
37847
37848
37849
37850
37851
37852
37853
37854
37855
37856
37857
37858
37859
37860
37861
37862
37863
37864
37865
37866
37867
37868
37869
37870
37871
37872
37873
37874
37875
37876
37877
37878
37879
37880
37881
37882
37883
37884
37885
37886
37887
37888
37889
37890
37891
37892
37893
37894
37895
37896
37897
37898
37899
37900
37901
37902
37903
37904
37905
37906
37907
37908
37909
37910
37911
37912
37913
37914
37915
37916
37917
37918
37919
37920
37921
37922
37923
37924
37925
37926
37927
37928
37929
37930
37931
37932
37933
37934
37935
37936
37937
37938
37939
37940
37941
37942
37943
37944
37945
37946
37947
37948
37949
37950
37951
37952
37953
37954
37955
37956
37957
37958
37959
37960
37961
37962
37963
37964
37965
37966
37967
37968
37969
37970
37971
37972
37973
37974
37975
37976
37977
37978
37979
37980
37981
37982
37983
37984
37985
37986
37987
37988
37989
37990
37991
37992
37993
37994
37995
37996
37997
37998
37999
38000
38001
38002
38003
38004
38005
38006
38007
38008
38009
38010
38011
38012
38013
38014
38015
38016
38017
38018
38019
38020
38021
38022
38023
38024
38025
38026
38027
38028
38029
38030
38031
38032
38033
38034
38035
38036
38037
38038
38039
38040
38041
38042
38043
38044
38045
38046
38047
38048
38049
38050
38051
38052
38053
38054
38055
38056
38057
38058
38059
38060
38061
38062
38063
38064
38065
38066
38067
38068
38069
38070
38071
38072
38073
38074
38075
38076
38077
38078
38079
38080
38081
38082
38083
38084
38085
38086
38087
38088
38089
38090
38091
38092
38093
38094
38095
38096
38097
38098
38099
38100
38101
38102
38103
38104
38105
38106
38107
38108
38109
38110
38111
38112
38113
38114
38115
38116
38117
38118
38119
38120
38121
38122
38123
38124
38125
38126
38127
38128
38129
38130
38131
38132
38133
38134
38135
38136
38137
38138
38139
38140
38141
38142
38143
38144
38145
38146
38147
38148
38149
38150
38151
38152
38153
38154
38155
38156
38157
38158
38159
38160
38161
38162
38163
38164
38165
38166
38167
38168
38169
38170
38171
38172
38173
38174
38175
38176
38177
38178
38179
38180
38181
38182
38183
38184
38185
38186
38187
38188
38189
38190
38191
38192
38193
38194
38195
38196
38197
38198
38199
38200
38201
38202
38203
38204
38205
38206
38207
38208
38209
38210
38211
38212
38213
38214
38215
38216
38217
38218
38219
38220
38221
38222
38223
38224
38225
38226
38227
38228
38229
38230
38231
38232
38233
38234
38235
38236
38237
38238
38239
38240
38241
38242
38243
38244
38245
38246
38247
38248
38249
38250
38251
38252
38253
38254
38255
38256
38257
38258
38259
38260
38261
38262
38263
38264
38265
38266
38267
38268
38269
38270
38271
38272
38273
38274
38275
38276
38277
38278
38279
38280
38281
38282
38283
38284
38285
38286
38287
38288
38289
38290
38291
38292
38293
38294
38295
38296
38297
38298
38299
38300
38301
38302
38303
38304
38305
38306
38307
38308
38309
38310
38311
38312
38313
38314
38315
38316
38317
38318
38319
38320
38321
38322
38323
38324
38325
38326
38327
38328
38329
38330
38331
38332
38333
38334
38335
38336
38337
38338
38339
38340
38341
38342
38343
38344
38345
38346
38347
38348
38349
38350
38351
38352
38353
38354
38355
38356
38357
38358
38359
38360
38361
38362
38363
38364
38365
38366
38367
38368
38369
38370
38371
38372
38373
38374
38375
38376
38377
38378
38379
38380
38381
38382
38383
38384
38385
38386
38387
38388
38389
38390
38391
38392
38393
38394
38395
38396
38397
38398
38399
38400
38401
38402
38403
38404
38405
38406
38407
38408
38409
38410
38411
38412
38413
38414
38415
38416
38417
38418
38419
38420
38421
38422
38423
38424
38425
38426
38427
38428
38429
38430
38431
38432
38433
38434
38435
38436
38437
38438
38439
38440
38441
38442
38443
38444
38445
38446
38447
38448
38449
38450
38451
38452
38453
38454
38455
38456
38457
38458
38459
38460
38461
38462
38463
38464
38465
38466
38467
38468
38469
38470
38471
38472
38473
38474
38475
38476
38477
38478
38479
38480
38481
38482
38483
38484
38485
38486
38487
38488
38489
38490
38491
38492
38493
38494
38495
38496
38497
38498
38499
38500
38501
38502
38503
38504
38505
38506
38507
38508
38509
38510
38511
38512
38513
38514
38515
38516
38517
38518
38519
38520
38521
38522
38523
38524
38525
38526
38527
38528
38529
38530
38531
38532
38533
38534
38535
38536
38537
38538
38539
38540
38541
38542
38543
38544
38545
38546
38547
38548
38549
38550
38551
38552
38553
38554
38555
38556
38557
38558
38559
38560
38561
38562
38563
38564
38565
38566
38567
38568
38569
38570
38571
38572
38573
38574
38575
38576
38577
38578
38579
38580
38581
38582
38583
38584
38585
38586
38587
38588
38589
38590
38591
38592
38593
38594
38595
38596
38597
38598
38599
38600
38601
38602
38603
38604
38605
38606
38607
38608
38609
38610
38611
38612
38613
38614
38615
38616
38617
38618
38619
38620
38621
38622
38623
38624
38625
38626
38627
38628
38629
38630
38631
38632
38633
38634
38635
38636
38637
38638
38639
38640
38641
38642
38643
38644
38645
38646
38647
38648
38649
38650
38651
38652
38653
38654
38655
38656
38657
38658
38659
38660
38661
38662
38663
38664
38665
38666
38667
38668
38669
38670
38671
38672
38673
38674
38675
38676
38677
38678
38679
38680
38681
38682
38683
38684
38685
38686
38687
38688
38689
38690
38691
38692
38693
38694
38695
38696
38697
38698
38699
38700
38701
38702
38703
38704
38705
38706
38707
38708
38709
38710
38711
38712
38713
38714
38715
38716
38717
38718
38719
38720
38721
38722
38723
38724
38725
38726
38727
38728
38729
38730
38731
38732
38733
38734
38735
38736
38737
38738
38739
38740
38741
38742
38743
38744
38745
38746
38747
38748
38749
38750
38751
38752
38753
38754
38755
38756
38757
38758
38759
38760
38761
38762
38763
38764
38765
38766
38767
38768
38769
38770
38771
38772
38773
38774
38775
38776
38777
38778
38779
38780
38781
38782
38783
38784
38785
38786
38787
38788
38789
38790
38791
38792
38793
38794
38795
38796
38797
38798
38799
38800
38801
38802
38803
38804
38805
38806
38807
38808
38809
38810
38811
38812
38813
38814
38815
38816
38817
38818
38819
38820
38821
38822
38823
38824
38825
38826
38827
38828
38829
38830
38831
38832
38833
38834
38835
38836
38837
38838
38839
38840
38841
38842
38843
38844
38845
38846
38847
38848
38849
38850
38851
38852
38853
38854
38855
38856
38857
38858
38859
38860
38861
38862
38863
38864
38865
38866
38867
38868
38869
38870
38871
38872
38873
38874
38875
38876
38877
38878
38879
38880
38881
38882
38883
38884
38885
38886
38887
38888
38889
38890
38891
38892
38893
38894
38895
38896
38897
38898
38899
38900
38901
38902
38903
38904
38905
38906
38907
38908
38909
38910
38911
38912
38913
38914
38915
38916
38917
38918
38919
38920
38921
38922
38923
38924
38925
38926
38927
38928
38929
38930
38931
38932
38933
38934
38935
38936
38937
38938
38939
38940
38941
38942
38943
38944
38945
38946
38947
38948
38949
38950
38951
38952
38953
38954
38955
38956
38957
38958
38959
38960
38961
38962
38963
38964
38965
38966
38967
38968
38969
38970
38971
38972
38973
38974
38975
38976
38977
38978
38979
38980
38981
38982
38983
38984
38985
38986
38987
38988
38989
38990
38991
38992
38993
38994
38995
38996
38997
38998
38999
39000
39001
39002
39003
39004
39005
39006
39007
39008
39009
39010
39011
39012
39013
39014
39015
39016
39017
39018
39019
39020
39021
39022
39023
39024
39025
39026
39027
39028
39029
39030
39031
39032
39033
39034
39035
39036
39037
39038
39039
39040
39041
39042
39043
39044
39045
39046
39047
39048
39049
39050
39051
39052
39053
39054
39055
39056
39057
39058
39059
39060
39061
39062
39063
39064
39065
39066
39067
39068
39069
39070
39071
39072
39073
39074
39075
39076
39077
39078
39079
39080
39081
39082
39083
39084
39085
39086
39087
39088
39089
39090
39091
39092
39093
39094
39095
39096
39097
39098
39099
39100
39101
39102
39103
39104
39105
39106
39107
39108
39109
39110
39111
39112
39113
39114
39115
39116
39117
39118
39119
39120
39121
39122
39123
39124
39125
39126
39127
39128
39129
39130
39131
39132
39133
39134
39135
39136
39137
39138
39139
39140
39141
39142
39143
39144
39145
39146
39147
39148
39149
39150
39151
39152
39153
39154
39155
39156
49825
49826
49827
49828
49829
49830
49831
49832
49833
49834
49835
49836
49837
49838
49839
49840
49841
49842
49843
49844
49845
49846
49847
49848
49849
49850
49851
49852
49853
49854
49855
49856
49857
49858
49859
49860
49861
49862
49863
49864
49865
49866
49867
49868
49869
49870
49871
49872
49873
49874
49875
49876
49877
49878
49879
49880
49881
49882
49883
49884
49885
49886
49887
49888
49889
49890
49891
49892
49893
49894
49895
49896
49897
49898
49899
49900
49901
49902
49903
49904
49905
49906
49907
49908
49909
49910
49911
49912
49913
49914
49915
49916
49917
49918
49919
49920
49921
49922
49923
49924
49925
49926
49927
49928
49929
49930
49931
49932
49933
49934
49935
49936
49937
49938
49939
49940
49941
49942
49943
49944
49945
49946
49947
49948
49949
49950
49951
49952
49953
49954
49955
49956
49957
49958
49959
49960
49961
49962
49963
49964
49965
49966
49967
49968
49969
49970
49971
49972
49973
49974
49975
49976
49977
49978
49979
49980
49981
49982
49983
49984
49985
49986
49987
49988
49989
49990
49991
49992
49993
49994
49995
49996
49997
49998
49999
50000
50001
50002
50003
50004
50005
50006
50007
50008
50009
50010
50011
50012
50013
50014
50015
50016
50017
50018
50019
50020
50021
50022
50023
50024
50025
50026
50027
50028
50029
50030
50031
50032
50033
50034
50035
50036
50037
50038
50039
50040
50041
50042
50043
50044
50045
50046
50047
50048
50049
50050
50051
50052
50053
50054
50055
50056
50057
50058
50059
50060
50061
50062
50063
50064
50065
50066
50067
50068
50069
50070
50071
50072
50073
50074
50075
50076
50077
50078
50079
50080
50081
50082
50083
50084
50085
50086
50087
50088
50089
50090
50091
50092
50093
50094
50095
50096
50097
50098
50099
50100
50101
50102
50103
50104
50105
50106
50107
50108
50109
50110
50111
50112
50113
50114
50115
50116
50117
50118
50119
50120
50121
50122
50123
50124
50125
50126
50127
50128
50129
50130
50131
50132
50133
50134
50135
50136
50137
50138
50139
50140
50141
50142
50143
50144
50145
50146
50147
50148
50149
50150
50151
50152
50153
50154
50155
50156
50157
50158
50159
50160
50161
50162
50163
50164
50165
50166
50167
50168
50169
50170
50171
50172
50173
50174
50175
50176
50177
50178
50179
50180
50181
50182
50183
50184
50185
50186
50187
50188
50189
50190
50191
50192
50193
50194
50195
50196
50197
50198
50199
50200
50201
50202
50203
50204
50205
50206
50207
50208
50209
50210
50211
50212
50213
50214
50215
50216
50217
50218
50219
50220
50221
50222
50223
50224
50225
50226
50227
50228
50229
50230
50231
50232
50233
50234
50235
50236
50237
50238
50239
50240
50241
50242
50243
50244
50245
50246
50247
50248
50249
50250
50251
50252
50253
50254
50255
50256
50257
50258
50259
50260
50261
50262
50263
50264
50265
50266
50267
50268
50269
50270
50271
50272
50273
50274
50275
50276
50277
50278
50279
50280
50281
50282
50283
50284
50285
50286
50287
50288
50289
50290
50291
50292
50293
50294
50295
50296
50297
50298
50299
50300
50301
50302
50303
50304
50305
50306
50307
50308
50309
50310
50311
50312
50313
50314
50315
50316
50317
50318
50319
50320
50321
50322
50323
50324
50325
50326
50327
50328
50329
50330
50331
50332
50333
50334
50335
50336
50337
50338
50339
50340
50341
50342
50343
50344
50345
50346
50347
50348
50349
50350
50351
50352
50353
50354
50355
50356
50357
50358
50359
50360
50361
50362
50363
50364
50365
50366
50367
50368
50369
50370
50371
50372
50373
50374
50375
50376
50377
50378
50379
50380
50381
50382
50383
50384
50385
50386
50387
50388
50389
50390
50391
50392
50393
50394
50395
50396
50397
50398
50399
50400
50401
50402
50403
50404
50405
50406
50407
50408
50409
50410
50411
50412
50413
50414
50415
50416
50417
50418
50419
50420
50421
50422
50423
50424
50425
50426
50427
50428
50429
50430
50431
50432
50433
50434
50435
50436
50437
50438
50439
50440
50441
50442
50443
50444
50445
50446
50447
50448
50449
50450
50451
50452
50453
50454
50455
50456
50457
50458
50459
50460
50461
50462
50463
50464
50465
50466
50467
50468
50469
50470
50471
50472
50473
50474
50475
50476
50477
50478
50479
50480
50481
50482
50483
50484
50485
50486
50487
50488
50489
50490
50491
50492
50493
50494
50495
50496
50497
50498
50499
50500
50501
50502
50503
50504
50505
50506
50507
50508
50509
50510
50511
50512
50513
50514
50515
50516
50517
50518
50519
50520
50521
50522
50523
50524
50525
50526
50527
50528
50529
50530
50531
50532
50533
50534
50535
50536
50537
50538
50539
50540
50541
50542
50543
50544
50545
50546
50547
50548
50549
50550
50551
50552
50553
50554
50555
50556
50557
50558
50559
50560
50561
50562
50563
50564
50565
50566
50567
50568
50569
50570
50571
50572
50573
50574
50575
50576
50577
50578
50579
50580
50581
50582
50583
50584
50585
50586
50587
50588
50589
50590
50591
50592
50593
50594
50595
50596
50597
50598
50599
50600
50601
50602
50603
50604
50605
50606
50607
50608
50609
50610
50611
50612
50613
50614
50615
50616
50617
50618
50619
50620
50621
50622
50623
50624
50625
50626
50627
50628
50629
50630
50631
50632
50633
50634
50635
50636
50637
50638
50639
50640
50641
50642
50643
50644
50645
50646
50647
50648
50649
50650
50651
50652
50653
50654
50655
50656
50657
50658
50659
50660
50661
50662
50663
50664
50665
50666
50667
50668
50669
50670
50671
50672
50673
50674
50675
50676
50677
50678
50679
50680
50681
50682
50683
50684
50685
50686
50687
50688
50689
50690
50691
50692
50693
50694
50695
50696
50697
50698
50699
50700
50701
50702
50703
50704
50705
50706
50707
50708
50709
50710
50711
50712
50713
50714
50715
50716
50717
50718
50719
50720
50721
50722
50723
50724
50725
50726
50727
50728
50729
50730
50731
50732
50733
50734
50735
50736
50737
50738
50739
50740
50741
50742
50743
50744
50745
50746
50747
50748
50749
50750
50751
50752
50753
50754
50755
50756
50757
50758
50759
50760
50761
50762
50763
50764
50765
50766
50767
50768
50769
50770
50771
50772
50773
50774
50775
50776
50777
50778
50779
50780
50781
50782
50783
50784
50785
50786
50787
50788
50789
50790
50791
50792
50793
50794
50795
50796
50797
50798
50799
50800
50801
50802
50803
50804
50805
50806
50807
50808
50809
50810
50811
50812
50813
50814
50815
50816
50817
50818
50819
50820
50821
50822
50823
50824
50825
50826
50827
50828
50829
50830
50831
50832
50833
50834
50835
50836
50837
50838
50839
50840
50841
50842
50843
50844
50845
50846
50847
50848
50849
50850
50851
50852
50853
50854
50855
50856
50857
50858
50859
50860
50861
50862
50863
50864
50865
50866
50867
50868
50869
50870
50871
50872
50873
50874
50875
50876
50877
50878
50879
50880
50881
50882
50883
50884
50885
50886
50887
50888
50889
50890
50891
50892
50893
50894
50895
50896
50897
50898
50899
50900
50901
50902
50903
50904
50905
50906
50907
50908
50909
50910
50911
50912
50913
50914
50915
50916
50917
50918
50919
50920
50921
50922
50923
50924
50925
50926
50927
50928
50929
50930
50931
50932
50933
50934
50935
50936
50937
50938
50939
50940
50941
50942
50943
50944
50945
50946
50947
50948
50949
50950
50951
50952
50953
50954
50955
50956
50957
50958
50959
50960
50961
50962
50963
50964
50965
50966
50967
50968
50969
50970
50971
50972
50973
50974
50975
50976
50977
50978
50979
50980
50981
50982
50983
50984
50985
50986
50987
50988
50989
50990
50991
50992
50993
50994
50995
50996
50997
50998
50999
51000
51001
51002
51003
51004
51005
51006
51007
51008
51009
51010
51011
51012
51013
51014
51015
51016
51017
51018
51019
51020
51021
51022
51023
51024
51025
51026
51027
51028
51029
51030
51031
51032
51033
51034
51035
51036
51037
51038
51039
51040
51041
51042
51043
51044
51045
51046
51047
51048
51049
51050
51051
51052
51053
51054
51055
51056
51057
51058
51059
51060
51061
51062
51063
51064
51065
51066
51067
51068
51069
51070
51071
51072
51073
51074
51075
51076
51077
51078
51079
51080
51081
51082
51083
51084
51085
51086
51087
51088
51089
51090
51091
51092
51093
51094
51095
51096
51097
51098
51099
51100
51101
51102
51103
51104
51105
51106
51107
51108
51109
51110
51111
51112
51113
51114
51115
51116
51117
51118
51119
51120
51121
51122
51123
51124
51125
51126
51127
51128
51129
51130
51131
51132
51133
51134
51135
51136
51137
51138
51139
51140
51141
51142
51143
51144
51145
51146
51147
51148
51149
51150
51151
51152
51153
51154
51155
51156
51157
51158
51159
51160
51161
51162
51163
51164
51165
51166
51167
51168
51169
51170
51171
51172
51173
51174
51175
51176
51177
51178
51179
51180
51181
51182
51183
51184
51185
51186
51187
51188
51189
51190
51191
51192
51193
51194
51195
51196
51197
51198
51199
51200
51201
51202
51203
51204
51205
51206
51207
51208
51209
51210
51211
51212
51213
51214
51215
51216
51217
51218
51219
51220
51221
51222
51223
51224
51225
51226
51227
51228
51229
51230
51231
51232
51233
51234
51235
51236
51237
51238
51239
51240
51241
51242
51243
51244
51245
51246
51247
51248
51249
51250
51251
51252
51253
51254
51255
51256
51257
51258
51259
51260
51261
51262
51263
51264
51265
51266
51267
51268
51269
51270
51271
51272
51273
51274
51275
51276
51277
51278
51279
51280
51281
51282
51283
51284
51285
51286
51287
51288
51289
51290
51291
51292
51293
51294
51295
51296
51297
51298
51299
51300
51301
51302
51303
51304
51305
51306
51307
51308
51309
51310
51311
51312
51313
51314
51315
51316
51317
51318
51319
51320
51321
51322
51323
51324
51325
51326
51327
51328
51329
51330
51331
51332
51333
51334
51335
51336
51337
51338
51339
51340
51341
51342
51343
51344
51345
51346
51347
51348
51349
51350
51351
51352
51353
51354
51355
51356
51357
51358
51359
51360
51361
51362
51363
51364
51365
51366
51367
51368
51369
51370
51371
51372
51373
51374
51375
51376
51377
51378
51379
51380
51381
51382
51383
51384
51385
51386
51387
51388
51389
51390
51391
51392
51393
51394
51395
51396
51397
51398
51399
51400
51401
51402
51403
51404
51405
51406
51407
51408
51409
51410
51411
51412
51413
51414
51415
51416
51417
51418
51419
51420
51421
51422
51423
51424
51425
51426
51427
51428
51429
51430
51431
51432
51433
51434
51435
51436
51437
51438
51439
51440
51441
51442
51443
51444
51445
51446
51447
51448
51449
51450
51451
51452
51453
51454
51455
51456
51457
51458
51459
51460
51461
51462
51463
51464
51465
51466
51467
51468
51469
51470
51471
51472
51473
51474
51475
51476
51477
51478
51479
51480
51481
51482
51483
51484
51485
51486
51487
51488
51489
51490
51491
51492
51493
51494
51495
51496
51497
51498
51499
51500
51501
51502
51503
51504
51505
51506
51507
51508
51509
51510
51511
51512
51513
51514
51515
51516
51517
51518
51519
51520
51521
51522
51523
51524
51525
51526
51527
51528
51529
51530
51531
51532
51533
51534
51535
51536
51537
51538
51539
51540
51541
51542
51543
51544
51545
51546
51547
51548
51549
51550
51551
51552
51553
51554
51555
51556
51557
51558
51559
51560
51561
51562
51563
51564
51565
51566
51567
51568
51569
51570
51571
51572
51573
51574
51575
51576
51577
51578
51579
51580
51581
51582
51583
51584
51585
51586
51587
51588
51589
51590
51591
51592
51593
51594
51595
51596
51597
51598
51599
51600
51601
51602
51603
51604
51605
51606
51607
51608
51609
51610
51611
51612
51613
51614
51615
51616
51617
51618
51619
51620
51621
51622
51623
51624
51625
51626
51627
51628
51629
51630
51631
51632
51633
51634
51635
51636
51637
51638
51639
51640
51641
51642
51643
51644
51645
51646
51647
51648
51649
51650
51651
51652
51653
51654
51655
51656
51657
51658
51659
51660
51661
51662
51663
51664
51665
51666
51667
51668
51669
51670
51671
51672
51673
51674
51675
51676
51677
51678
51679
51680
51681
51682
51683
51684
51685
51686
51687
51688
51689
51690
51691
51692
51693
51694
51695
51696
51697
51698
51699
51700
51701
51702
51703
51704
51705
51706
51707
51708
51709
51710
51711
51712
51713
51714
51715
51716
51717
51718
51719
51720
51721
51722
51723
51724
51725
51726
51727
51728
51729
51730
51731
51732
51733
51734
51735
51736
51737
51738
51739
51740
51741
51742
51743
51744
51745
51746
51747
51748
51749
51750
51751
51752
51753
51754
51755
51756
51757
51758
51759
51760
51761
51762
51763
51764
51765
51766
51767
51768
51769
51770
51771
51772
51773
51774
51775
51776
51777
51778
51779
51780
51781
51782
51783
51784
51785
51786
51787
51788
51789
51790
51791
51792
51793
51794
51795
51796
51797
51798
51799
51800
51801
51802
51803
51804
51805
51806
51807
51808
51809
51810
51811
51812
51813
51814
51815
51816
51817
51818
51819
51820
51821
51822
51823
51824
51825
51826
51827
51828
51829
51830
51831
51832
51833
51834
51835
51836
51837
51838
51839
51840
51841
51842
51843
51844
51845
51846
51847
51848
51849
51850
51851
51852
51853
51854
51855
51856
51857
51858
51859
51860
51861
51862
51863
51864
51865
51866
51867
51868
51869
51870
51871
51872
51873
51874
51875
51876
51877
51878
51879
51880
51881
51882
51883
51884
51885
51886
51887
51888
51889
51890
51891
51892
51893
51894
51895
51896
51897
51898
51899
51900
51901
51902
51903
51904
51905
51906
51907
51908
51909
51910
51911
51912
51913
51914
51915
51916
51917
51918
51919
51920
51921
51922
51923
51924
51925
51926
51927
51928
51929
51930
51931
51932
51933
51934
51935
51936
51937
51938
51939
51940
51941
51942
51943
51944
51945
51946
51947
51948
51949
51950
51951
51952
51953
51954
51955
51956
51957
51958
51959
51960
51961
51962
51963
51964
51965
51966
51967
51968
51969
51970
51971
51972
51973
51974
51975
51976
51977
51978
51979
51980
51981
51982
51983
51984
51985
51986
51987
51988
51989
51990
51991
51992
51993
51994
51995
51996
51997
51998
51999
52000
52001
52002
52003
52004
52005
52006
52007
52008
52009
52010
52011
52012
52013
52014
52015
52016
52017
52018
52019
52020
52021
52022
52023
52024
52025
52026
52027
52028
52029
52030
52031
52032
52033
52034
52035
52036
52037
52038
52039
52040
52041
52042
52043
52044
52045
52046
52047
52048
52049
52050
52051
52052
52053
52054
52055
52056
52057
52058
52059
52060
52061
52062
52063
52064
52065
52066
52067
52068
52069
52070
52071
52072
52073
52074
52075
52076
52077
52078
52079
52080
52081
52082
52083
52084
52085
52086
52087
52088
52089
52090
52091
52092
52093
52094
52095
52096
52097
52098
52099
52100
52101
52102
52103
52104
52105
52106
52107
52108
52109
52110
52111
52112
52113
52114
52115
52116
52117
52118
52119
52120
52121
52122
52123
52124
52125
52126
52127
52128
52129
52130
52131
52132
52133
52134
52135
52136
52137
52138
52139
52140
52141
52142
52143
52144
52145
52146
52147
52148
52149
52150
52151
52152
52153
52154
52155
52156
52157
52158
52159
52160
52161
52162
52163
52164
52165
52166
52167
52168
52169
52170
52171
52172
52173
52174
52175
52176
52177
52178
52179
52180
52181
52182
52183
52184
52185
52186
52187
52188
52189
52190
52191
52192
52193
52194
52195
52196
52197
52198
52199
52200
52201
52202
52203
52204
52205
52206
52207
52208
52209
52210
52211
52212
52213
52214
52215
52216
52217
52218
52219
52220
52221
52222
52223
52224
52225
52226
52227
52228
52229
52230
52231
52232
52233
52234
52235
52236
52237
52238
52239
52240
52241
52242
52243
52244
52245
52246
52247
52248
52249
52250
52251
52252
52253
52254
52255
52256
52257
52258
52259
52260
52261
52262
52263
52264
52265
52266
52267
52268
52269
52270
52271
52272
52273
52274
52275
52276
52277
52278
52279
52280
52281
52282
52283
52284
52285
52286
52287
52288
52289
52290
52291
52292
52293
52294
52295
52296
52297
52298
52299
52300
52301
52302
52303
52304
52305
52306
52307
52308
52309
52310
52311
52312
52313
52314
52315
52316
52317
52318
52319
52320
52321
52322
52323
52324
52325
52326
52327
52328
52329
52330
52331
52332
52333
52334
52335
52336
52337
52338
52339
52340
52341
52342
52343
52344
52345
52346
52347
52348
52349
52350
52351
52352
52353
52354
52355
52356
52357
52358
52359
52360
52361
52362
52363
52364
52365
52366
52367
52368
52369
52370
52371
52372
52373
52374
52375
52376
52377
52378
52379
52380
52381
52382
52383
52384
52385
52386
52387
52388
52389
52390
52391
52392
52393
52394
52395
52396
52397
52398
52399
52400
52401
52402
52403
52404
52405
52406
52407
52408
52409
52410
52411
52412
52413
52414
52415
52416
52417
52418
52419
52420
52421
52422
52423
52424
52425
52426
52427
52428
52429
52430
52431
52432
52433
52434
52435
52436
52437
52438
52439
52440
52441
52442
52443
52444
52445
52446
52447
52448
52449
52450
52451
52452
52453
52454
52455
52456
52457
52458
52459
52460
52461
52462
52463
52464
52465
52466
52467
52468
52469
52470
52471
52472
52473
52474
52475
52476
52477
52478
52479
52480
52481
52482
52483
52484
52485
52486
52487
52488
52489
52490
52491
52492
52493
52494
52495
52496
52497
52498
52499
52500
52501
52502
52503
52504
52505
52506
52507
52508
52509
52510
52511
52512
52513
52514
52515
52516
52517
52518
52519
52520
52521
52522
52523
52524
52525
52526
52527
52528
52529
52530
52531
52532
52533
52534
52535
52536
52537
52538
52539
52540
52541
52542
52543
52544
52545
52546
52547
52548
52549
52550
52551
52552
52553
52554
52555
52556
52557
52558
52559
52560
52561
52562
52563
52564
52565
52566
52567
52568
52569
52570
52571
52572
52573
52574
52575
52576
52577
52578
52579
52580
52581
52582
52583
52584
52585
52586
52587
52588
52589
52590
52591
52592
52593
52594
52595
52596
52597
52598
52599
52600
52601
52602
52603
52604
52605
52606
52607
52608
52609
52610
52611
52612
52613
52614
52615
52616
52617
52618
52619
52620
52621
52622
52623
52624
52625
52626
52627
52628
52629
52630
52631
52632
52633
52634
52635
52636
52637
52638
52639
52640
52641
52642
52643
52644
52645
52646
52647
52648
52649
52650
52651
52652
52653
52654
52655
52656
52657
52658
52659
52660
52661
52662
52663
52664
52665
52666
52667
52668
52669
52670
52671
52672
52673
52674
52675
52676
52677
52678
52679
52680
52681
52682
52683
52684
52685
52686
52687
52688
52689
52690
52691
52692
52693
52694
52695
52696
52697
52698
52699
52700
52701
52702
52703
52704
52705
52706
52707
52708
52709
52710
52711
52712
52713
52714
52715
52716
52717
52718
52719
52720
52721
52722
52723
52724
52725
52726
52727
52728
52729
52730
52731
52732
52733
52734
52735
52736
52737
52738
52739
52740
52741
52742
52743
52744
52745
52746
52747
52748
52749
52750
52751
52752
52753
52754
52755
52756
52757
52758
52759
52760
52761
52762
52763
52764
52765
52766
52767
52768
52769
52770
52771
52772
52773
52774
52775
52776
52777
52778
52779
52780
52781
52782
52783
52784
52785
52786
52787
52788
52789
52790
52791
52792
52793
52794
52795
52796
52797
52798
52799
52800
52801
52802
52803
52804
52805
52806
52807
52808
52809
52810
52811
52812
52813
52814
52815
52816
52817
52818
52819
52820
52821
52822
52823
52824
52825
52826
52827
52828
52829
52830
52831
52832
52833
52834
52835
52836
52837
52838
52839
52840
52841
52842
52843
52844
52845
52846
52847
52848
52849
52850
52851
52852
52853
52854
52855
52856
52857
52858
52859
52860
52861
52862
52863
52864
52865
52866
52867
52868
52869
52870
52871
52872
52873
52874
52875
52876
52877
52878
52879
52880
52881
52882
52883
52884
52885
52886
52887
52888
52889
52890
52891
52892
52893
52894
52895
52896
52897
52898
52899
52900
52901
52902
52903
52904
52905
52906
52907
52908
52909
52910
52911
52912
52913
52914
52915
52916
52917
52918
52919
52920
52921
52922
52923
52924
52925
52926
52927
52928
52929
52930
52931
52932
52933
52934
52935
52936
52937
52938
52939
52940
52941
52942
52943
52944
52945
52946
52947
52948
52949
52950
52951
52952
52953
52954
52955
52956
52957
52958
52959
52960
52961
52962
52963
52964
52965
52966
52967
52968
52969
52970
52971
52972
52973
52974
52975
52976
52977
52978
52979
52980
52981
52982
52983
52984
52985
52986
52987
52988
52989
52990
52991
52992
52993
52994
52995
52996
52997
52998
52999
53000
53001
53002
53003
53004
53005
53006
53007
53008
53009
53010
53011
53012
53013
53014
53015
53016
53017
53018
53019
53020
53021
53022
53023
53024
53025
53026
53027
53028
53029
53030
53031
53032
53033
53034
53035
53036
53037
53038
53039
53040
53041
53042
53043
53044
53045
53046
53047
53048
53049
53050
53051
53052
53053
53054
53055
53056
53057
53058
53059
53060
53061
53062
53063
53064
53065
53066
53067
53068
53069
53070
53071
53072
53073
53074
53075
53076
53077
53078
53079
53080
53081
53082
53083
53084
53085
53086
53087
53088
53089
53090
53091
53092
53093
53094
53095
53096
53097
53098
53099
53100
53101
53102
53103
53104
53105
53106
53107
53108
53109
53110
53111
53112
53113
53114
53115
53116
53117
53118
53119
53120
53121
53122
53123
53124
53125
53126
53127
53128
53129
53130
53131
53132
53133
53134
53135
53136
53137
53138
53139
53140
53141
53142
53143
53144
53145
53146
53147
53148
53149
53150
53151
53152
53153
53154
53155
53156
53157
53158
53159
53160
53161
53162
53163
53164
53165
53166
53167
53168
53169
53170
53171
53172
53173
53174
53175
53176
53177
53178
53179
53180
53181
53182
53183
53184
53185
53186
53187
53188
53189
53190
53191
53192
53193
53194
53195
53196
53197
53198
53199
53200
53201
53202
53203
53204
53205
53206
53207
53208
53209
53210
53211
53212
53213
53214
53215
53216
53217
53218
53219
53220
53221
53222
53223
53224
53225
53226
53227
53228
53229
53230
53231
53232
53233
53234
53235
53236
53237
53238
53239
53240
53241
53242
53243
53244
53245
53246
53247
53248
53249
53250
53251
53252
53253
53254
53255
53256
53257
53258
53259
53260
53261
53262
53263
53264
53265
53266
53267
53268
53269
53270
53271
53272
53273
53274
53275
53276
53277
53278
53279
53280
53281
53282
53283
53284
53285
53286
53287
53288
53289
53290
53291
53292
53293
53294
53295
53296
53297
53298
53299
53300
53301
53302
53303
53304
53305
53306
53307
53308
53309
53310
53311
53312
53313
53314
53315
53316
53317
53318
53319
53320
53321
53322
53323
53324
53325
53326
53327
53328
53329
53330
53331
53332
53333
53334
53335
53336
53337
53338
53339
53340
53341
53342
53343
53344
53345
53346
53347
53348
53349
53350
53351
53352
53353
53354
53355
53356
53357
53358
53359
53360
53361
53362
53363
53364
53365
53366
53367
53368
53369
53370
53371
53372
53373
53374
53375
53376
53377
53378
53379
53380
53381
53382
53383
53384
53385
53386
53387
53388
53389
53390
53391
53392
53393
53394
53395
53396
53397
53398
53399
53400
53401
53402
53403
53404
53405
53406
53407
53408
53409
53410
53411
53412
53413
53414
53415
53416
53417
53418
53419
53420
53421
53422
53423
53424
53425
53426
53427
53428
53429
53430
53431
53432
53433
53434
53435
53436
53437
53438
53439
53440
53441
53442
53443
53444
53445
53446
53447
53448
53449
53450
53451
53452
53453
53454
53455
53456
53457
53458
53459
53460
53461
53462
53463
53464
53465
53466
53467
53468
53469
53470
53471
53472
53473
53474
53475
53476
53477
53478
53479
53480
53481
53482
53483
53484
53485
53486
53487
53488
53489
53490
53491
53492
53493
53494
53495
53496
53497
53498
53499
53500
53501
53502
53503
53504
53505
53506
53507
53508
53509
53510
53511
53512
53513
53514
53515
53516
53517
53518
53519
53520
53521
53522
53523
53524
53525
53526
53527
53528
53529
53530
53531
53532
53533
53534
53535
53536
53537
53538
53539
53540
53541
53542
53543
53544
53545
53546
53547
53548
53549
53550
53551
53552
53553
53554
53555
53556
53557
53558
53559
53560
53561
53562
53563
53564
53565
53566
53567
53568
53569
53570
53571
53572
53573
53574
53575
53576
53577
53578
53579
53580
53581
53582
53583
53584
53585
53586
53587
53588
53589
53590
53591
53592
53593
53594
53595
53596
53597
53598
53599
53600
53601
53602
53603
53604
53605
53606
53607
53608
53609
53610
53611
53612
53613
53614
53615
53616
53617
53618
53619
53620
53621
53622
53623
53624
53625
53626
53627
53628
53629
53630
53631
53632
53633
53634
53635
53636
53637
53638
53639
53640
53641
53642
53643
53644
53645
53646
53647
53648
53649
53650
53651
53652
53653
53654
53655
53656
53657
53658
53659
53660
53661
53662
53663
53664
53665
53666
53667
53668
53669
53670
53671
53672
53673
53674
53675
53676
53677
53678
53679
53680
53681
53682
53683
53684
53685
53686
53687
53688
53689
53690
53691
53692
53693
53694
53695
53696
53697
53698
53699
53700
53701
53702
53703
53704
53705
53706
53707
53708
53709
53710
53711
53712
53713
53714
53715
53716
53717
53718
53719
53720
53721
53722
53723
53724
53725
53726
53727
53728
53729
53730
53731
53732
53733
53734
53735
53736
53737
53738
53739
53740
53741
53742
53743
53744
53745
53746
53747
53748
53749
53750
53751
53752
53753
53754
53755
53756
53757
53758
53759
53760
53761
53762
53763
53764
53765
53766
53767
53768
53769
53770
53771
53772
53773
53774
53775
53776
53777
53778
53779
53780
53781
53782
53783
53784
53785
53786
53787
53788
53789
53790
53791
53792
53793
53794
53795
53796
53797
53798
53799
53800
53801
53802
53803
53804
53805
53806
53807
53808
53809
53810
53811
53812
53813
53814
53815
53816
53817
53818
53819
53820
53821
53822
53823
53824
53825
53826
53827
53828
53829
53830
53831
53832
53833
53834
53835
53836
53837
53838
53839
53840
53841
53842
53843
53844
53845
53846
53847
53848
53849
53850
53851
53852
53853
53854
53855
53856
53857
53858
53859
53860
53861
53862
53863
53864
53865
53866
53867
53868
53869
53870
53871
53872
53873
53874
53875
53876
53877
53878
53879
53880
53881
53882
53883
53884
53885
53886
53887
53888
53889
53890
53891
53892
53893
53894
53895
53896
53897
53898
53899
53900
53901
53902
53903
53904
53905
53906
53907
53908
53909
53910
53911
53912
53913
53914
53915
53916
53917
53918
53919
53920
53921
53922
53923
53924
53925
53926
53927
53928
53929
53930
53931
53932
53933
53934
53935
53936
53937
53938
53939
53940
53941
53942
53943
53944
53945
53946
53947
53948
53949
53950
53951
53952
53953
53954
53955
53956
53957
53958
53959
53960
53961
53962
53963
53964
53965
53966
53967
53968
53969
53970
53971
53972
53973
53974
53975
53976
53977
53978
53979
53980
53981
53982
53983
53984
53985
53986
53987
53988
53989
53990
53991
53992
53993
53994
53995
53996
53997
53998
53999
54000
54001
54002
54003
54004
54005
54006
54007
54008
54009
54010
54011
54012
54013
54014
54015
54016
54017
54018
54019
54020
54021
54022
54023
54024
54025
54026
54027
54028
54029
54030
54031
54032
54033
54034
54035
54036
54037
54038
54039
54040
54041
54042
54043
54044
54045
54046
54047
54048
54049
54050
54051
54052
54053
54054
54055
54056
54057
54058
54059
54060
54061
54062
54063
54064
54065
54066
54067
54068
54069
54070
54071
54072
54073
54074
54075
54076
54077
54078
54079
54080
54081
54082
54083
54084
54085
54086
54087
54088
54089
54090
54091
54092
54093
54094
54095
54096
54097
54098
54099
54100
54101
54102
54103
54104
54105
54106
54107
54108
54109
54110
54111
54112
54113
54114
54115
54116
54117
54118
54119
54120
54121
54122
54123
54124
54125
54126
54127
54128
54129
54130
54131
54132
54133
54134
54135
54136
54137
54138
54139
54140
54141
54142
54143
54144
54145
54146
54147
54148
54149
54150
54151
54152
54153
54154
54155
54156
54157
54158
54159
54160
54161
54162
54163
54164
54165
54166
54167
54168
54169
54170
54171
54172
54173
54174
54175
54176
54177
54178
54179
54180
54181
54182
54183
54184
54185
54186
54187
54188
54189
54190
54191
54192
54193
54194
54195
54196
54197
54198
54199
54200
54201
54202
54203
54204
54205
54206
54207
54208
54209
54210
54211
54212
54213
54214
54215
54216
54217
54218
54219
54220
54221
54222
54223
54224
54225
54226
54227
54228
54229
54230
54231
54232
54233
54234
54235
54236
54237
54238
54239
54240
54241
54242
54243
54244
54245
54246
54247
54248
54249
54250
54251
54252
54253
54254
54255
54256
54257
54258
54259
54260
54261
54262
54263
54264
54265
54266
54267
54268
54269
54270
54271
54272
54273
54274
54275
54276
54277
54278
54279
54280
54281
54282
54283
54284
54285
54286
54287
54288
54289
54290
54291
54292
54293
54294
54295
54296
54297
54298
54299
54300
54301
54302
54303
54304
54305
54306
54307
54308
54309
54310
54311
54312
54313
54314
54315
54316
54317
54318
54319
54320
54321
54322
54323
54324
54325
54326
54327
54328
54329
54330
54331
54332
54333
54334
54335
54336
54337
54338
54339
54340
54341
54342
54343
54344
54345
54346
54347
54348
54349
54350
54351
54352
54353
54354
54355
54356
54357
54358
54359
54360
54361
54362
54363
54364
54365
54366
54367
54368
54369
54370
54371
54372
54373
54374
54375
54376
54377
54378
54379
54380
54381
54382
54383
54384
54385
54386
54387
54388
54389
54390
54391
54392
54393
54394
54395
54396
54397
54398
54399
54400
54401
54402
54403
54404
54405
54406
54407
54408
54409
54410
54411
54412
54413
54414
54415
54416
54417
54418
54419
54420
54421
54422
54423
54424
54425
54426
54427
54428
54429
54430
54431
54432
54433
54434
54435
54436
54437
54438
54439
54440
54441
54442
54443
54444
54445
54446
54447
54448
54449
54450
54451
54452
54453
54454
54455
54456
54457
54458
54459
54460
54461
54462
54463
54464
54465
54466
54467
54468
54469
54470
54471
54472
54473
54474
54475
54476
54477
54478
54479
54480
54481
54482
54483
54484
54485
54486
54487
54488
54489
54490
54491
54492
54493
54494
54495
54496
54497
54498
54499
54500
54501
54502
54503
54504
54505
54506
54507
54508
54509
54510
54511
54512
54513
54514
54515
54516
54517
54518
54519
54520
54521
54522
54523
54524
54525
54526
54527
54528
54529
54530
54531
54532
54533
54534
54535
54536
54537
54538
54539
54540
54541
54542
54543
54544
54545
54546
54547
54548
54549
54550
54551
54552
54553
54554
54555
54556
54557
54558
54559
54560
54561
54562
54563
54564
54565
54566
54567
54568
54569
54570
54571
54572
54573
54574
54575
54576
54577
54578
54579
54580
54581
54582
54583
54584
54585
54586
54587
54588
54589
54590
54591
54592
54593
54594
54595
54596
54597
54598
54599
54600
54601
54602
54603
54604
54605
54606
54607
54608
54609
54610
54611
54612
54613
54614
54615
54616
54617
54618
54619
54620
54621
54622
54623
54624
54625
54626
54627
54628
54629
54630
54631
54632
54633
54634
54635
54636
54637
54638
54639
54640
54641
54642
54643
54644
54645
54646
54647
54648
54649
54650
54651
54652
54653
54654
54655
54656
54657
54658
54659
54660
54661
54662
54663
54664
54665
54666
54667
54668
54669
54670
54671
54672
54673
54674
54675
54676
54677
54678
54679
54680
54681
54682
54683
54684
54685
54686
54687
54688
54689
54690
54691
54692
54693
54694
54695
54696
54697
54698
54699
54700
54701
54702
54703
54704
54705
54706
54707
54708
54709
54710
54711
54712
54713
54714
54715
54716
54717
54718
54719
54720
54721
54722
54723
54724
54725
54726
54727
54728
54729
54730
54731
54732
54733
54734
54735
54736
54737
54738
54739
54740
54741
54742
54743
54744
54745
54746
54747
54748
54749
54750
54751
54752
54753
54754
54755
54756
54757
54758
54759
54760
54761
54762
54763
54764
54765
54766
54767
54768
54769
54770
54771
54772
54773
54774
54775
54776
54777
54778
54779
54780
54781
54782
54783
54784
54785
54786
54787
54788
54789
54790
54791
54792
54793
54794
54795
54796
54797
54798
54799
54800
54801
54802
54803
54804
54805
54806
54807
54808
54809
54810
54811
54812
54813
54814
54815
54816
54817
54818
54819
54820
54821
54822
54823
54824
54825
54826
54827
54828
54829
54830
54831
54832
54833
54834
54835
54836
54837
54838
54839
54840
54841
54842
54843
54844
54845
54846
54847
54848
54849
54850
54851
54852
54853
54854
54855
54856
54857
54858
54859
54860
54861
54862
54863
54864
54865
54866
54867
54868
54869
54870
54871
54872
54873
54874
54875
54876
54877
54878
54879
54880
54881
54882
54883
54884
54885
54886
54887
54888
54889
54890
54891
54892
54893
54894
54895
54896
54897
54898
54899
54900
54901
54902
54903
54904
54905
54906
54907
54908
54909
54910
54911
54912
54913
54914
54915
54916
54917
54918
54919
54920
54921
54922
54923
54924
54925
54926
54927
54928
54929
54930
54931
54932
54933
54934
54935
54936
54937
54938
54939
54940
54941
54942
54943
54944
54945
54946
54947
54948
54949
54950
54951
54952
54953
54954
54955
54956
54957
54958
54959
54960
54961
54962
54963
54964
54965
54966
54967
54968
54969
54970
54971
54972
54973
54974
54975
54976
54977
54978
54979
54980
54981
54982
54983
54984
54985
54986
54987
54988
54989
54990
54991
54992
54993
54994
54995
54996
54997
54998
54999
55000
55001
55002
55003
55004
55005
55006
55007
55008
55009
55010
55011
55012
55013
55014
55015
55016
55017
55018
55019
55020
55021
55022
55023
55024
55025
55026
55027
55028
55029
55030
55031
55032
55033
55034
55035
55036
55037
55038
55039
55040
55041
55042
55043
55044
55045
55046
55047
55048
55049
55050
55051
55052
55053
55054
55055
55056
55057
55058
55059
55060
55061
55062
55063
55064
55065
55066
55067
55068
55069
55070
55071
55072
55073
55074
55075
55076
55077
55078
55079
55080
55081
55082
55083
55084
55085
55086
55087
55088
55089
55090
55091
55092
55093
55094
55095
55096
55097
55098
55099
55100
55101
55102
55103
55104
55105
55106
55107
55108
55109
55110
55111
55112
55113
55114
55115
55116
55117
55118
55119
55120
55121
55122
55123
55124
55125
55126
55127
55128
55129
55130
55131
55132
55133
55134
55135
55136
55137
55138
55139
55140
55141
55142
55143
55144
55145
55146
55147
55148
55149
55150
55151
55152
55153
55154
55155
55156
55157
55158
55159
55160
55161
55162
55163
55164
55165
55166
55167
55168
55169
55170
55171
55172
55173
55174
55175
55176
55177
55178
55179
55180
55181
55182
55183
55184
55185
55186
55187
55188
55189
55190
55191
55192
55193
55194
55195
55196
55197
55198
55199
55200
55201
55202
55203
55204
55205
55206
55207
55208
55209
55210
55211
55212
55213
55214
55215
55216
55217
55218
55219
55220
55221
55222
55223
55224
55225
55226
55227
55228
55229
55230
55231
55232
55233
55234
55235
55236
55237
55238
55239
55240
55241
55242
55243
55244
55245
55246
55247
55248
55249
55250
55251
55252
55253
55254
55255
55256
55257
55258
55259
55260
55261
55262
55263
55264
55265
55266
55267
55268
55269
55270
55271
55272
55273
55274
55275
55276
55277
55278
55279
55280
55281
55282
55283
55284
55285
55286
55287
55288
55289
55290
55291
55292
55293
55294
55295
55296
55297
55298
55299
55300
55301
55302
55303
55304
55305
55306
55307
55308
55309
55310
55311
55312
55313
55314
55315
55316
55317
55318
55319
55320
55321
55322
55323
55324
55325
55326
55327
55328
55329
55330
55331
55332
55333
55334
55335
55336
55337
55338
55339
55340
55341
55342
55343
55344
55345
55346
55347
55348
55349
55350
55351
55352
55353
55354
55355
55356
55357
55358
55359
55360
55361
55362
55363
55364
55365
55366
55367
55368
55369
55370
55371
55372
55373
55374
55375
55376
55377
55378
55379
55380
55381
55382
55383
55384
55385
55386
55387
55388
55389
55390
55391
55392
55393
55394
55395
55396
55397
55398
55399
55400
55401
55402
55403
55404
55405
55406
55407
55408
55409
55410
55411
55412
55413
55414
55415
55416
55417
55418
55419
55420
55421
55422
55423
55424
55425
55426
55427
55428
55429
55430
55431
55432
55433
55434
55435
55436
55437
55438
55439
55440
55441
55442
55443
55444
55445
55446
55447
55448
55449
55450
55451
55452
55453
55454
55455
55456
55457
55458
55459
55460
55461
55462
55463
55464
55465
55466
55467
55468
55469
55470
55471
55472
55473
55474
55475
55476
55477
55478
55479
55480
55481
55482
55483
55484
55485
55486
55487
55488
55489
55490
55491
55492
55493
55494
55495
55496
55497
55498
55499
55500
55501
55502
55503
55504
55505
55506
55507
55508
55509
55510
55511
55512
55513
55514
55515
55516
55517
55518
55519
55520
55521
55522
55523
55524
55525
55526
55527
55528
55529
55530
55531
55532
55533
55534
55535
55536
55537
55538
55539
55540
55541
55542
55543
55544
55545
55546
55547
55548
55549
55550
55551
55552
55553
55554
55555
55556
55557
55558
55559
55560
55561
55562
55563
55564
55565
55566
55567
55568
55569
55570
55571
55572
55573
55574
55575
55576
55577
55578
55579
55580
55581
55582
55583
55584
55585
55586
55587
55588
55589
55590
55591
55592
55593
55594
55595
55596
55597
55598
55599
55600
55601
55602
55603
55604
55605
55606
55607
55608
55609
55610
55611
55612
55613
55614
55615
55616
55617
55618
55619
55620
55621
55622
55623
55624
55625
55626
55627
55628
55629
55630
55631
55632
55633
55634
55635
55636
55637
55638
55639
55640
55641
55642
55643
55644
55645
55646
55647
55648
55649
55650
55651
55652
55653
55654
55655
55656
55657
55658
55659
55660
55661
55662
55663
55664
55665
55666
55667
55668
55669
55670
55671
55672
55673
55674
55675
55676
55677
55678
55679
55680
55681
55682
55683
55684
55685
55686
55687
55688
55689
55690
55691
55692
55693
55694
55695
55696
55697
55698
55699
55700
55701
55702
55703
55704
55705
55706
55707
55708
55709
55710
55711
55712
55713
55714
55715
55716
55717
55718
55719
55720
55721
55722
55723
55724
55725
55726
55727
55728
55729
55730
55731
55732
55733
55734
55735
55736
55737
55738
55739
55740
55741
55742
55743
55744
55745
55746
55747
55748
55749
55750
55751
55752
55753
55754
55755
55756
55757
55758
55759
55760
55761
55762
55763
55764
55765
55766
55767
55768
55769
55770
55771
55772
55773
55774
55775
55776
55777
55778
55779
55780
55781
55782
55783
55784
55785
55786
55787
55788
55789
55790
55791
55792
55793
55794
55795
55796
55797
55798
55799
55800
55801
55802
55803
55804
55805
55806
55807
55808
55809
55810
55811
55812
55813
55814
55815
55816
55817
55818
55819
55820
55821
55822
55823
55824
55825
55826
55827
55828
55829
55830
55831
55832
55833
55834
55835
55836
55837
55838
55839
55840
55841
55842
55843
55844
55845
55846
55847
55848
55849
55850
55851
55852
55853
55854
55855
55856
55857
55858
55859
55860
55861
55862
55863
55864
55865
55866
55867
55868
55869
55870
55871
55872
55873
55874
55875
55876
55877
55878
55879
55880
55881
55882
55883
55884
55885
55886
55887
55888
55889
55890
55891
55892
55893
55894
55895
55896
55897
55898
55899
55900
55901
55902
55903
55904
55905
55906
55907
55908
55909
55910
55911
55912
55913
55914
55915
55916
55917
55918
55919
55920
55921
55922
55923
55924
55925
55926
55927
55928
55929
55930
55931
55932
55933
55934
55935
55936
55937
55938
55939
55940
55941
55942
55943
55944
55945
55946
55947
55948
55949
55950
55951
55952
55953
55954
55955
55956
55957
55958
55959
55960
55961
55962
55963
55964
55965
55966
55967
55968
55969
55970
55971
55972
55973
55974
55975
55976
55977
55978
55979
55980
55981
55982
55983
55984
55985
55986
55987
55988
55989
55990
55991
55992
55993
55994
55995
55996
55997
55998
55999
56000
56001
56002
56003
56004
56005
56006
56007
56008
56009
56010
56011
56012
56013
56014
56015
56016
56017
56018
56019
56020
56021
56022
56023
56024
56025
56026
56027
56028
56029
56030
56031
56032
56033
56034
56035
56036
56037
56038
56039
56040
56041
56042
56043
56044
56045
56046
56047
56048
56049
56050
56051
56052
56053
56054
56055
56056
56057
56058
56059
56060
56061
56062
56063
56064
56065
56066
56067
56068
56069
56070
56071
56072
56073
56074
56075
56076
56077
56078
56079
56080
56081
56082
56083
56084
56085
56086
56087
56088
56089
56090
56091
56092
56093
56094
56095
56096
56097
56098
56099
56100
56101
56102
56103
56104
56105
56106
56107
56108
56109
56110
56111
56112
56113
56114
56115
56116
56117
56118
56119
56120
56121
56122
56123
56124
56125
56126
56127
56128
56129
56130
56131
56132
56133
56134
56135
56136
56137
56138
56139
56140
56141
56142
56143
56144
56145
56146
56147
56148
56149
56150
56151
56152
56153
56154
56155
56156
56157
56158
56159
56160
56161
56162
56163
56164
56165
56166
56167
56168
56169
56170
56171
56172
56173
56174
56175
56176
56177
56178
56179
56180
56181
56182
56183
56184
56185
56186
56187
56188
56189
56190
56191
56192
56193
56194
56195
56196
56197
56198
56199
56200
56201
56202
56203
56204
56205
56206
56207
56208
56209
56210
56211
56212
56213
56214
56215
56216
56217
56218
56219
56220
56221
56222
56223
56224
56225
56226
56227
56228
56229
56230
56231
56232
56233
56234
56235
56236
56237
56238
56239
56240
56241
56242
56243
56244
56245
56246
56247
56248
56249
56250
56251
56252
56253
56254
56255
56256
56257
56258
56259
56260
56261
56262
56263
56264
56265
56266
56267
56268
56269
56270
56271
56272
56273
56274
56275
56276
56277
56278
56279
56280
56281
56282
56283
56284
56285
56286
56287
56288
56289
56290
56291
56292
56293
56294
56295
56296
56297
56298
56299
56300
56301
56302
56303
56304
56305
56306
56307
56308
56309
56310
56311
56312
56313
56314
56315
56316
56317
56318
56319
56320
56321
56322
56323
56324
56325
56326
56327
56328
56329
56330
56331
56332
56333
56334
56335
56336
56337
56338
56339
56340
56341
56342
56343
56344
56345
56346
56347
56348
56349
56350
56351
56352
56353
56354
56355
56356
56357
56358
56359
56360
56361
56362
56363
56364
56365
56366
56367
56368
56369
56370
56371
56372
56373
56374
56375
56376
56377
56378
56379
56380
56381
56382
56383
56384
56385
56386
56387
56388
56389
56390
56391
56392
56393
56394
56395
56396
56397
56398
56399
56400
56401
56402
56403
56404
56405
56406
56407
56408
56409
56410
56411
56412
56413
56414
56415
56416
56417
56418
56419
56420
56421
56422
56423
56424
56425
56426
56427
56428
56429
56430
56431
56432
56433
56434
56435
56436
56437
56438
56439
56440
56441
56442
56443
56444
56445
56446
56447
56448
56449
56450
56451
56452
56453
56454
56455
56456
56457
56458
56459
56460
56461
56462
56463
56464
56465
56466
56467
56468
56469
56470
56471
56472
56473
56474
56475
56476
56477
56478
56479
56480
56481
56482
56483
56484
56485
56486
56487
56488
56489
56490
56491
56492
56493
56494
56495
56496
56497
56498
56499
56500
56501
56502
56503
56504
56505
56506
56507
56508
56509
56510
56511
56512
56513
56514
56515
56516
56517
56518
56519
56520
56521
56522
56523
56524
56525
56526
56527
56528
56529
56530
56531
56532
56533
56534
56535
56536
56537
56538
56539
56540
56541
56542
56543
56544
56545
56546
56547
56548
56549
56550
56551
56552
56553
56554
56555
56556
56557
56558
56559
56560
56561
56562
56563
56564
56565
56566
56567
56568
56569
56570
56571
56572
56573
56574
56575
56576
56577
56578
56579
56580
56581
56582
56583
56584
56585
56586
56587
56588
56589
56590
56591
56592
56593
56594
56595
56596
56597
56598
56599
56600
56601
56602
56603
56604
56605
56606
56607
56608
56609
56610
56611
56612
56613
56614
56615
56616
56617
56618
56619
56620
56621
56622
56623
56624
56625
56626
56627
56628
56629
56630
56631
56632
56633
56634
56635
56636
56637
56638
56639
56640
56641
56642
56643
56644
56645
56646
56647
56648
56649
56650
56651
56652
56653
56654
56655
56656
56657
56658
56659
56660
56661
56662
56663
56664
56665
56666
56667
56668
56669
56670
56671
56672
56673
56674
56675
56676
56677
56678
56679
56680
56681
56682
56683
56684
56685
56686
56687
56688
56689
56690
56691
56692
56693
56694
56695
56696
56697
56698
56699
56700
56701
56702
56703
56704
56705
56706
56707
56708
56709
56710
56711
56712
56713
56714
56715
56716
56717
56718
56719
56720
56721
56722
56723
56724
56725
56726
56727
56728
56729
56730
56731
56732
56733
56734
56735
56736
56737
56738
56739
56740
56741
56742
56743
56744
56745
56746
56747
56748
56749
56750
56751
56752
56753
56754
56755
56756
56757
56758
56759
56760
56761
56762
56763
56764
56765
56766
56767
56768
56769
56770
56771
56772
56773
56774
56775
56776
56777
56778
56779
56780
56781
56782
56783
56784
56785
56786
56787
56788
56789
56790
56791
56792
56793
56794
56795
56796
56797
56798
56799
56800
56801
56802
56803
56804
56805
56806
56807
56808
56809
56810
56811
56812
56813
56814
56815
56816
56817
56818
56819
56820
56821
56822
56823
56824
56825
56826
56827
56828
56829
56830
56831
56832
56833
56834
56835
56836
56837
56838
56839
56840
56841
56842
56843
56844
56845
56846
56847
56848
56849
56850
56851
56852
56853
56854
56855
56856
56857
56858
56859
56860
56861
56862
56863
56864
56865
56866
56867
56868
56869
56870
56871
56872
56873
56874
56875
56876
56877
56878
56879
56880
56881
56882
56883
56884
56885
56886
56887
56888
56889
56890
56891
56892
56893
56894
56895
56896
56897
56898
56899
56900
56901
56902
56903
56904
56905
56906
56907
56908
56909
56910
56911
56912
56913
56914
56915
56916
56917
56918
56919
56920
56921
56922
56923
56924
56925
56926
56927
56928
56929
56930
56931
56932
56933
56934
56935
56936
56937
56938
56939
56940
56941
56942
56943
56944
56945
56946
56947
56948
56949
56950
56951
56952
56953
56954
56955
56956
56957
56958
56959
56960
56961
56962
56963
56964
56965
56966
56967
56968
56969
56970
56971
56972
56973
56974
56975
56976
56977
56978
56979
56980
56981
56982
56983
56984
56985
56986
56987
56988
56989
56990
56991
56992
56993
56994
56995
56996
56997
56998
56999
57000
57001
57002
57003
57004
57005
57006
57007
57008
57009
57010
57011
57012
57013
57014
57015
57016
57017
57018
57019
57020
57021
57022
57023
57024
57025
57026
57027
57028
57029
57030
57031
57032
57033
57034
57035
57036
57037
57038
57039
57040
57041
57042
57043
57044
57045
57046
57047
57048
57049
57050
57051
57052
57053
57054
57055
57056
57057
57058
57059
57060
57061
57062
57063
57064
57065
57066
57067
57068
57069
57070
57071
57072
57073
57074
57075
57076
57077
57078
57079
57080
57081
57082
57083
57084
57085
57086
57087
57088
57089
57090
57091
57092
57093
57094
57095
57096
57097
57098
57099
57100
57101
57102
57103
57104
57105
57106
57107
57108
57109
57110
57111
57112
57113
57114
57115
57116
57117
57118
57119
57120
57121
57122
57123
57124
57125
57126
57127
57128
57129
57130
57131
57132
57133
57134
57135
57136
57137
57138
57139
57140
57141
57142
57143
57144
57145
57146
57147
57148
57149
57150
57151
57152
57153
57154
57155
57156
57157
57158
57159
57160
57161
57162
57163
57164
57165
57166
57167
57168
57169
57170
57171
57172
57173
57174
57175
57176
57177
57178
57179
57180
57181
57182
57183
57184
57185
57186
57187
57188
57189
57190
57191
57192
57193
57194
57195
57196
57197
57198
57199
57200
57201
57202
57203
57204
57205
57206
57207
57208
57209
57210
57211
57212
57213
57214
57215
57216
57217
57218
57219
57220
57221
57222
57223
57224
57225
57226
57227
57228
57229
57230
57231
57232
57233
57234
57235
57236
57237
57238
57239
57240
57241
57242
57243
57244
57245
57246
57247
57248
57249
57250
57251
57252
57253
57254
57255
57256
57257
57258
57259
57260
57261
57262
57263
57264
57265
57266
57267
57268
57269
57270
57271
57272
57273
57274
57275
57276
57277
57278
57279
57280
57281
57282
57283
57284
57285
57286
57287
57288
57289
57290
57291
57292
57293
57294
57295
57296
57297
57298
57299
57300
57301
57302
57303
57304
57305
57306
57307
57308
57309
57310
57311
57312
57313
57314
57315
57316
57317
57318
57319
57320
57321
57322
57323
57324
57325
57326
57327
57328
57329
57330
57331
57332
57333
57334
57335
57336
57337
57338
57339
57340
57341
57342
57343
57344
57345
57346
57347
57348
57349
57350
57351
57352
57353
57354
57355
57356
57357
57358
57359
57360
57361
57362
57363
57364
57365
57366
57367
57368
57369
57370
57371
57372
57373
57374
57375
57376
57377
57378
57379
57380
57381
57382
57383
57384
57385
57386
57387
57388
57389
57390
57391
57392
57393
57394
57395
57396
57397
57398
57399
57400
57401
57402
57403
57404
57405
57406
57407
57408
57409
57410
57411
57412
57413
57414
57415
57416
57417
57418
57419
57420
57421
57422
57423
57424
57425
57426
57427
57428
57429
57430
57431
57432
57433
57434
57435
57436
57437
57438
57439
57440
57441
57442
57443
57444
57445
57446
57447
57448
57449
57450
57451
57452
57453
57454
57455
57456
57457
57458
57459
57460
57461
57462
57463
57464
57465
57466
57467
57468
57469
57470
57471
57472
57473
57474
57475
57476
57477
57478
57479
57480
57481
57482
57483
57484
57485
57486
57487
57488
57489
57490
57491
57492
57493
57494
57495
57496
57497
57498
57499
57500
57501
57502
57503
57504
57505
57506
57507
57508
57509
57510
57511
57512
57513
57514
57515
57516
57517
57518
57519
57520
57521
57522
57523
57524
57525
57526
57527
57528
57529
57530
57531
57532
57533
57534
57535
57536
57537
57538
57539
57540
57541
57542
57543
57544
57545
57546
57547
57548
57549
57550
57551
57552
57553
57554
57555
57556
57557
57558
57559
57560
57561
57562
57563
57564
57565
57566
57567
57568
57569
57570
57571
57572
57573
57574
57575
57576
57577
57578
57579
57580
57581
57582
57583
57584
57585
57586
57587
57588
57589
57590
57591
57592
57593
57594
57595
57596
57597
57598
57599
57600
57601
57602
57603
57604
57605
57606
57607
57608
57609
57610
57611
57612
57613
57614
57615
57616
57617
57618
57619
57620
57621
57622
57623
57624
57625
57626
57627
57628
57629
57630
57631
57632
57633
57634
57635
57636
57637
57638
57639
57640
57641
57642
57643
57644
57645
57646
57647
57648
57649
57650
57651
57652
57653
57654
57655
57656
57657
57658
57659
57660
57661
57662
57663
57664
57665
57666
57667
57668
57669
57670
57671
57672
57673
57674
57675
57676
57677
57678
57679
57680
57681
57682
57683
57684
57685
57686
57687
57688
57689
57690
57691
57692
57693
57694
57695
57696
57697
57698
57699
57700
57701
57702
57703
57704
57705
57706
57707
57708
57709
57710
57711
57712
57713
57714
57715
57716
57717
57718
57719
57720
57721
57722
57723
57724
57725
57726
57727
57728
57729
57730
57731
57732
57733
57734
57735
57736
57737
57738
57739
57740
57741
57742
57743
57744
57745
57746
57747
57748
57749
57750
57751
57752
57753
57754
57755
57756
57757
57758
57759
57760
57761
57762
57763
57764
57765
57766
57767
57768
57769
57770
57771
57772
57773
57774
57775
57776
57777
57778
57779
57780
57781
57782
57783
57784
57785
57786
57787
57788
57789
57790
57791
57792
57793
57794
57795
57796
57797
57798
57799
57800
57801
57802
57803
57804
57805
57806
57807
57808
57809
57810
57811
57812
57813
57814
57815
57816
57817
57818
57819
57820
57821
57822
57823
57824
57825
57826
57827
57828
57829
57830
57831
57832
57833
57834
57835
57836
57837
57838
57839
57840
57841
57842
57843
57844
57845
57846
57847
57848
57849
57850
57851
57852
57853
57854
57855
57856
57857
57858
57859
57860
57861
57862
57863
57864
57865
57866
57867
57868
57869
57870
57871
57872
57873
57874
57875
57876
57877
57878
57879
57880
57881
57882
57883
57884
57885
57886
57887
57888
57889
57890
57891
57892
57893
57894
57895
57896
57897
57898
57899
57900
57901
57902
57903
57904
57905
57906
57907
57908
57909
57910
57911
57912
57913
57914
57915
57916
57917
57918
57919
57920
57921
57922
57923
57924
57925
57926
57927
57928
57929
57930
57931
57932
57933
57934
57935
57936
57937
57938
57939
57940
57941
57942
57943
57944
57945
57946
57947
57948
57949
57950
57951
57952
57953
57954
57955
57956
57957
57958
57959
57960
57961
57962
57963
57964
57965
57966
57967
57968
57969
57970
57971
57972
57973
57974
57975
57976
57977
57978
57979
57980
57981
57982
57983
57984
57985
57986
57987
57988
57989
57990
57991
57992
57993
57994
57995
57996
57997
57998
57999
58000
58001
58002
58003
58004
58005
58006
58007
58008
58009
58010
58011
58012
58013
58014
58015
58016
58017
58018
58019
58020
58021
58022
58023
58024
58025
58026
58027
58028
58029
58030
58031
58032
58033
58034
58035
58036
58037
58038
58039
58040
58041
58042
58043
58044
58045
58046
58047
58048
58049
58050
58051
58052
58053
58054
58055
58056
58057
58058
58059
58060
58061
58062
58063
58064
58065
58066
58067
58068
58069
58070
58071
58072
58073
58074
58075
58076
58077
58078
58079
58080
58081
58082
58083
58084
58085
58086
58087
58088
58089
58090
58091
58092
58093
58094
58095
58096
58097
58098
58099
58100
58101
58102
58103
58104
58105
58106
58107
58108
58109
58110
58111
58112
58113
58114
58115
58116
58117
58118
58119
58120
From 9164464963a87499b1448d436821bd3f7a7646d5 Mon Sep 17 00:00:00 2001
From: Adrian Calianu <adrian.calianu@enea.com>
Date: Thu, 21 May 2020 09:02:54 +0200
Subject: [PATCH] Upgrade i40e drivers to 2.11.29

Signed-off-by: Adrian Calianu <adrian.calianu@enea.com>
---
 drivers/net/ethernet/intel/i40e/Makefile      |   209 +-
 .../net/ethernet/intel/i40e/Module.supported  |     1 +
 drivers/net/ethernet/intel/i40e/common.mk     |   305 +
 drivers/net/ethernet/intel/i40e/i40e.h        |   507 +-
 drivers/net/ethernet/intel/i40e/i40e_adminq.c |   385 +-
 drivers/net/ethernet/intel/i40e/i40e_adminq.h |    33 +-
 .../net/ethernet/intel/i40e/i40e_adminq_cmd.h |   575 +-
 drivers/net/ethernet/intel/i40e/i40e_alloc.h  |    27 +-
 drivers/net/ethernet/intel/i40e/i40e_client.c |   210 +-
 drivers/net/ethernet/intel/i40e/i40e_client.h |    41 +-
 drivers/net/ethernet/intel/i40e/i40e_common.c |  2968 +++-
 drivers/net/ethernet/intel/i40e/i40e_dcb.c    |   579 +-
 drivers/net/ethernet/intel/i40e/i40e_dcb.h    |    60 +-
 drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c |    46 +-
 drivers/net/ethernet/intel/i40e/i40e_ddp.c    |   478 +
 .../net/ethernet/intel/i40e/i40e_debugfs.c    |  1154 +-
 drivers/net/ethernet/intel/i40e/i40e_devids.h |    41 +-
 drivers/net/ethernet/intel/i40e/i40e_diag.c   |    70 +-
 drivers/net/ethernet/intel/i40e/i40e_diag.h   |    27 +-
 .../net/ethernet/intel/i40e/i40e_ethtool.c    |  3837 ++++-
 .../ethernet/intel/i40e/i40e_ethtool_stats.h  |   293 +
 .../net/ethernet/intel/i40e/i40e_filters.c    |    40 +
 .../net/ethernet/intel/i40e/i40e_filters.h    |    11 +
 drivers/net/ethernet/intel/i40e/i40e_helper.h |   128 +
 drivers/net/ethernet/intel/i40e/i40e_hmc.c    |    68 +-
 drivers/net/ethernet/intel/i40e/i40e_hmc.h    |    27 +-
 .../net/ethernet/intel/i40e/i40e_lan_hmc.c    |   135 +-
 .../net/ethernet/intel/i40e/i40e_lan_hmc.h    |    27 +-
 drivers/net/ethernet/intel/i40e/i40e_main.c   | 12375 +++++++++++-----
 drivers/net/ethernet/intel/i40e/i40e_nvm.c    |   553 +-
 drivers/net/ethernet/intel/i40e/i40e_osdep.h  |    99 +-
 .../net/ethernet/intel/i40e/i40e_prototype.h  |   327 +-
 drivers/net/ethernet/intel/i40e/i40e_ptp.c    |  1120 +-
 .../net/ethernet/intel/i40e/i40e_register.h   |   172 +-
 drivers/net/ethernet/intel/i40e/i40e_status.h |    28 +-
 drivers/net/ethernet/intel/i40e/i40e_trace.h  |    62 +-
 drivers/net/ethernet/intel/i40e/i40e_txrx.c   |  1348 +-
 drivers/net/ethernet/intel/i40e/i40e_txrx.h   |   171 +-
 drivers/net/ethernet/intel/i40e/i40e_type.h   |   270 +-
 .../ethernet/intel/i40e/i40e_virtchnl_pf.c    |  8342 ++++++++---
 .../ethernet/intel/i40e/i40e_virtchnl_pf.h    |   113 +-
 drivers/net/ethernet/intel/i40e/kcompat.c     |  2761 ++++
 drivers/net/ethernet/intel/i40e/kcompat.h     |  6838 +++++++++
 .../ethernet/intel/i40e/kcompat_overflow.h    |   319 +
 drivers/net/ethernet/intel/i40e/kcompat_vfd.c |  2550 ++++
 drivers/net/ethernet/intel/i40e/kcompat_vfd.h |   141 +
 drivers/net/ethernet/intel/i40e/virtchnl.h    |   949 ++
 47 files changed, 41086 insertions(+), 9734 deletions(-)
 create mode 100644 drivers/net/ethernet/intel/i40e/Module.supported
 create mode 100644 drivers/net/ethernet/intel/i40e/common.mk
 create mode 100644 drivers/net/ethernet/intel/i40e/i40e_ddp.c
 create mode 100644 drivers/net/ethernet/intel/i40e/i40e_ethtool_stats.h
 create mode 100644 drivers/net/ethernet/intel/i40e/i40e_filters.c
 create mode 100644 drivers/net/ethernet/intel/i40e/i40e_filters.h
 create mode 100644 drivers/net/ethernet/intel/i40e/i40e_helper.h
 create mode 100644 drivers/net/ethernet/intel/i40e/kcompat.c
 create mode 100644 drivers/net/ethernet/intel/i40e/kcompat.h
 create mode 100644 drivers/net/ethernet/intel/i40e/kcompat_overflow.h
 create mode 100644 drivers/net/ethernet/intel/i40e/kcompat_vfd.c
 create mode 100644 drivers/net/ethernet/intel/i40e/kcompat_vfd.h
 create mode 100644 drivers/net/ethernet/intel/i40e/virtchnl.h

diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index 3da482c3d..81f5ab908 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -1,31 +1,10 @@
-################################################################################
-#
-# Intel Ethernet Controller XL710 Family Linux Driver
-# Copyright(c) 2013 - 2015 Intel Corporation.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-# more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program.  If not, see <http://www.gnu.org/licenses/>.
-#
-# The full GNU General Public License is included in this distribution in
-# the file called "COPYING".
-#
-# Contact Information:
-# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-#
-################################################################################
+# SPDX-License-Identifier: GPL-2.0
+# Copyright(c) 2013 - 2020 Intel Corporation.
 
+ifneq ($(KERNELRELEASE),)
+# kbuild part of makefile
 #
-# Makefile for the Intel(R) Ethernet Connection XL710 (i40e.ko) driver
+# Makefile for the Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
 #
 
 ccflags-y += -I$(src)
@@ -33,18 +12,170 @@ subdir-ccflags-y += -I$(src)
 
 obj-$(CONFIG_I40E) += i40e.o
 
-i40e-objs := i40e_main.o \
-	i40e_ethtool.o	\
-	i40e_adminq.o	\
-	i40e_common.o	\
-	i40e_hmc.o	\
-	i40e_lan_hmc.o	\
-	i40e_nvm.o	\
-	i40e_debugfs.o	\
-	i40e_diag.o	\
-	i40e_txrx.o	\
-	i40e_ptp.o	\
-	i40e_client.o   \
+i40e-objs := i40e_main.o \
+	i40e_ethtool.o \
+	i40e_adminq.o \
+	i40e_common.o \
+	i40e_hmc.o \
+	i40e_lan_hmc.o \
+	i40e_nvm.o \
+	i40e_debugfs.o \
+	i40e_diag.o \
+	i40e_txrx.o \
+	i40e_ptp.o \
+	i40e_filters.o \
+	i40e_ddp.o \
+	i40e_client.o \
 	i40e_virtchnl_pf.o
 
-i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
+i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
+i40e-y += kcompat.o
+i40e-y += kcompat_vfd.o
+
+else	# ifneq($(KERNELRELEASE),)
+# normal makefile
+
+DRIVER := i40e
+
+# If the user just wants to print the help output, don't include common.mk or
+# perform any other checks. This ensures that running "make help" will always
+# work even if kernel-devel is not installed, or if including common.mk fails
+# under any other error condition.
+ifneq ($(MAKECMDGOALS),help)
+include common.mk
+
+# i40e does not support building on kernels older than 2.6.32
+$(call minimum_kver_check,2,6,32)
+endif
+
+# Command to update initramfs or display a warning message
+ifeq (${cmd_initrd},)
+define cmd_initramfs
+@echo "Unable to update initramfs. You may need to do this manaully."
+endef
+else
+define cmd_initramfs
+@echo "Updating initramfs..."
+-@$(call cmd_initrd)
+endef
+endif
+
+###############
+# Build rules #
+###############
+
+# Standard compilation, with regular output
+default:
+	@+$(call kernelbuild,modules)
+
+# Noisy output, for extra debugging
+noisy:
+	@+$(call kernelbuild,modules,V=1)
+
+# Silence any output generated
+silent:
+	@+$(call kernelbuild,modules,>/dev/null)
+
+# Enable higher warning level
+checkwarnings: clean
+	@+$(call kernelbuild,modules,W=1)
+
+# Run sparse static analyzer
+sparse: clean
+	@+$(call kernelbuild,modules,C=2 CF="-D__CHECK_ENDIAN__ -Wbitwise -Wcontext")
+
+# Run coccicheck static analyzer
+ccc: clean
+	@+$(call kernelbuild,modules,coccicheck MODE=report)
+
+# Build manfiles
+manfile:
+	@gzip -c ../${DRIVER}.${MANSECTION} > ${DRIVER}.${MANSECTION}.gz
+
+# Clean the module subdirectories
+clean:
+	@+$(call kernelbuild,clean)
+	@-rm -rf *.${MANSECTION}.gz *.ko
+
+mandocs_install: manfile
+	@echo "Copying manpages..."
+	@install -D -m 644 ${DRIVER}.${MANSECTION}.gz ${INSTALL_MOD_PATH}${MANDIR}/man${MANSECTION}/${DRIVER}.${MANSECTION}.gz
+
+# Install kernel module files. This target is called by the RPM specfile when
+# generating binary RPMs, and is not expected to modify files outside of the
+# build root. Thus, it must not update the initramfs image or run depmod.
+modules_install: default
+	@echo "Installing modules..."
+	@+$(call kernelbuild,modules_install)
+
+# After installing all the files, perform necessary work to ensure the system
+# will use the new modules. This includes running depmod to update module
+# dependencies and updating the initramfs image in case the module is loaded
+# during early boot.
+install: modules_install
+	$(call cmd_depmod)
+	$(call cmd_initramfs)
+	$(MAKE) mandocs_install
+
+# Target used by rpmbuild spec file
+rpm: modules_install
+	$(MAKE) mandocs_install
+
+mandocs_uninstall:
+	if [ -e ${INSTALL_MOD_PATH}${MANDIR}/man${MANSECTION}/${DRIVER}.${MANSECTION}.gz ] ; then \
+		rm -f ${INSTALL_MOD_PATH}${MANDIR}/man${MANSECTION}/${DRIVER}.${MANSECTION}.gz ; \
+	fi;
+
+# Remove installed module files. This target is called by the RPM specfile when
+# generating binary RPMs, and is not expected to modify files outside of the
+# build root. Thus, it must not update the initramfs image or run depmod.
+modules_uninstall:
+	rm -f ${INSTALL_MOD_PATH}/lib/modules/${KVER}/${INSTALL_MOD_DIR}/${DRIVER}.ko;
+
+# After uninstalling all the files, perform necessary work to restore the
+# system back to using the default kernel modules. This includes running depmod
+# to update module dependencies and updating the initramfs image.
+uninstall: modules_uninstall mandocs_uninstall
+	$(call cmd_depmod)
+	$(call cmd_initramfs)
+
+########
+# Help #
+########
+help:
+	@echo 'Build targets:'
+	@echo '  default             - Build module(s) with standard verbosity'
+	@echo '  noisy               - Build module(s) with V=1 verbosity -- very noisy'
+	@echo '  silent              - Build module(s), squelching all output'
+	@echo ''
+	@echo 'Static Analysis:'
+	@echo '  checkwarnings       - Clean, then build module(s) with W=1 warnings enabled'
+	@echo '  sparse              - Clean, then check module(s) using sparse'
+	@echo '  ccc                 - Clean, then check module(s) using coccicheck'
+	@echo ''
+	@echo 'Cleaning targets:'
+	@echo '  clean               - Clean files generated by kernel module build'
+	@echo ''
+	@echo 'Other targets:'
+	@echo '  manfile             - Generate a gzipped manpage'
+	@echo '  modules_install     - Install the module(s) only'
+	@echo '  mandocs_install     - Install the manpage only'
+	@echo '  install             - Build then install the module(s) and manpage, and update initramfs'
+	@echo '  modules_uninstall   - Uninstall the module(s) only'
+	@echo '  mandocs_uninstall   - Uninstall the manpage only'
+	@echo '  uninstall           - Uninstall the module(s) and manpage, and update initramfs'
+	@echo '  help                - Display this help message'
+	@echo ''
+	@echo 'Variables:'
+	@echo '  LINUX_VERSION       - Debug tool to force kernel LINUX_VERSION_CODE. Use at your own risk.'
+	@echo '  W=N                 - Kernel variable for setting warning levels'
+	@echo '  V=N                 - Kernel variable for setting output verbosity'
+	@echo '  INSTALL_MOD_PATH    - Add prefix for the module and manpage installation path'
+	@echo '  INSTALL_MOD_DIR     - Use module directory other than updates/drivers/net/ethernet/intel/${DRIVER}'
+	@echo '  KSRC                - Specifies the full path to the kernel tree to build against'
+	@echo ' Other variables may be available for tuning the make process; see'
+	@echo ' the kernel Kbuild documentation for more information'
+
+.PHONY: default noisy clean manfile silent sparse ccc install uninstall help
+
+endif	# ifneq($(KERNELRELEASE),)
diff --git a/drivers/net/ethernet/intel/i40e/Module.supported b/drivers/net/ethernet/intel/i40e/Module.supported
new file mode 100644
index 000000000..c0e31cf7f
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/Module.supported
@@ -0,0 +1 @@
+i40e.ko external
diff --git a/drivers/net/ethernet/intel/i40e/common.mk b/drivers/net/ethernet/intel/i40e/common.mk
new file mode 100644
index 000000000..1b95715e7
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/common.mk
@@ -0,0 +1,305 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright(c) 2013 - 2020 Intel Corporation.
+
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2015-2019 Intel Corporation
+#
+# common Makefile rules useful for out-of-tree Linux driver builds
+#
+# Usage: include common.mk
+#
+# After including, you probably want to add a minimum_kver_check call
+#
+# Required Variables:
+# DRIVER
+#   -- Set to the lowercase driver name
+
+#####################
+# Helpful functions #
+#####################
+
+readlink = $(shell readlink -f ${1})
+
+# helper functions for converting kernel version to version codes
+get_kver = $(or $(word ${2},$(subst ., ,${1})),0)
+get_kvercode = $(shell [ "${1}" -ge 0 -a "${1}" -le 255 2>/dev/null ] && \
+                       [ "${2}" -ge 0 -a "${2}" -le 255 2>/dev/null ] && \
+                       [ "${3}" -ge 0 -a "${3}" -le 255 2>/dev/null ] && \
+                       printf %d $$(( ( ${1} << 16 ) + ( ${2} << 8 ) + ( ${3} ) )) )
+
+################
+# depmod Macro #
+################
+
+cmd_depmod = /sbin/depmod $(if ${SYSTEM_MAP_FILE},-e -F ${SYSTEM_MAP_FILE}) \
+                          $(if $(strip ${INSTALL_MOD_PATH}),-b ${INSTALL_MOD_PATH}) \
+                          -a ${KVER}
+
+################
+# dracut Macro #
+################
+
+cmd_initrd := $(shell \
+                if which dracut > /dev/null 2>&1 ; then \
+                    echo "dracut --force"; \
+                elif which update-initramfs > /dev/null 2>&1 ; then \
+                    echo "update-initramfs -u"; \
+                fi )
+
+#####################
+# Environment tests #
+#####################
+
+DRIVER_UPPERCASE := $(shell echo ${DRIVER} | tr "[:lower:]" "[:upper:]")
+
+ifeq (,${BUILD_KERNEL})
+BUILD_KERNEL=$(shell uname -r)
+endif
+
+# Kernel Search Path
+# All the places we look for kernel source
+KSP :=  /lib/modules/${BUILD_KERNEL}/source \
+        /lib/modules/${BUILD_KERNEL}/build \
+        /usr/src/linux-${BUILD_KERNEL} \
+        /usr/src/linux-$(shell echo ${BUILD_KERNEL} | sed 's/-.*//') \
+        /usr/src/kernel-headers-${BUILD_KERNEL} \
+        /usr/src/kernel-source-${BUILD_KERNEL} \
+        /usr/src/linux-$(shell echo ${BUILD_KERNEL} | sed 's/\([0-9]*\.[0-9]*\)\..*/\1/') \
+        /usr/src/linux \
+        /usr/src/kernels/${BUILD_KERNEL} \
+        /usr/src/kernels
+
+# prune the list down to only values that exist and have an include/linux
+# sub-directory. We can't use include/config because some older kernels don't
+# have this.
+test_dir = $(shell [ -e ${dir}/include/linux ] && echo ${dir})
+KSP := $(foreach dir, ${KSP}, ${test_dir})
+
+# we will use this first valid entry in the search path
+ifeq (,${KSRC})
+  KSRC := $(firstword ${KSP})
+endif
+
+ifeq (,${KSRC})
+  $(warning *** Kernel header files not in any of the expected locations.)
+  $(warning *** Install the appropriate kernel development package, e.g.)
+  $(error kernel-devel, for building kernel modules and try again)
+else
+ifeq (/lib/modules/${BUILD_KERNEL}/source, ${KSRC})
+  KOBJ :=  /lib/modules/${BUILD_KERNEL}/build
+else
+  KOBJ :=  ${KSRC}
+endif
+endif
+
+# Version file Search Path
+VSP :=  ${KOBJ}/include/generated/utsrelease.h \
+        ${KOBJ}/include/linux/utsrelease.h \
+        ${KOBJ}/include/linux/version.h \
+        ${KOBJ}/include/generated/uapi/linux/version.h \
+        /boot/vmlinuz.version.h
+
+# Config file Search Path
+CSP :=  ${KOBJ}/include/generated/autoconf.h \
+        ${KOBJ}/include/linux/autoconf.h \
+        /boot/vmlinuz.autoconf.h
+
+# System.map Search Path (for depmod)
+MSP := ${KSRC}/System.map \
+       /boot/System.map-${BUILD_KERNEL}
+
+# prune the lists down to only files that exist
+test_file = $(shell [ -f ${file} ] && echo ${file})
+VSP := $(foreach file, ${VSP}, ${test_file})
+CSP := $(foreach file, ${CSP}, ${test_file})
+MSP := $(foreach file, ${MSP}, ${test_file})
+
+
+# and use the first valid entry in the Search Paths
+ifeq (,${VERSION_FILE})
+  VERSION_FILE := $(firstword ${VSP})
+endif
+
+ifeq (,${CONFIG_FILE})
+  CONFIG_FILE := $(firstword ${CSP})
+endif
+
+ifeq (,${SYSTEM_MAP_FILE})
+  SYSTEM_MAP_FILE := $(firstword ${MSP})
+endif
+
+ifeq (,$(wildcard ${VERSION_FILE}))
+  $(error Linux kernel source not configured - missing version header file)
+endif
+
+ifeq (,$(wildcard ${CONFIG_FILE}))
+  $(error Linux kernel source not configured - missing autoconf.h)
+endif
+
+ifeq (,$(wildcard ${SYSTEM_MAP_FILE}))
+  $(warning Missing System.map file - depmod will not check for missing symbols)
+endif
+
+ifneq ($(words $(subst :, ,$(CURDIR))), 1)
+  $(error Sources directory '$(CURDIR)' cannot contain spaces or colons. Rename the directory or move the sources to another path)
+endif
+
+#######################
+# Linux Version Setup #
+#######################
+
+# The following command line parameter is intended for development of KCOMPAT
+# against upstream kernels such as net-next, which have broken or non-updated
+# version codes in their Makefile. It is intended for debugging and
+# development purposes only, so that we can easily test new KCOMPAT early. If
+# you don't know what this means, you do not need to set this flag. There is
+# no arcane magic here.
+
+# Convert LINUX_VERSION into LINUX_VERSION_CODE
+ifneq (${LINUX_VERSION},)
+  LINUX_VERSION_CODE=$(call get_kvercode,$(call get_kver,${LINUX_VERSION},1),$(call get_kver,${LINUX_VERSION},2),$(call get_kver,${LINUX_VERSION},3))
+endif
+
+# Honor LINUX_VERSION_CODE
+ifneq (${LINUX_VERSION_CODE},)
+  $(warning Forcing target kernel to build with LINUX_VERSION_CODE of ${LINUX_VERSION_CODE}$(if ${LINUX_VERSION}, from LINUX_VERSION=${LINUX_VERSION}). Do this at your own risk.)
+  KVER_CODE := ${LINUX_VERSION_CODE}
+  EXTRA_CFLAGS += -DLINUX_VERSION_CODE=${LINUX_VERSION_CODE}
+endif
+
+# Determine SLE_LOCALVERSION_CODE for SuSE SLE >= 11 (needed by kcompat)
+# This assumes SuSE will continue setting CONFIG_LOCALVERSION to the string
+# appended to the stable kernel version on which their kernel is based with
+# additional versioning information (up to 3 numbers), a possible abbreviated
+# git SHA1 commit id and a kernel type, e.g. CONFIG_LOCALVERSION=-1.2.3-default
+# or CONFIG_LOCALVERSION=-999.gdeadbee-default
+ifeq (1,$(shell ${CC} -E -dM ${CONFIG_FILE} 2> /dev/null |\
+          grep -m 1 CONFIG_SUSE_KERNEL | awk '{ print $$3 }'))
+
+ifneq (10,$(shell ${CC} -E -dM ${CONFIG_FILE} 2> /dev/null |\
+	  grep -m 1 CONFIG_SLE_VERSION | awk '{ print $$3 }'))
+
+  LOCALVERSION := $(shell ${CC} -E -dM ${CONFIG_FILE} 2> /dev/null |\
+                    grep -m 1 CONFIG_LOCALVERSION | awk '{ print $$3 }' |\
+                    cut -d'-' -f2 | sed 's/\.g[[:xdigit:]]\{7\}//')
+  LOCALVER_A := $(shell echo ${LOCALVERSION} | cut -d'.' -f1)
+  LOCALVER_B := $(shell echo ${LOCALVERSION} | cut -s -d'.' -f2)
+  LOCALVER_C := $(shell echo ${LOCALVERSION} | cut -s -d'.' -f3)
+  SLE_LOCALVERSION_CODE := $(shell expr ${LOCALVER_A} \* 65536 + \
+                                        0${LOCALVER_B} \* 256 + 0${LOCALVER_C})
+  EXTRA_CFLAGS += -DSLE_LOCALVERSION_CODE=${SLE_LOCALVERSION_CODE}
+endif
+endif
+
+EXTRA_CFLAGS += ${CFLAGS_EXTRA}
+
+# get the kernel version - we use this to find the correct install path
+KVER := $(shell ${CC} ${EXTRA_CFLAGS} -E -dM ${VERSION_FILE} | grep UTS_RELEASE | \
+        awk '{ print $$3 }' | sed 's/\"//g')
+
+# assume source symlink is the same as build, otherwise adjust KOBJ
+ifneq (,$(wildcard /lib/modules/${KVER}/build))
+  ifneq (${KSRC},$(call readlink,/lib/modules/${KVER}/build))
+    KOBJ=/lib/modules/${KVER}/build
+  endif
+endif
+
+ifeq (${KVER_CODE},)
+  KVER_CODE := $(shell ${CC} ${EXTRA_CFLAGS} -E -dM ${VSP} 2> /dev/null |\
+                 grep -m 1 LINUX_VERSION_CODE | awk '{ print $$3 }' | sed 's/\"//g')
+endif
+
+# minimum_kver_check
+#
+# helper function to provide uniform output for different drivers to abort the
+# build based on kernel version check. Usage: "$(call minimum_kver_check,2,6,XX)".
+define _minimum_kver_check
+ifeq (0,$(shell [ ${KVER_CODE} -lt $(call get_kvercode,${1},${2},${3}) ]; echo "$$?"))
+  $$(warning *** Aborting the build.)
+  $$(error This driver is not supported on kernel versions older than ${1}.${2}.${3})
+endif
+endef
+minimum_kver_check = $(eval $(call _minimum_kver_check,${1},${2},${3}))
+
+################
+# Manual Pages #
+################
+
+MANSECTION = 7
+
+ifeq (,${MANDIR})
+  # find the best place to install the man page
+  MANPATH := $(shell (manpath 2>/dev/null || echo $$MANPATH) | sed 's/:/ /g')
+  ifneq (,${MANPATH})
+    # test based on inclusion in MANPATH
+    test_dir = $(findstring ${dir}, ${MANPATH})
+  else
+    # no MANPATH, test based on directory existence
+    test_dir = $(shell [ -e ${dir} ] && echo ${dir})
+  endif
+  # our preferred install path
+  # should /usr/local/man be in here ?
+  MANDIR := /usr/share/man /usr/man
+  MANDIR := $(foreach dir, ${MANDIR}, ${test_dir})
+  MANDIR := $(firstword ${MANDIR})
+endif
+ifeq (,${MANDIR})
+  # fallback to /usr/man
+  MANDIR := /usr/man
+endif
+
+####################
+# CCFLAGS variable #
+####################
+
+# set correct CCFLAGS variable for kernels older than 2.6.24
+ifeq (0,$(shell [ ${KVER_CODE} -lt $(call get_kvercode,2,6,24) ]; echo $$?))
+CCFLAGS_VAR := EXTRA_CFLAGS
+else
+CCFLAGS_VAR := ccflags-y
+endif
+
+#################
+# KBUILD_OUTPUT #
+#################
+
+# Only set KBUILD_OUTPUT if the real paths of KOBJ and KSRC differ
+ifneq ($(call readlink,${KSRC}),$(call readlink,${KOBJ}))
+export KBUILD_OUTPUT ?= ${KOBJ}
+endif
+
+############################
+# Module Install Directory #
+############################
+
+# Default to using the updates/drivers/net/ethernet/intel/ path, since depmod
+# v3.1 and later checks the updates folder first, and only checks kernels/
+# and extra afterwards. We use updates instead of kernel/* because we want to
+# avoid overwriting the built-in module files.
+export INSTALL_MOD_DIR ?= updates/drivers/net/ethernet/intel/${DRIVER}
+
+######################
+# Kernel Build Macro #
+######################
+
+# kernel build function
+# ${1} is the kernel build target
+# ${2} may contain any extra rules to pass directly to the sub-make process
+#
+# This function is expected to be executed by
+#   @+$(call kernelbuild,<target>,<extra parameters>)
+# from within a Makefile recipe.
+#
+# The following variables are expected to be defined for its use:
+# GCC_I_SYS -- if set it will enable use of gcc-i-sys.sh wrapper to use -isystem
+# CCFLAGS_VAR -- the CCFLAGS variable to set extra CFLAGS
+# EXTRA_CFLAGS -- a set of extra CFLAGS to pass into the ccflags-y variable
+# KSRC -- the location of the kernel source tree to build against
+# DRIVER_UPPERCASE -- the uppercase name of the kernel module, set from DRIVER
+#
+kernelbuild = ${MAKE} $(if ${GCC_I_SYS},CC="${GCC_I_SYS}") \
+                      ${CCFLAGS_VAR}="${EXTRA_CFLAGS}" \
+                      -C "${KSRC}" \
+                      CONFIG_${DRIVER_UPPERCASE}=m \
+                      M="${CURDIR}" \
+                      ${2} ${1}
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index d0c1bf544..776bb99b6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1,28 +1,5 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
 #ifndef _I40E_H_
 #define _I40E_H_
@@ -36,10 +13,9 @@
 #include <linux/aer.h>
 #include <linux/netdevice.h>
 #include <linux/ioport.h>
-#include <linux/iommu.h>
 #include <linux/slab.h>
 #include <linux/list.h>
-#include <linux/hashtable.h>
+#include <linux/hash.h>
 #include <linux/string.h>
 #include <linux/in.h>
 #include <linux/ip.h>
@@ -47,36 +23,61 @@
 #include <linux/pkt_sched.h>
 #include <linux/ipv6.h>
 #include <net/checksum.h>
+#include <net/ipv6.h>
 #include <net/ip6_checksum.h>
+#ifdef SIOCETHTOOL
 #include <linux/ethtool.h>
+#endif
 #include <linux/if_vlan.h>
 #include <linux/if_bridge.h>
+#include "kcompat.h"
+#ifdef HAVE_IOMMU_PRESENT
+#include <linux/iommu.h>
+#endif
+#ifdef HAVE_SCTP
+#include <linux/sctp.h>
+#endif
+#ifdef HAVE_PTP_1588_CLOCK
 #include <linux/clocksource.h>
 #include <linux/net_tstamp.h>
 #include <linux/ptp_clock_kernel.h>
+#endif /* HAVE_PTP_1588_CLOCK */
+#ifdef __TC_MQPRIO_MODE_MAX
+#include <net/pkt_cls.h>
+#endif
 #include "i40e_type.h"
 #include "i40e_prototype.h"
 #include "i40e_client.h"
-#include <linux/avf/virtchnl.h>
+#include "virtchnl.h"
 #include "i40e_virtchnl_pf.h"
 #include "i40e_txrx.h"
 #include "i40e_dcb.h"
 
+#ifdef HAVE_XDP_SUPPORT
+#include <linux/bpf_trace.h>
+#endif
+
 /* Useful i40e defaults */
 #define I40E_MAX_VEB			16
 
 #define I40E_MAX_NUM_DESCRIPTORS	4096
 #define I40E_MAX_CSR_SPACE		(4 * 1024 * 1024 - 64 * 1024)
 #define I40E_DEFAULT_NUM_DESCRIPTORS	512
+#define L4_MODE_UDP 0
+#define L4_MODE_TCP 1
+#define L4_MODE_BOTH 2
+#define L4_MODE_DISABLED -1
+bool i40e_is_l4mode_enabled(void);
 #define I40E_REQ_DESCRIPTOR_MULTIPLE	32
 #define I40E_MIN_NUM_DESCRIPTORS	64
 #define I40E_MIN_MSIX			2
 #define I40E_DEFAULT_NUM_VMDQ_VSI	8 /* max 256 VSIs */
-#define I40E_MIN_VSI_ALLOC		83 /* LAN, ATR, FCOE, 64 VF */
+#define I40E_MIN_VSI_ALLOC		83 /* LAN, ATR, FCOE, 64 VF, 16 VMDQ */
 /* max 16 qps */
 #define i40e_default_queues_per_vmdq(pf) \
 		(((pf)->hw_features & I40E_HW_RSS_AQ_CAPABLE) ? 4 : 1)
 #define I40E_DEFAULT_QUEUES_PER_VF	4
+#define I40E_MAX_VF_QUEUES		16
 #define I40E_DEFAULT_QUEUES_PER_TC	1 /* should be a power of 2 */
 #define i40e_pf_get_max_q_per_tc(pf) \
 		(((pf)->hw_features & I40E_HW_128_QP_RSS_CAPABLE) ? 128 : 64)
@@ -85,6 +86,10 @@
 #define I40E_MAX_AQ_BUF_SIZE		4096
 #define I40E_AQ_LEN			256
 #define I40E_AQ_WORK_LIMIT		66 /* max number of VFs + a little */
+/*
+ * If I40E_MAX_USER_PRIORITY is updated please also update
+ * I40E_CLIENT_MAX_USER_PRIORITY in i40e_client.h and i40evf_client.h
+ */
 #define I40E_MAX_USER_PRIORITY		8
 #define I40E_DEFAULT_TRAFFIC_CLASS	BIT(0)
 #define I40E_DEFAULT_MSG_ENABLE		4
@@ -114,7 +119,7 @@
 #define I40E_CURRENT_NVM_VERSION_LO	0x40
 
 #define I40E_RX_DESC(R, i)	\
-	(&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
+	(&(((union i40e_rx_desc *)((R)->desc))[i]))
 #define I40E_TX_DESC(R, i)	\
 	(&(((struct i40e_tx_desc *)((R)->desc))[i]))
 #define I40E_TX_CTXTDESC(R, i)	\
@@ -125,6 +130,10 @@
 /* default to trying for four seconds */
 #define I40E_TRY_LINK_TIMEOUT	(4 * HZ)
 
+/* BW rate limiting */
+#define I40E_BW_CREDIT_DIVISOR		50 /* 50Mbps per BW credit */
+#define I40E_MAX_BW_INACTIVE_ACCUM	4  /* accumulate 4 credits max */
+
 /* driver state flags */
 enum i40e_state_t {
 	__I40E_TESTING,
@@ -136,6 +145,8 @@ enum i40e_state_t {
 	__I40E_MDD_EVENT_PENDING,
 	__I40E_VFLR_EVENT_PENDING,
 	__I40E_RESET_RECOVERY_PENDING,
+	__I40E_TIMEOUT_RECOVERY_PENDING,
+	__I40E_MISC_IRQ_REQUESTED,
 	__I40E_RESET_INTR_RECEIVED,
 	__I40E_REINIT_REQUESTED,
 	__I40E_PF_RESET_REQUESTED,
@@ -144,17 +155,31 @@ enum i40e_state_t {
 	__I40E_EMP_RESET_REQUESTED,
 	__I40E_EMP_RESET_INTR_RECEIVED,
 	__I40E_SUSPENDED,
-	__I40E_PTP_TX_IN_PROGRESS,
 	__I40E_BAD_EEPROM,
+	__I40E_DEBUG_MODE,
 	__I40E_DOWN_REQUESTED,
 	__I40E_FD_FLUSH_REQUESTED,
+	__I40E_FD_ATR_AUTO_DISABLED,
+	__I40E_FD_SB_AUTO_DISABLED,
 	__I40E_RESET_FAILED,
 	__I40E_PORT_SUSPENDED,
+	__I40E_PTP_TX_IN_PROGRESS,
 	__I40E_VF_DISABLE,
+	__I40E_RECOVERY_MODE,
+	__I40E_MACVLAN_SYNC_PENDING,
+	__I40E_UDP_FILTER_SYNC_PENDING,
+	__I40E_TEMP_LINK_POLLING,
+	__I40E_CLIENT_SERVICE_REQUESTED,
+	__I40E_CLIENT_L2_CHANGE,
+	__I40E_CLIENT_RESET,
+	__I40E_VIRTCHNL_OP_PENDING,
+	__I40E_VFS_RELEASING,
 	/* This must be last as it determines the size of the BITMAP */
 	__I40E_STATE_SIZE__,
 };
 
+#define I40E_PF_RESET_FLAG	BIT_ULL(__I40E_PF_RESET_REQUESTED)
+
 /* VSI state flags */
 enum i40e_vsi_state_t {
 	__I40E_VSI_DOWN,
@@ -163,6 +188,7 @@ enum i40e_vsi_state_t {
 	__I40E_VSI_OVERFLOW_PROMISC,
 	__I40E_VSI_REINIT_REQUESTED,
 	__I40E_VSI_DOWN_REQUESTED,
+	__I40E_VSI_RELEASING,
 	/* This must be last as it determines the size of the BITMAP */
 	__I40E_VSI_STATE_SIZE__,
 };
@@ -183,6 +209,10 @@ struct i40e_lump_tracking {
 
 #define I40E_DEFAULT_ATR_SAMPLE_RATE	20
 #define I40E_FDIR_MAX_RAW_PACKET_SIZE	512
+#define I40E_TCPIP_DUMMY_PACKET_LEN	54
+#define I40E_SCTPIP_DUMMY_PACKET_LEN	46
+#define I40E_UDPIP_DUMMY_PACKET_LEN	42
+#define I40E_IP_DUMMY_PACKET_LEN	34
 #define I40E_FDIR_BUFFER_FULL_MARGIN	10
 #define I40E_FDIR_BUFFER_HEAD_ROOM	32
 #define I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR (I40E_FDIR_BUFFER_HEAD_ROOM * 4)
@@ -205,10 +235,20 @@ enum i40e_fd_stat_idx {
 #define I40E_FD_ATR_TUNNEL_STAT_IDX(pf_id) \
 			(I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR_TUNNEL)
 
+/* get PTP pins for ioctl */
+#define SIOCGPINS	(SIOCDEVPRIVATE + 0)
+/* set PTP pins for ioctl */
+#define SIOCSPINS	(SIOCDEVPRIVATE + 1)
+
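/* Illustrative sketch, not part of the patch: userspace reaches these
 * private ioctls through an ifreq on the netdev; the pins buffer layout is
 * assumed here, not taken from the driver:
 *
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth2", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&pins_buf;
 *	if (ioctl(sock_fd, SIOCGPINS, &ifr) < 0)
 *		perror("SIOCGPINS");
 */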
 /* The following structure contains the data parsed from the user-defined
  * field of the ethtool_rx_flow_spec structure.
  */
 struct i40e_rx_flow_userdef {
+	bool cloud_filter;
+	bool tenant_id_valid;
+	u32 tenant_id;
+	bool tunnel_type_valid;
+	u8 tunnel_type;
 	bool flex_filter;
 	u16 flex_word;
 	u16 flex_offset;
@@ -216,7 +256,7 @@ struct i40e_rx_flow_userdef {
 
 struct i40e_fdir_filter {
 	struct hlist_node fdir_node;
-	/* filter ipnut set */
+	/* filter input set */
 	u8 flow_type;
 	u8 ip4_proto;
 	/* TX packet view of src and dst */
@@ -233,7 +273,6 @@ struct i40e_fdir_filter {
 
 	/* filter control */
 	u16 q_index;
-	u8  flex_off;
 	u8  pctype;
 	u16 dest_vsi;
 	u8  dest_ctl;
@@ -242,6 +281,65 @@ struct i40e_fdir_filter {
 	u32 fd_id;
 };
 
+#define I40E_CLOUD_FIELD_OMAC		BIT(0)
+#define I40E_CLOUD_FIELD_IMAC		BIT(1)
+#define I40E_CLOUD_FIELD_IVLAN		BIT(2)
+#define I40E_CLOUD_FIELD_TEN_ID		BIT(3)
+#define I40E_CLOUD_FIELD_IIP		BIT(4)
+
+#define I40E_CLOUD_FILTER_FLAGS_OMAC I40E_CLOUD_FIELD_OMAC
+#define I40E_CLOUD_FILTER_FLAGS_IMAC I40E_CLOUD_FIELD_IMAC
+#define I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN (I40E_CLOUD_FIELD_IMAC | \
+					    I40E_CLOUD_FIELD_IVLAN)
+#define I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID (I40E_CLOUD_FIELD_IMAC | \
+					     I40E_CLOUD_FIELD_TEN_ID)
+#define I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC (I40E_CLOUD_FIELD_OMAC | \
+						  I40E_CLOUD_FIELD_IMAC | \
+						  I40E_CLOUD_FIELD_TEN_ID)
+#define I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID (I40E_CLOUD_FIELD_IMAC | \
+						   I40E_CLOUD_FIELD_IVLAN | \
+						   I40E_CLOUD_FIELD_TEN_ID)
+#define I40E_CLOUD_FILTER_FLAGS_IIP  I40E_CLOUD_FIELD_IIP
+
+struct i40e_cloud_filter {
+	struct hlist_node cloud_node;
+	unsigned long cookie;
+	/* cloud filter input set follows */
+	u8 outer_mac[ETH_ALEN];
+	u8 inner_mac[ETH_ALEN];
+	__be16 inner_vlan;
+	__be32 inner_ip[4];
+	u16 queue_id;
+	u32 id;
+	/* cloud filter input set follows */
+	u8 dst_mac[ETH_ALEN];
+	u8 src_mac[ETH_ALEN];
+	__be16 vlan_id;
+	u16 seid;       /* filter control */
+	__be16 dst_port;
+	__be16 src_port;
+	u32 tenant_id;
+	union {
+		struct {
+			struct in_addr dst_ip;
+			struct in_addr src_ip;
+		} v4;
+		struct {
+			struct in6_addr dst_ip6;
+			struct in6_addr src_ip6;
+		} v6;
+	} ip;
+#define dst_ipv6	ip.v6.dst_ip6.s6_addr32
+#define src_ipv6	ip.v6.src_ip6.s6_addr32
+#define dst_ipv4	ip.v4.dst_ip.s_addr
+#define src_ipv4	ip.v4.src_ip.s_addr
+	u16 n_proto;    /* Ethernet Protocol */
+	u8 ip_proto;    /* IPPROTO value */
+	u8 flags;
+#define I40E_CLOUD_TNL_TYPE_NONE	0xff
+	u8 tunnel_type;
+};
+
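/* Illustrative sketch, not part of the patch: the accessor macros defined
 * inside the struct let filter code address the v4/v6 union uniformly:
 *
 *	filter->dst_ipv4 = daddr;    // expands to filter->ip.v4.dst_ip.s_addr
 *	filter->n_proto = ETH_P_IP;  // host order, per the field comment
 */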
 #define I40E_ETH_P_LLDP			0x88cc
 
 #define I40E_DCB_PRIO_TYPE_STRICT	0
@@ -261,10 +359,34 @@ struct i40e_tc_configuration {
 	struct i40e_tc_info tc_info[I40E_MAX_TRAFFIC_CLASS];
 };
 
+#define I40E_UDP_PORT_INDEX_UNUSED	255
 struct i40e_udp_port_config {
 	/* AdminQ command interface expects port number in Host byte order */
 	u16 port;
 	u8 type;
+	u8 filter_index;
+};
+
+#define I40_DDP_FLASH_REGION 100
+#define I40E_PROFILE_INFO_SIZE 48
+#define I40E_MAX_PROFILE_NUM 16
+#define I40E_PROFILE_LIST_SIZE \
+	(I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4)
+#define I40E_DDP_PROFILE_PATH "intel/i40e/ddp/"
+
+int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,
+		  bool is_add);
+int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash);
+
+struct i40e_ddp_profile_list {
+	u32 p_count;
+	struct i40e_profile_info p_info[0];
+};
+
+struct i40e_ddp_old_profile_list {
+	struct list_head list;
+	size_t old_ddp_size;
+	u8 old_ddp_buf[0];
 };
 
 /* macros related to FLX_PIT */
@@ -330,12 +452,49 @@ struct i40e_udp_port_config {
 				 I40E_FLEX_54_MASK | I40E_FLEX_55_MASK | \
 				 I40E_FLEX_56_MASK | I40E_FLEX_57_MASK)
 
+#define I40E_QINT_TQCTL_VAL(qp, vector, nextq_type) \
+	(I40E_QINT_TQCTL_CAUSE_ENA_MASK | \
+	(I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | \
+	((vector) << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | \
+	((qp) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) | \
+	(I40E_QUEUE_TYPE_##nextq_type << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT))
+
+#define I40E_QINT_RQCTL_VAL(qp, vector, nextq_type) \
+	(I40E_QINT_RQCTL_CAUSE_ENA_MASK | \
+	(I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | \
+	((vector) << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | \
+	((qp) << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | \
+	(I40E_QUEUE_TYPE_##nextq_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT))
+
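/* Usage sketch, not part of the patch: the nextq_type argument is
 * token-pasted onto I40E_QUEUE_TYPE_, so callers pass the bare suffix.
 * With chained queues, an Rx cause points at the paired Tx queue and vice
 * versa (qp, vector and pf_q are hypothetical values here):
 *
 *	wr32(hw, I40E_QINT_RQCTL(pf_q), I40E_QINT_RQCTL_VAL(qp, vector, TX));
 *	wr32(hw, I40E_QINT_TQCTL(pf_q), I40E_QINT_TQCTL_VAL(qp, vector, RX));
 */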
 struct i40e_flex_pit {
 	struct list_head list;
 	u16 src_offset;
 	u8 pit_index;
 };
 
+struct i40e_channel {
+	struct list_head list;
+	bool initialized;
+	u8 type;
+	u16 vsi_number; /* Assigned VSI number from AQ 'Add VSI' response */
+	u16 stat_counter_idx;
+	u16 base_queue;
+	u16 num_queue_pairs; /* Requested by user */
+	u16 seid;
+
+	u8 enabled_tc;
+	struct i40e_aqc_vsi_properties_data info;
+
+	u64 max_tx_rate;
+
+	/* track which VSI this channel belongs to */
+	struct i40e_vsi *parent_vsi;
+};
+
+#ifdef HAVE_PTP_1588_CLOCK
+struct i40e_ptp_pins_settings;
+#endif /* HAVE_PTP_1588_CLOCK */
+
 /* struct that defines the Ethernet device */
 struct i40e_pf {
 	struct pci_dev *pdev;
@@ -390,6 +549,9 @@ struct i40e_pf {
 	struct i40e_udp_port_config udp_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
 	u16 pending_udp_bitmap;
 
+	struct hlist_head cloud_filter_list;
+	u16 num_cloud_filters;
+
 	enum i40e_interrupt_policy int_policy;
 	u16 rx_itr_default;
 	u16 tx_itr_default;
@@ -401,55 +563,93 @@ struct i40e_pf {
 	struct timer_list service_timer;
 	struct work_struct service_task;
 
-	u64 hw_features;
-#define I40E_HW_RSS_AQ_CAPABLE			BIT_ULL(0)
-#define I40E_HW_128_QP_RSS_CAPABLE		BIT_ULL(1)
-#define I40E_HW_ATR_EVICT_CAPABLE		BIT_ULL(2)
-#define I40E_HW_WB_ON_ITR_CAPABLE		BIT_ULL(3)
-#define I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE	BIT_ULL(4)
-#define I40E_HW_NO_PCI_LINK_CHECK		BIT_ULL(5)
-#define I40E_HW_100M_SGMII_CAPABLE		BIT_ULL(6)
-#define I40E_HW_NO_DCB_SUPPORT			BIT_ULL(7)
-#define I40E_HW_USE_SET_LLDP_MIB		BIT_ULL(8)
-#define I40E_HW_GENEVE_OFFLOAD_CAPABLE		BIT_ULL(9)
-#define I40E_HW_PTP_L4_CAPABLE			BIT_ULL(10)
-#define I40E_HW_WOL_MC_MAGIC_PKT_WAKE		BIT_ULL(11)
-#define I40E_HW_MPLS_HDR_OFFLOAD_CAPABLE	BIT_ULL(12)
-#define I40E_HW_HAVE_CRT_RETIMER		BIT_ULL(13)
-#define I40E_HW_OUTER_UDP_CSUM_CAPABLE		BIT_ULL(14)
-#define I40E_HW_PHY_CONTROLS_LEDS		BIT_ULL(15)
-#define I40E_HW_STOP_FW_LLDP			BIT_ULL(16)
-#define I40E_HW_PORT_ID_VALID			BIT_ULL(17)
-#define I40E_HW_RESTART_AUTONEG			BIT_ULL(18)
+	u32 hw_features;
+#define I40E_HW_RSS_AQ_CAPABLE			BIT(0)
+#define I40E_HW_128_QP_RSS_CAPABLE		BIT(1)
+#define I40E_HW_ATR_EVICT_CAPABLE		BIT(2)
+#define I40E_HW_WB_ON_ITR_CAPABLE		BIT(3)
+#define I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE	BIT(4)
+#define I40E_HW_NO_PCI_LINK_CHECK		BIT(5)
+#define I40E_HW_100M_SGMII_CAPABLE		BIT(6)
+#define I40E_HW_NO_DCB_SUPPORT			BIT(7)
+#define I40E_HW_USE_SET_LLDP_MIB		BIT(8)
+#define I40E_HW_GENEVE_OFFLOAD_CAPABLE		BIT(9)
+#define I40E_HW_PTP_L4_CAPABLE			BIT(10)
+#define I40E_HW_WOL_MC_MAGIC_PKT_WAKE		BIT(11)
+#define I40E_HW_MPLS_HDR_OFFLOAD_CAPABLE	BIT(12)
+#define I40E_HW_HAVE_CRT_RETIMER		BIT(13)
+#define I40E_HW_OUTER_UDP_CSUM_CAPABLE		BIT(14)
+#define I40E_HW_PHY_CONTROLS_LEDS		BIT(15)
+#define I40E_HW_STOP_FW_LLDP			BIT(16)
+#define I40E_HW_PORT_ID_VALID			BIT(17)
+#define I40E_HW_RESTART_AUTONEG			BIT(18)
 
 	u64 flags;
-#define I40E_FLAG_RX_CSUM_ENABLED		BIT_ULL(1)
-#define I40E_FLAG_MSI_ENABLED			BIT_ULL(2)
-#define I40E_FLAG_MSIX_ENABLED			BIT_ULL(3)
-#define I40E_FLAG_HW_ATR_EVICT_ENABLED		BIT_ULL(4)
-#define I40E_FLAG_RSS_ENABLED			BIT_ULL(6)
-#define I40E_FLAG_VMDQ_ENABLED			BIT_ULL(7)
-#define I40E_FLAG_IWARP_ENABLED			BIT_ULL(10)
-#define I40E_FLAG_FILTER_SYNC			BIT_ULL(15)
-#define I40E_FLAG_SERVICE_CLIENT_REQUESTED	BIT_ULL(16)
-#define I40E_FLAG_SRIOV_ENABLED			BIT_ULL(19)
-#define I40E_FLAG_DCB_ENABLED			BIT_ULL(20)
-#define I40E_FLAG_FD_SB_ENABLED			BIT_ULL(21)
-#define I40E_FLAG_FD_ATR_ENABLED		BIT_ULL(22)
-#define I40E_FLAG_FD_SB_AUTO_DISABLED		BIT_ULL(23)
-#define I40E_FLAG_FD_ATR_AUTO_DISABLED		BIT_ULL(24)
-#define I40E_FLAG_PTP				BIT_ULL(25)
-#define I40E_FLAG_MFP_ENABLED			BIT_ULL(26)
-#define I40E_FLAG_UDP_FILTER_SYNC		BIT_ULL(27)
-#define I40E_FLAG_DCB_CAPABLE			BIT_ULL(29)
-#define I40E_FLAG_VEB_STATS_ENABLED		BIT_ULL(37)
-#define I40E_FLAG_LINK_POLLING_ENABLED		BIT_ULL(39)
-#define I40E_FLAG_VEB_MODE_ENABLED		BIT_ULL(40)
-#define I40E_FLAG_TRUE_PROMISC_SUPPORT		BIT_ULL(51)
-#define I40E_FLAG_CLIENT_RESET			BIT_ULL(54)
-#define I40E_FLAG_TEMP_LINK_POLLING		BIT_ULL(55)
-#define I40E_FLAG_CLIENT_L2_CHANGE		BIT_ULL(56)
-#define I40E_FLAG_LEGACY_RX			BIT_ULL(58)
+#define I40E_FLAG_RX_CSUM_ENABLED		BIT(0)
+#define I40E_FLAG_MSI_ENABLED			BIT(1)
+#define I40E_FLAG_MSIX_ENABLED			BIT(2)
+#define I40E_FLAG_RSS_ENABLED			BIT(3)
+#define I40E_FLAG_VMDQ_ENABLED			BIT(4)
+#define I40E_FLAG_SRIOV_ENABLED			BIT(5)
+#define I40E_FLAG_DCB_CAPABLE			BIT(6)
+#define I40E_FLAG_DCB_ENABLED			BIT(7)
+#define I40E_FLAG_FD_SB_ENABLED			BIT(8)
+#define I40E_FLAG_FD_ATR_ENABLED		BIT(9)
+#define I40E_FLAG_MFP_ENABLED			BIT(10)
+#define I40E_FLAG_HW_ATR_EVICT_ENABLED		BIT(11)
+#define I40E_FLAG_VEB_MODE_ENABLED		BIT(12)
+#define I40E_FLAG_VEB_STATS_ENABLED		BIT(13)
+#define I40E_FLAG_LINK_POLLING_ENABLED		BIT(14)
+#define I40E_FLAG_TRUE_PROMISC_SUPPORT		BIT(15)
+#define I40E_FLAG_LEGACY_RX			BIT(16)
+#ifdef HAVE_PTP_1588_CLOCK
+#define I40E_FLAG_PTP				BIT(17)
+#endif /* HAVE_PTP_1588_CLOCK */
+#define I40E_FLAG_IWARP_ENABLED			BIT(18)
+#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED	BIT(19)
+#define I40E_FLAG_SOURCE_PRUNING_DISABLED       BIT(20)
+#define I40E_FLAG_TC_MQPRIO			BIT(21)
+#define I40E_FLAG_FD_SB_INACTIVE		BIT(22)
+#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER		BIT(23)
+#define I40E_FLAG_DISABLE_FW_LLDP		BIT(24)
+#define I40E_FLAG_RS_FEC			BIT(25)
+#define I40E_FLAG_BASE_R_FEC			BIT(26)
+#define I40E_FLAG_TOTAL_PORT_SHUTDOWN		BIT(27)
+
+/* GPIO defines used by PTP */
+#define I40E_SDP3_2			18
+#define I40E_SDP3_3			19
+#define I40E_GPIO_4			20
+#define I40E_LED2_0			26
+#define I40E_LED2_1			27
+#define I40E_LED3_0			28
+#define I40E_LED3_1			29
+#define I40E_GPIO_SET_HIGH		BIT(5)
+#define I40E_GPIO_SET_LOW		0
+#define I40E_DRIVE_SDP_ON		BIT(6)
+#define I40E_DRIVE_SDP_OFF		0
+#define I40E_GPIO_PRT_NUM_0		0
+#define I40E_GPIO_PRT_NUM_1		1
+#define I40E_GPIO_PRT_NUM_2		2
+#define I40E_GPIO_PRT_NUM_3		3
+#define I40E_GPIO_RESERVED_2		BIT(2)
+#define I40E_GPIO_PIN_DIR_OUT		BIT(4)
+#define I40E_GPIO_PIN_DIR_IN		0
+#define I40E_GPIO_TRI_CTL_OFF		BIT(5)
+#define I40E_GPIO_CTL_OUT_HIGH		BIT(6)
+#define I40E_GPIO_TIMESYNC_0		(3 << 7)
+#define I40E_GPIO_TIMESYNC_1		(4 << 7)
+#define I40E_GPIO_PHY_PIN_NA_ME_NO_SDP	(0x3F << 20)
+#define I40E_PORT_0_TIMESYNC_1		0x3F00184
+#define I40E_PORT_1_TIMESYNC_1		0x3F00185
+#define I40E_PORT_0_OUT_HIGH_TIMESYNC_0	0x3F00274
+#define I40E_PORT_1_OUT_HIGH_TIMESYNC_1	0x3F00275
+#define I40E_PTP_HALF_SECOND		500000000LL /* nanoseconds */
+#define I40E_PRTTSYN_CTL0_FFFB_MASK	0xFFFFFFFB
+#define I40E_PTP_2_SEC_DELAY		2
+
+	/* flag to enable/disable vf base mode support */
+	bool vf_base_mode_only;
 
 	struct i40e_client_instance *cinst;
 	bool stat_offsets_loaded;
@@ -507,22 +707,50 @@ struct i40e_pf {
 	u16 dcbx_cap;
 
 	struct i40e_filter_control_settings filter_settings;
+#ifdef HAVE_PTP_1588_CLOCK
 
 	struct ptp_clock *ptp_clock;
 	struct ptp_clock_info ptp_caps;
 	struct sk_buff *ptp_tx_skb;
 	unsigned long ptp_tx_start;
 	struct hwtstamp_config tstamp_config;
+	struct timespec64 ptp_prev_hw_time;
+	struct work_struct ptp_pps_work;
+	struct work_struct ptp_extts0_work;
+	struct work_struct ptp_extts1_work;
+	ktime_t ptp_reset_start;
 	struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */
-	u64 ptp_base_adj;
+	u32 ptp_adj_mult;
 	u32 tx_hwtstamp_timeouts;
 	u32 tx_hwtstamp_skipped;
 	u32 rx_hwtstamp_cleared;
 	u32 latch_event_flags;
+	u64 ptp_pps_start;
+	u32 pps_delay;
 	spinlock_t ptp_rx_lock; /* Used to protect Rx timestamp registers. */
+	struct ptp_pin_desc ptp_pin[3];
 	unsigned long latch_events[4];
 	bool ptp_tx;
 	bool ptp_rx;
+	struct i40e_ptp_pins_settings *ptp_pins;
+	struct kobject *ptp_kobj;
+#endif /* HAVE_PTP_1588_CLOCK */
+#ifdef I40E_ADD_PROBES
+	u64 tcp_segs;
+	u64 tx_tcp_cso;
+	u64 tx_udp_cso;
+	u64 tx_sctp_cso;
+	u64 tx_ip4_cso;
+	u64 rx_tcp_cso;
+	u64 rx_udp_cso;
+	u64 rx_sctp_cso;
+	u64 rx_ip4_cso;
+	u64 hw_csum_rx_outer;
+	u64 rx_tcp_cso_err;
+	u64 rx_udp_cso_err;
+	u64 rx_sctp_cso_err;
+	u64 rx_ip4_cso_err;
+#endif
 	u16 rss_table_size; /* HW RSS table size */
 	u32 max_bw;
 	u32 min_bw;
@@ -530,6 +758,18 @@ struct i40e_pf {
 	u32 ioremap_len;
 	u32 fd_inv;
 	u16 phy_led_val;
+	u16 last_sw_conf_flags;
+	u16 last_sw_conf_valid_flags;
+
+	u16 override_q_count;
+	struct vfd_objects *vfd_obj;
+	u16 ingress_rule_id;
+	int ingress_vlan;
+	u16 egress_rule_id;
+	int egress_vlan;
+	bool vf_bw_applied;
+	/* Previous DDP profiles kept so they can be rolled back later */
+	struct list_head ddp_old_prof;
 };
 
 /**
@@ -605,7 +845,11 @@ struct i40e_veb {
 /* struct that defines a VSI, associated with a dev */
 struct i40e_vsi {
 	struct net_device *netdev;
+#ifdef HAVE_VLAN_RX_REGISTER
+	struct vlan_group *vlgrp;
+#else
 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+#endif
 	bool netdev_registered;
 	bool stat_offsets_loaded;
 
@@ -613,7 +857,7 @@ struct i40e_vsi {
 	DECLARE_BITMAP(state, __I40E_VSI_STATE_SIZE__);
 #define I40E_VSI_FLAG_FILTER_CHANGED	BIT(0)
 #define I40E_VSI_FLAG_VEB_OWNER		BIT(1)
-	unsigned long flags;
+	u64 flags;
 
 	/* Per VSI lock to protect elements/hash (MAC filter) */
 	spinlock_t mac_filter_hash_lock;
@@ -622,8 +866,13 @@ struct i40e_vsi {
 	bool has_vlan_filter;
 
 	/* VSI stats */
+#ifdef HAVE_NDO_GET_STATS64
 	struct rtnl_link_stats64 net_stats;
 	struct rtnl_link_stats64 net_stats_offsets;
+#else
+	struct net_device_stats net_stats;
+	struct net_device_stats net_stats_offsets;
+#endif
 	struct i40e_eth_stats eth_stats;
 	struct i40e_eth_stats eth_stats_offsets;
 	u32 tx_restart;
@@ -649,7 +898,6 @@ struct i40e_vsi {
 	u8  *rss_hkey_user;	/* User configured hash keys */
 	u8  *rss_lut_user;	/* User configured lookup table entries */
 
-
 	u16 max_frame;
 	u16 rx_buf_len;
 
@@ -669,10 +917,13 @@ struct i40e_vsi {
 	u16 alloc_queue_pairs;	/* Allocated Tx/Rx queues */
 	u16 req_queue_pairs;	/* User requested queue pairs */
 	u16 num_queue_pairs;	/* Used tx and rx pairs */
-	u16 num_desc;
+	u16 num_tx_desc;
+	u16 num_rx_desc;
 	enum i40e_vsi_type type;  /* VSI type, e.g., LAN, FCoE, etc */
 	s16 vf_id;		/* Virtual function ID for SRIOV VSIs */
-
+#ifdef __TC_MQPRIO_MODE_MAX
+	struct tc_mqprio_qopt_offload mqprio_qopt; /* queue parameters */
+#endif
 	struct i40e_tc_configuration tc_config;
 	struct i40e_aqc_vsi_properties_data info;
 
@@ -693,11 +944,25 @@ struct i40e_vsi {
 	struct kobject *kobj;	/* sysfs object */
 	bool current_isup;	/* Sync 'link up' logging */
 	enum i40e_aq_link_speed current_speed;	/* Sync link speed logging */
+	/* channel specific fields */
+	u16 cnt_q_avail;	/* num of queues available for channel usage */
+	u16 orig_rss_size;
+	u16 current_rss_size;
+	bool reconfig_rss;
+
+	u16 next_base_queue;	/* next queue to be used for channel setup */
+
+	struct list_head ch_list;
+	u16 tc_seid_map[I40E_MAX_TRAFFIC_CLASS];
 
 	void *priv;	/* client driver data reference. */
+	bool block_tx_timeout;
 
 	/* VSI specific handlers */
 	irqreturn_t (*irq_handler)(int irq, void *data);
 } ____cacheline_internodealigned_in_smp;
 
 struct i40e_netdev_priv {
@@ -716,16 +981,17 @@ struct i40e_q_vector {
 	struct i40e_ring_container rx;
 	struct i40e_ring_container tx;
 
+	u8 itr_countdown;	/* when 0 should adjust adaptive ITR */
 	u8 num_ringpairs;	/* total number of ring pairs in vector */
 
+#ifdef HAVE_IRQ_AFFINITY_NOTIFY
 	cpumask_t affinity_mask;
 	struct irq_affinity_notify affinity_notify;
+#endif
 
 	struct rcu_head rcu;	/* to avoid race with update stats on free */
 	char name[I40E_INT_NAME_STR_LEN];
 	bool arm_wb_state;
-#define ITR_COUNTDOWN_START 100
-	u8 itr_countdown;	/* when 0 should adjust ITR */
 } ____cacheline_internodealigned_in_smp;
 
 /* lan device */
@@ -845,7 +1111,7 @@ static inline void i40e_write_fd_input_set(struct i40e_pf *pf,
 /* needed by i40e_ethtool.c */
 int i40e_up(struct i40e_vsi *vsi);
 void i40e_down(struct i40e_vsi *vsi);
-extern const char i40e_driver_name[];
+extern char i40e_driver_name[];
 extern const char i40e_driver_version_str[];
 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);
 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired);
@@ -854,6 +1120,7 @@ int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
 void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
 		       u16 rss_table_size, u16 rss_size);
 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id);
+struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_pf *pf, u16 seid);
 /**
  * i40e_find_vsi_by_type - Find and return Flow Director VSI
  * @pf: PF to search for VSI
@@ -874,8 +1141,13 @@ i40e_find_vsi_by_type(struct i40e_pf *pf, u16 type)
 	return NULL;
 }
 void i40e_update_stats(struct i40e_vsi *vsi);
+void i40e_update_veb_stats(struct i40e_veb *veb);
 void i40e_update_eth_stats(struct i40e_vsi *vsi);
+#ifdef HAVE_NDO_GET_STATS64
 struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
+#else
+struct net_device_stats *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
+#endif
 int i40e_fetch_switch_configuration(struct i40e_pf *pf,
 				    bool printconfig);
 
@@ -888,6 +1160,8 @@ u32 i40e_get_current_atr_cnt(struct i40e_pf *pf);
 u32 i40e_get_global_fd_count(struct i40e_pf *pf);
 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
 void i40e_set_ethtool_ops(struct net_device *netdev);
+struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
+					 const u8 *macaddr, s16 vlan);
 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
 					const u8 *macaddr, s16 vlan);
 void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f);
@@ -896,14 +1170,26 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
 				u16 uplink, u32 param1);
 int i40e_vsi_release(struct i40e_vsi *vsi);
+int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type);
+int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi);
+int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi);
+int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc);
+int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename);
 void i40e_service_event_schedule(struct i40e_pf *pf);
 void i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id,
 				  u8 *msg, u16 len);
 
+int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q, bool is_xdp,
+			   bool enable);
+int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable);
 int i40e_vsi_start_rings(struct i40e_vsi *vsi);
 void i40e_vsi_stop_rings(struct i40e_vsi *vsi);
 void i40e_vsi_stop_rings_no_wait(struct  i40e_vsi *vsi);
 int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi);
+void i40e_quiesce_vsi(struct i40e_vsi *vsi);
+void i40e_unquiesce_vsi(struct i40e_vsi *vsi);
+void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf);
+void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf);
 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);
 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
 				u16 downlink_seid, u8 enabled_tc);
@@ -912,6 +1198,7 @@ void i40e_veb_release(struct i40e_veb *veb);
 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc);
 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid);
 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi);
+int i40e_get_cloud_filter_type(u8 flags, u16 *type);
 void i40e_vsi_reset_stats(struct i40e_vsi *vsi);
 void i40e_pf_reset_stats(struct i40e_pf *pf);
 #ifdef CONFIG_DEBUG_FS
@@ -933,6 +1220,7 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi);
 void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset);
 void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs);
 void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id);
+void i40e_client_update_msix_info(struct i40e_pf *pf);
 int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id);
 /**
  * i40e_irq_dynamic_enable - Enable default interrupt generation settings
@@ -945,9 +1233,6 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
 	struct i40e_hw *hw = &pf->hw;
 	u32 val;
 
-	/* definitely clear the PBA here, as this function is meant to
-	 * clean out all previous interrupts AND enable the interrupt
-	 */
 	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
 	      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
 	      (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
@@ -956,8 +1241,9 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
 }
 
 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
-void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba);
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+int i40e_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
 int i40e_open(struct net_device *netdev);
 int i40e_close(struct net_device *netdev);
 int i40e_vsi_open(struct i40e_vsi *vsi);
@@ -970,18 +1256,22 @@ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
 					    const u8 *macaddr);
 int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr);
 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
+int i40e_count_filters(struct i40e_vsi *vsi);
 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
-#ifdef CONFIG_I40E_DCB
+#ifdef CONFIG_DCB
+#ifdef HAVE_DCBNL_IEEE
 void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
 			   struct i40e_dcbx_config *old_cfg,
 			   struct i40e_dcbx_config *new_cfg);
 void i40e_dcbnl_set_all(struct i40e_vsi *vsi);
 void i40e_dcbnl_setup(struct i40e_vsi *vsi);
+#endif /* HAVE_DCBNL_IEEE */
 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
 			    struct i40e_dcbx_config *old_cfg,
 			    struct i40e_dcbx_config *new_cfg);
-#endif /* CONFIG_I40E_DCB */
+#endif /* CONFIG_DCB */
+#ifdef HAVE_PTP_1588_CLOCK
 void i40e_ptp_rx_hang(struct i40e_pf *pf);
 void i40e_ptp_tx_hang(struct i40e_pf *pf);
 void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf);
@@ -989,16 +1279,45 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index);
 void i40e_ptp_set_increment(struct i40e_pf *pf);
 int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
 int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
+int i40e_ptp_set_pins_ioctl(struct i40e_pf *pf, struct ifreq *ifr);
+int i40e_ptp_get_pins(struct i40e_pf *pf, struct ifreq *ifr);
+void i40e_ptp_save_hw_time(struct i40e_pf *pf);
+void i40e_ptp_restore_hw_time(struct i40e_pf *pf);
 void i40e_ptp_init(struct i40e_pf *pf);
 void i40e_ptp_stop(struct i40e_pf *pf);
+int i40e_ptp_alloc_pins(struct i40e_pf *pf);
+#endif /* HAVE_PTP_1588_CLOCK */
+u8 i40e_pf_get_num_tc(struct i40e_pf *pf);
 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
 i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf);
 i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf);
 i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf);
+int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate);
+int i40e_add_del_cloud_filter_ex(struct i40e_pf *pf,
+				 struct i40e_cloud_filter *filter,
+				 bool add);
+int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
+			      struct i40e_cloud_filter *filter,
+			      bool add);
+int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
+				      struct i40e_cloud_filter *filter,
+				      bool add);
 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
+int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);
+int i40e_get_link_speed(struct i40e_vsi *vsi);
+
+void i40e_set_fec_in_flags(u8 fec_cfg, u64 *flags);
+
+#ifdef HAVE_XDP_SUPPORT
+int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair);
+int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair);
+#endif
 
 static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
 {
 	return !!vsi->xdp_prog;
 }
+int i40e_restore_ingress_egress_mirror(struct i40e_vsi *src_vsi, int mirror,
+				       u16 rule_type, u16 *rule_id);
+
 #endif /* _I40E_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index ba04988e0..15b9d9c89 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -1,28 +1,5 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
 #include "i40e_status.h"
 #include "i40e_type.h"
@@ -30,8 +7,6 @@
 #include "i40e_adminq.h"
 #include "i40e_prototype.h"
 
-static void i40e_resume_aq(struct i40e_hw *hw);
-
 /**
  *  i40e_adminq_init_regs - Initialize AdminQ registers
  *  @hw: pointer to the hardware structure
@@ -119,6 +94,7 @@ static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
  **/
 static void i40e_free_adminq_asq(struct i40e_hw *hw)
 {
+	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
 	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
 }
 
@@ -169,21 +145,21 @@ static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
 		/* now configure the descriptors for use */
 		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
 
-		desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+		desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
 		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
-			desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+			desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
 		desc->opcode = 0;
 		/* This is in accordance with Admin queue design, there is no
 		 * register for buffer size configuration
 		 */
-		desc->datalen = cpu_to_le16((u16)bi->size);
+		desc->datalen = CPU_TO_LE16((u16)bi->size);
 		desc->retval = 0;
 		desc->cookie_high = 0;
 		desc->cookie_low = 0;
 		desc->params.external.addr_high =
-			cpu_to_le32(upper_32_bits(bi->pa));
+			CPU_TO_LE32(upper_32_bits(bi->pa));
 		desc->params.external.addr_low =
-			cpu_to_le32(lower_32_bits(bi->pa));
+			CPU_TO_LE32(lower_32_bits(bi->pa));
 		desc->params.external.param0 = 0;
 		desc->params.external.param1 = 0;
 	}
@@ -291,7 +267,7 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
  **/
 static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
 {
-	i40e_status ret_code = 0;
+	i40e_status ret_code = I40E_SUCCESS;
 	u32 reg = 0;
 
 	/* Clear Head and Tail */
@@ -320,7 +296,7 @@ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
  **/
 static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
 {
-	i40e_status ret_code = 0;
+	i40e_status ret_code = I40E_SUCCESS;
 	u32 reg = 0;
 
 	/* Clear Head and Tail */
@@ -359,7 +335,7 @@ static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
  **/
 static i40e_status i40e_init_asq(struct i40e_hw *hw)
 {
-	i40e_status ret_code = 0;
+	i40e_status ret_code = I40E_SUCCESS;
 
 	if (hw->aq.asq.count > 0) {
 		/* queue already initialized */
@@ -379,18 +355,18 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
 
 	/* allocate the ring memory */
 	ret_code = i40e_alloc_adminq_asq_ring(hw);
-	if (ret_code)
+	if (ret_code != I40E_SUCCESS)
 		goto init_adminq_exit;
 
 	/* allocate buffers in the rings */
 	ret_code = i40e_alloc_asq_bufs(hw);
-	if (ret_code)
+	if (ret_code != I40E_SUCCESS)
 		goto init_adminq_free_rings;
 
 	/* initialize base registers */
 	ret_code = i40e_config_asq_regs(hw);
-	if (ret_code)
-		goto init_adminq_free_rings;
+	if (ret_code != I40E_SUCCESS)
+		goto init_config_regs;
 
 	/* success! */
 	hw->aq.asq.count = hw->aq.num_asq_entries;
@@ -398,6 +374,10 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
 
 init_adminq_free_rings:
 	i40e_free_adminq_asq(hw);
+	return ret_code;
+
+init_config_regs:
+	i40e_free_asq_bufs(hw);
 
 init_adminq_exit:
 	return ret_code;
@@ -418,7 +398,7 @@ init_adminq_exit:
  **/
 static i40e_status i40e_init_arq(struct i40e_hw *hw)
 {
-	i40e_status ret_code = 0;
+	i40e_status ret_code = I40E_SUCCESS;
 
 	if (hw->aq.arq.count > 0) {
 		/* queue already initialized */
@@ -438,17 +418,17 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
 
 	/* allocate the ring memory */
 	ret_code = i40e_alloc_adminq_arq_ring(hw);
-	if (ret_code)
+	if (ret_code != I40E_SUCCESS)
 		goto init_adminq_exit;
 
 	/* allocate buffers in the rings */
 	ret_code = i40e_alloc_arq_bufs(hw);
-	if (ret_code)
+	if (ret_code != I40E_SUCCESS)
 		goto init_adminq_free_rings;
 
 	/* initialize base registers */
 	ret_code = i40e_config_arq_regs(hw);
-	if (ret_code)
+	if (ret_code != I40E_SUCCESS)
 		goto init_adminq_free_rings;
 
 	/* success! */
@@ -470,9 +450,9 @@ init_adminq_exit:
  **/
 static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
 {
-	i40e_status ret_code = 0;
+	i40e_status ret_code = I40E_SUCCESS;
 
-	mutex_lock(&hw->aq.asq_mutex);
+	i40e_acquire_spinlock(&hw->aq.asq_spinlock);
 
 	if (hw->aq.asq.count == 0) {
 		ret_code = I40E_ERR_NOT_READY;
@@ -492,7 +472,7 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
 	i40e_free_asq_bufs(hw);
 
 shutdown_asq_out:
-	mutex_unlock(&hw->aq.asq_mutex);
+	i40e_release_spinlock(&hw->aq.asq_spinlock);
 	return ret_code;
 }
 
@@ -504,9 +484,9 @@ shutdown_asq_out:
  **/
 static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
 {
-	i40e_status ret_code = 0;
+	i40e_status ret_code = I40E_SUCCESS;
 
-	mutex_lock(&hw->aq.arq_mutex);
+	i40e_acquire_spinlock(&hw->aq.arq_spinlock);
 
 	if (hw->aq.arq.count == 0) {
 		ret_code = I40E_ERR_NOT_READY;
@@ -526,10 +506,86 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
 	i40e_free_arq_bufs(hw);
 
 shutdown_arq_out:
-	mutex_unlock(&hw->aq.arq_mutex);
+	i40e_release_spinlock(&hw->aq.arq_spinlock);
 	return ret_code;
 }
 
+/**
+ *  i40e_resume_aq - resume AQ processing from 0
+ *  @hw: pointer to the hardware structure
+ **/
+static void i40e_resume_aq(struct i40e_hw *hw)
+{
+	/* Registers are reset after PF reset */
+	hw->aq.asq.next_to_use = 0;
+	hw->aq.asq.next_to_clean = 0;
+
+	i40e_config_asq_regs(hw);
+
+	hw->aq.arq.next_to_use = 0;
+	hw->aq.arq.next_to_clean = 0;
+
+	i40e_config_arq_regs(hw);
+}
+
+/**
+ *  i40e_set_hw_flags - set HW flags
+ *  @hw: pointer to the hardware structure
+ **/
+static void i40e_set_hw_flags(struct i40e_hw *hw)
+{
+	struct i40e_adminq_info *aq = &hw->aq;
+
+	hw->flags = 0;
+
+	switch (hw->mac.type) {
+	case I40E_MAC_XL710:
+		if (aq->api_maj_ver > 1 ||
+		    (aq->api_maj_ver == 1 &&
+		     aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
+			hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+			hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+			/* The ability to RX (not drop) 802.1ad frames */
+			hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
+		}
+		break;
+	case I40E_MAC_X722:
+		hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
+			     I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+
+		if (aq->api_maj_ver > 1 ||
+		    (aq->api_maj_ver == 1 &&
+		     aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
+			hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+
+		if (aq->api_maj_ver > 1 ||
+		    (aq->api_maj_ver == 1 &&
+		     aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722))
+			hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+		/* fall through */
+	default:
+		break;
+	}
+
+	/* Newer versions of firmware require lock when reading the NVM */
+	if (aq->api_maj_ver > 1 ||
+	    (aq->api_maj_ver == 1 &&
+	     aq->api_min_ver >= 5))
+		hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+
+	if (aq->api_maj_ver > 1 ||
+	    (aq->api_maj_ver == 1 &&
+	     aq->api_min_ver >= 8)) {
+		hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
+		hw->flags |= I40E_HW_FLAG_DROP_MODE;
+	}
+
+	if (aq->api_maj_ver > 1 ||
+	    (aq->api_maj_ver == 1 &&
+	     aq->api_min_ver >= 9))
+		hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED;
+}
+
 /**
  *  i40e_init_adminq - main initialization routine for Admin Queue
  *  @hw: pointer to the hardware structure
@@ -543,19 +599,22 @@ shutdown_arq_out:
  **/
 i40e_status i40e_init_adminq(struct i40e_hw *hw)
 {
+	struct i40e_adminq_info *aq = &hw->aq;
+	i40e_status ret_code;
 	u16 cfg_ptr, oem_hi, oem_lo;
 	u16 eetrack_lo, eetrack_hi;
-	i40e_status ret_code;
 	int retry = 0;
 
 	/* verify input for valid configuration */
-	if ((hw->aq.num_arq_entries == 0) ||
-	    (hw->aq.num_asq_entries == 0) ||
-	    (hw->aq.arq_buf_size == 0) ||
-	    (hw->aq.asq_buf_size == 0)) {
+	if (aq->num_arq_entries == 0 ||
+	    aq->num_asq_entries == 0 ||
+	    aq->arq_buf_size == 0 ||
+	    aq->asq_buf_size == 0) {
 		ret_code = I40E_ERR_CONFIG;
 		goto init_adminq_exit;
 	}
+	i40e_init_spinlock(&aq->asq_spinlock);
+	i40e_init_spinlock(&aq->arq_spinlock);
 
 	/* Set up register offsets */
 	i40e_adminq_init_regs(hw);
@@ -565,12 +624,12 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
 
 	/* allocate the ASQ */
 	ret_code = i40e_init_asq(hw);
-	if (ret_code)
-		goto init_adminq_destroy_locks;
+	if (ret_code != I40E_SUCCESS)
+		goto init_adminq_destroy_spinlocks;
 
 	/* allocate the ARQ */
 	ret_code = i40e_init_arq(hw);
-	if (ret_code)
+	if (ret_code != I40E_SUCCESS)
 		goto init_adminq_free_asq;
 
 	/* There are some cases where the firmware may not be quite ready
@@ -579,11 +638,11 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
 	 */
 	do {
 		ret_code = i40e_aq_get_firmware_version(hw,
-							&hw->aq.fw_maj_ver,
-							&hw->aq.fw_min_ver,
-							&hw->aq.fw_build,
-							&hw->aq.api_maj_ver,
-							&hw->aq.api_min_ver,
+							&aq->fw_maj_ver,
+							&aq->fw_min_ver,
+							&aq->fw_build,
+							&aq->api_maj_ver,
+							&aq->api_min_ver,
 							NULL);
 		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
 			break;
@@ -594,6 +653,12 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
 	if (ret_code != I40E_SUCCESS)
 		goto init_adminq_free_arq;
 
+	/*
+	 * Some features were introduced in different FW API versions
+	 * for different MAC types.
+	 */
+	i40e_set_hw_flags(hw);
+
 	/* get the NVM version info */
 	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
 			   &hw->nvm.version);
@@ -601,13 +666,11 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
 	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
 	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
 	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
-	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
-			   &oem_hi);
-	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
-			   &oem_lo);
+	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF), &oem_hi);
+	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)), &oem_lo);
 	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
 
-	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
+	if (aq->api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
 		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
 		goto init_adminq_free_arq;
 	}
@@ -617,7 +680,7 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
 	hw->nvm_release_on_done = false;
 	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
 
-	ret_code = 0;
+	ret_code = I40E_SUCCESS;
 
 	/* success! */
 	goto init_adminq_exit;
@@ -626,7 +689,9 @@ init_adminq_free_arq:
 	i40e_shutdown_arq(hw);
 init_adminq_free_asq:
 	i40e_shutdown_asq(hw);
-init_adminq_destroy_locks:
+init_adminq_destroy_spinlocks:
+	i40e_destroy_spinlock(&aq->asq_spinlock);
+	i40e_destroy_spinlock(&aq->arq_spinlock);
 
 init_adminq_exit:
 	return ret_code;
@@ -638,13 +703,15 @@ init_adminq_exit:
  **/
 i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
 {
-	i40e_status ret_code = 0;
+	i40e_status ret_code = I40E_SUCCESS;
 
 	if (i40e_check_asq_alive(hw))
 		i40e_aq_queue_shutdown(hw, true);
 
 	i40e_shutdown_asq(hw);
 	i40e_shutdown_arq(hw);
+	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
+	i40e_destroy_spinlock(&hw->aq.arq_spinlock);
 
 	if (hw->nvm_buff.va)
 		i40e_free_virt_mem(hw, &hw->nvm_buff);
@@ -669,17 +736,18 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
 	desc = I40E_ADMINQ_DESC(*asq, ntc);
 	details = I40E_ADMINQ_DETAILS(*asq, ntc);
 	while (rd32(hw, hw->aq.asq.head) != ntc) {
-		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+		i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
 			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
 
 		if (details->callback) {
 			I40E_ADMINQ_CALLBACK cb_func =
 					(I40E_ADMINQ_CALLBACK)details->callback;
-			desc_cb = *desc;
+			i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
+				    I40E_DMA_TO_DMA);
 			cb_func(hw, &desc_cb);
 		}
-		memset(desc, 0, sizeof(*desc));
-		memset(details, 0, sizeof(*details));
+		i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
+		i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
 		ntc++;
 		if (ntc == asq->count)
 			ntc = 0;
@@ -709,23 +777,26 @@ static bool i40e_asq_done(struct i40e_hw *hw)
 }
 
 /**
- *  i40e_asq_send_command - send command to Admin Queue
+ *  i40e_asq_send_command_atomic - send command to Admin Queue
  *  @hw: pointer to the hw struct
  *  @desc: prefilled descriptor describing the command (non DMA mem)
  *  @buff: buffer to use for indirect commands
  *  @buff_size: size of buffer for indirect commands
  *  @cmd_details: pointer to command details structure
+ *  @is_atomic_context: true if called from an atomic context
  *
  *  This is the main send command driver routine for the Admin Queue send
  *  queue.  It runs the queue, cleans the queue, etc
  **/
-i40e_status i40e_asq_send_command(struct i40e_hw *hw,
-				struct i40e_aq_desc *desc,
-				void *buff, /* can be NULL */
-				u16  buff_size,
-				struct i40e_asq_cmd_details *cmd_details)
+enum i40e_status_code
+i40e_asq_send_command_atomic(struct i40e_hw *hw,
+			     struct i40e_aq_desc *desc,
+			     void *buff, /* can be NULL */
+			     u16  buff_size,
+			     struct i40e_asq_cmd_details *cmd_details,
+			     bool is_atomic_context)
 {
-	i40e_status status = 0;
+	i40e_status status = I40E_SUCCESS;
 	struct i40e_dma_mem *dma_buff = NULL;
 	struct i40e_asq_cmd_details *details;
 	struct i40e_aq_desc *desc_on_ring;
@@ -733,7 +804,9 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
 	u16  retval = 0;
 	u32  val = 0;
 
-	mutex_lock(&hw->aq.asq_mutex);
+	i40e_acquire_spinlock(&hw->aq.asq_spinlock);
+
+	hw->aq.asq_last_status = I40E_AQ_RC_OK;
 
 	if (hw->aq.asq.count == 0) {
 		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
@@ -742,37 +815,40 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
 		goto asq_send_command_error;
 	}
 
-	hw->aq.asq_last_status = I40E_AQ_RC_OK;
-
 	val = rd32(hw, hw->aq.asq.head);
 	if (val >= hw->aq.num_asq_entries) {
 		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
 			   "AQTX: head overrun at %d\n", val);
-		status = I40E_ERR_QUEUE_EMPTY;
+		status = I40E_ERR_ADMIN_QUEUE_FULL;
 		goto asq_send_command_error;
 	}
 
 	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
 	if (cmd_details) {
-		*details = *cmd_details;
+		i40e_memcpy(details,
+			    cmd_details,
+			    sizeof(struct i40e_asq_cmd_details),
+			    I40E_NONDMA_TO_NONDMA);
 
 		/* If the cmd_details are defined copy the cookie.  The
-		 * cpu_to_le32 is not needed here because the data is ignored
+		 * CPU_TO_LE32 is not needed here because the data is ignored
 		 * by the FW, only used by the driver
 		 */
 		if (details->cookie) {
 			desc->cookie_high =
-				cpu_to_le32(upper_32_bits(details->cookie));
+				CPU_TO_LE32(upper_32_bits(details->cookie));
 			desc->cookie_low =
-				cpu_to_le32(lower_32_bits(details->cookie));
+				CPU_TO_LE32(lower_32_bits(details->cookie));
 		}
 	} else {
-		memset(details, 0, sizeof(struct i40e_asq_cmd_details));
+		i40e_memset(details, 0,
+			    sizeof(struct i40e_asq_cmd_details),
+			    I40E_NONDMA_MEM);
 	}
 
 	/* clear requested flags and then set additional flags if defined */
-	desc->flags &= ~cpu_to_le16(details->flags_dis);
-	desc->flags |= cpu_to_le16(details->flags_ena);
+	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
+	desc->flags |= CPU_TO_LE16(details->flags_ena);
 
 	if (buff_size > hw->aq.asq_buf_size) {
 		i40e_debug(hw,
@@ -810,26 +886,28 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
 	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
 
 	/* if the desc is available copy the temp desc to the right place */
-	*desc_on_ring = *desc;
+	i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
+		    I40E_NONDMA_TO_DMA);
 
 	/* if buff is not NULL assume indirect command */
 	if (buff != NULL) {
 		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
 		/* copy the user buff into the respective DMA buff */
-		memcpy(dma_buff->va, buff, buff_size);
-		desc_on_ring->datalen = cpu_to_le16(buff_size);
+		i40e_memcpy(dma_buff->va, buff, buff_size,
+			    I40E_NONDMA_TO_DMA);
+		desc_on_ring->datalen = CPU_TO_LE16(buff_size);
 
 		/* Update the address values in the desc with the pa value
 		 * for respective buffer
 		 */
 		desc_on_ring->params.external.addr_high =
-				cpu_to_le32(upper_32_bits(dma_buff->pa));
+				CPU_TO_LE32(upper_32_bits(dma_buff->pa));
 		desc_on_ring->params.external.addr_low =
-				cpu_to_le32(lower_32_bits(dma_buff->pa));
+				CPU_TO_LE32(lower_32_bits(dma_buff->pa));
 	}
 
 	/* bump the tail */
-	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
+	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQTX: desc and buffer:\n");
 	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
 		      buff, buff_size);
 	(hw->aq.asq.next_to_use)++;
@@ -850,17 +928,22 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
 			 */
 			if (i40e_asq_done(hw))
 				break;
-			udelay(50);
+			if (is_atomic_context)
+				udelay(50);
+			else
+				usleep_range(40, 60);
 			total_delay += 50;
 		} while (total_delay < hw->aq.asq_cmd_timeout);
 	}
 
 	/* if ready, copy the desc back to temp */
 	if (i40e_asq_done(hw)) {
-		*desc = *desc_on_ring;
+		i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
+			    I40E_DMA_TO_NONDMA);
 		if (buff != NULL)
-			memcpy(buff, dma_buff->va, buff_size);
-		retval = le16_to_cpu(desc->retval);
+			i40e_memcpy(buff, dma_buff->va, buff_size,
+				    I40E_DMA_TO_NONDMA);
+		retval = LE16_TO_CPU(desc->retval);
 		if (retval != 0) {
 			i40e_debug(hw,
 				   I40E_DEBUG_AQ_MESSAGE,
@@ -872,34 +955,54 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
 		}
 		cmd_completed = true;
 		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
-			status = 0;
+			status = I40E_SUCCESS;
+		else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
+			status = I40E_ERR_NOT_READY;
 		else
 			status = I40E_ERR_ADMIN_QUEUE_ERROR;
 		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
 	}
 
-	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
 		   "AQTX: desc and buffer writeback:\n");
 	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
 
 	/* save writeback aq if requested */
 	if (details->wb_desc)
-		*details->wb_desc = *desc_on_ring;
+		i40e_memcpy(details->wb_desc, desc_on_ring,
+			    sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);
 
 	/* update the error if time out occurred */
 	if ((!cmd_completed) &&
 	    (!details->async && !details->postpone)) {
-		i40e_debug(hw,
-			   I40E_DEBUG_AQ_MESSAGE,
-			   "AQTX: Writeback timeout.\n");
-		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+		if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
+			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+				   "AQTX: AQ Critical error.\n");
+			status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
+		} else {
+			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+				   "AQTX: Writeback timeout.\n");
+			status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+		}
 	}
 
 asq_send_command_error:
-	mutex_unlock(&hw->aq.asq_mutex);
+	i40e_release_spinlock(&hw->aq.asq_spinlock);
 	return status;
 }
 
+/* Wrapper with the previous signature so that existing callers of
+ * i40e_asq_send_command need not be modified.
+ */
+inline i40e_status i40e_asq_send_command(struct i40e_hw *hw,
+				struct i40e_aq_desc *desc,
+				void *buff, /* can be NULL */
+				u16  buff_size,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	return i40e_asq_send_command_atomic(hw, desc, buff, buff_size,
+					cmd_details, false);
+}
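/* Caller sketch, not part of the patch: an atomic-context caller opts into
 * udelay() busy-waiting instead of usleep_range() via the new last argument:
 *
 *	struct i40e_aq_desc desc;
 *
 *	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
 *	status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0, NULL, true);
 */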
+
 /**
  *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
  *  @desc:     pointer to the temp descriptor (non DMA mem)
@@ -911,9 +1014,10 @@ void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
 				       u16 opcode)
 {
 	/* zero out the desc */
-	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
-	desc->opcode = cpu_to_le16(opcode);
-	desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
+	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
+		    I40E_NONDMA_MEM);
+	desc->opcode = CPU_TO_LE16(opcode);
+	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
 }
 
 /**
@@ -930,7 +1034,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
 					     struct i40e_arq_event_info *e,
 					     u16 *pending)
 {
-	i40e_status ret_code = 0;
+	i40e_status ret_code = I40E_SUCCESS;
 	u16 ntc = hw->aq.arq.next_to_clean;
 	struct i40e_aq_desc *desc;
 	struct i40e_dma_mem *bi;
@@ -940,10 +1044,10 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
 	u16 ntu;
 
 	/* pre-clean the event info */
-	memset(&e->desc, 0, sizeof(e->desc));
+	i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);
 
 	/* take the lock before we start messing with the ring */
-	mutex_lock(&hw->aq.arq_mutex);
+	i40e_acquire_spinlock(&hw->aq.arq_spinlock);
 
 	if (hw->aq.arq.count == 0) {
 		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
@@ -953,7 +1057,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
 	}
 
 	/* set next_to_use to head */
-	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+	ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
 	if (ntu == ntc) {
 		/* nothing to do - shouldn't need to update ring's values */
 		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
@@ -965,8 +1069,8 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
 	desc_idx = ntc;
 
 	hw->aq.arq_last_status =
-		(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
-	flags = le16_to_cpu(desc->flags);
+		(enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
+	flags = LE16_TO_CPU(desc->flags);
 	if (flags & I40E_AQ_FLAG_ERR) {
 		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
 		i40e_debug(hw,
@@ -975,14 +1079,16 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
 			   hw->aq.arq_last_status);
 	}
 
-	e->desc = *desc;
-	datalen = le16_to_cpu(desc->datalen);
+	i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
+		    I40E_DMA_TO_NONDMA);
+	datalen = LE16_TO_CPU(desc->datalen);
 	e->msg_len = min(datalen, e->buf_len);
 	if (e->msg_buf != NULL && (e->msg_len != 0))
-		memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
-		       e->msg_len);
+		i40e_memcpy(e->msg_buf,
+			    hw->aq.arq.r.arq_bi[desc_idx].va,
+			    e->msg_len, I40E_DMA_TO_NONDMA);
 
-	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
+	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQRX: desc and buffer:\n");
 	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
 		      hw->aq.arq_buf_size);
 
@@ -991,14 +1097,14 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
 	 * size
 	 */
 	bi = &hw->aq.arq.r.arq_bi[ntc];
-	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);
 
-	desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
 	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
-		desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
-	desc->datalen = cpu_to_le16((u16)bi->size);
-	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
-	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
+		desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
+	desc->datalen = CPU_TO_LE16((u16)bi->size);
+	desc->params.external.addr_high = CPU_TO_LE32(upper_32_bits(bi->pa));
+	desc->params.external.addr_low = CPU_TO_LE32(lower_32_bits(bi->pa));
 
 	/* set tail = the last cleaned desc index. */
 	wr32(hw, hw->aq.arq.tail, ntc);
@@ -1009,27 +1115,14 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
 	hw->aq.arq.next_to_clean = ntc;
 	hw->aq.arq.next_to_use = ntu;
 
-	i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode));
+	i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode), &e->desc);
 clean_arq_element_out:
 	/* Set pending if needed, unlock and return */
-	if (pending)
+	if (pending != NULL)
 		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
 clean_arq_element_err:
-	mutex_unlock(&hw->aq.arq_mutex);
+	i40e_release_spinlock(&hw->aq.arq_spinlock);
 
 	return ret_code;
 }
 
-static void i40e_resume_aq(struct i40e_hw *hw)
-{
-	/* Registers are reset after PF reset */
-	hw->aq.asq.next_to_use = 0;
-	hw->aq.asq.next_to_clean = 0;
-
-	i40e_config_asq_regs(hw);
-
-	hw->aq.arq.next_to_use = 0;
-	hw->aq.arq.next_to_clean = 0;
-
-	i40e_config_arq_regs(hw);
-}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 2349fbe04..f0af697c4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -1,28 +1,5 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
 #ifndef _I40E_ADMINQ_H_
 #define _I40E_ADMINQ_H_
@@ -98,8 +75,8 @@ struct i40e_adminq_info {
 	u16 api_maj_ver;                /* api major version */
 	u16 api_min_ver;                /* api minor version */
 
-	struct mutex asq_mutex; /* Send queue lock */
-	struct mutex arq_mutex; /* Receive queue lock */
+	struct i40e_spinlock asq_spinlock; /* Send queue spinlock */
+	struct i40e_spinlock arq_spinlock; /* Receive queue spinlock */
 
 	/* last status values on send and receive queues */
 	enum i40e_admin_queue_err asq_last_status;
@@ -111,7 +88,7 @@ struct i40e_adminq_info {
  * aq_ret: AdminQ handler error code can override aq_rc
  * aq_rc: AdminQ firmware error code to convert
  **/
-static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
+static INLINE int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
 {
 	int aq_to_posix[] = {
 		0,           /* I40E_AQ_RC_OK */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 5d5f422cb..f4940fe53 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -1,28 +1,5 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
 #ifndef _I40E_ADMINQ_CMD_H_
 #define _I40E_ADMINQ_CMD_H_
@@ -34,7 +11,19 @@
  */
 
 #define I40E_FW_API_VERSION_MAJOR	0x0001
-#define I40E_FW_API_VERSION_MINOR	0x0005
+#define I40E_FW_API_VERSION_MINOR_X722	0x0009
+#define I40E_FW_API_VERSION_MINOR_X710	0x000A
+
+#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
+					I40E_FW_API_VERSION_MINOR_X710 : \
+					I40E_FW_API_VERSION_MINOR_X722)
+
+/* API version 1.7 implements additional link and PHY-specific APIs  */
+#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
+/* API version 1.9 for X722 implements additional link and PHY-specific APIs */
+#define I40E_MINOR_VER_GET_LINK_INFO_X722 0x0009
+/* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */
+#define I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006
 
 struct i40e_aq_desc {
 	__le16 flags;
@@ -78,17 +67,17 @@ struct i40e_aq_desc {
 #define I40E_AQ_FLAG_EI_SHIFT	14
 #define I40E_AQ_FLAG_FE_SHIFT	15
 
-#define I40E_AQ_FLAG_DD		BIT(I40E_AQ_FLAG_DD_SHIFT)  /* 0x1    */
-#define I40E_AQ_FLAG_CMP	BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2    */
-#define I40E_AQ_FLAG_ERR	BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4    */
-#define I40E_AQ_FLAG_VFE	BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8    */
-#define I40E_AQ_FLAG_LB		BIT(I40E_AQ_FLAG_LB_SHIFT)  /* 0x200  */
-#define I40E_AQ_FLAG_RD		BIT(I40E_AQ_FLAG_RD_SHIFT)  /* 0x400  */
-#define I40E_AQ_FLAG_VFC	BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800  */
-#define I40E_AQ_FLAG_BUF	BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
-#define I40E_AQ_FLAG_SI		BIT(I40E_AQ_FLAG_SI_SHIFT)  /* 0x2000 */
-#define I40E_AQ_FLAG_EI		BIT(I40E_AQ_FLAG_EI_SHIFT)  /* 0x4000 */
-#define I40E_AQ_FLAG_FE		BIT(I40E_AQ_FLAG_FE_SHIFT)  /* 0x8000 */
+#define I40E_AQ_FLAG_DD		(1 << I40E_AQ_FLAG_DD_SHIFT)  /* 0x1    */
+#define I40E_AQ_FLAG_CMP	(1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2    */
+#define I40E_AQ_FLAG_ERR	(1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4    */
+#define I40E_AQ_FLAG_VFE	(1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8    */
+#define I40E_AQ_FLAG_LB		(1 << I40E_AQ_FLAG_LB_SHIFT)  /* 0x200  */
+#define I40E_AQ_FLAG_RD		(1 << I40E_AQ_FLAG_RD_SHIFT)  /* 0x400  */
+#define I40E_AQ_FLAG_VFC	(1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800  */
+#define I40E_AQ_FLAG_BUF	(1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI		(1 << I40E_AQ_FLAG_SI_SHIFT)  /* 0x2000 */
+#define I40E_AQ_FLAG_EI		(1 << I40E_AQ_FLAG_EI_SHIFT)  /* 0x4000 */
+#define I40E_AQ_FLAG_FE		(1 << I40E_AQ_FLAG_FE_SHIFT)  /* 0x8000 */
 
 /* error codes */
 enum i40e_admin_queue_err {
@@ -146,6 +135,7 @@ enum i40e_admin_queue_opc {
 	/* WoL commands */
 	i40e_aqc_opc_set_wol_filter	= 0x0120,
 	i40e_aqc_opc_get_wake_reason	= 0x0121,
+	i40e_aqc_opc_clear_all_wol_filters = 0x025E,
 
 	/* internal switch commands */
 	i40e_aqc_opc_get_switch_config		= 0x0200,
@@ -186,17 +176,19 @@ enum i40e_admin_queue_opc {
 	i40e_aqc_opc_add_cloud_filters		= 0x025C,
 	i40e_aqc_opc_remove_cloud_filters	= 0x025D,
 	i40e_aqc_opc_clear_wol_switch_filters	= 0x025E,
+	i40e_aqc_opc_replace_cloud_filters	= 0x025F,
 
 	i40e_aqc_opc_add_mirror_rule	= 0x0260,
 	i40e_aqc_opc_delete_mirror_rule	= 0x0261,
 
-	/* Pipeline Personalization Profile */
+	/* Dynamic Device Personalization */
 	i40e_aqc_opc_write_personalization_profile	= 0x0270,
 	i40e_aqc_opc_get_personalization_profile_list	= 0x0271,
 
 	/* DCB commands */
 	i40e_aqc_opc_dcb_ignore_pfc	= 0x0301,
 	i40e_aqc_opc_dcb_updated	= 0x0302,
+	i40e_aqc_opc_set_dcb_parameters = 0x0303,
 
 	/* TX scheduler */
 	i40e_aqc_opc_configure_vsi_bw_limit		= 0x0400,
@@ -221,6 +213,8 @@ enum i40e_admin_queue_opc {
 	i40e_aqc_opc_query_hmc_resource_profile	= 0x0500,
 	i40e_aqc_opc_set_hmc_resource_profile	= 0x0501,
 
 	/* phy commands*/
 	i40e_aqc_opc_get_phy_abilities		= 0x0600,
 	i40e_aqc_opc_set_phy_config		= 0x0601,
@@ -236,6 +230,8 @@ enum i40e_admin_queue_opc {
 	i40e_aqc_opc_set_phy_debug		= 0x0622,
 	i40e_aqc_opc_upload_ext_phy_fm		= 0x0625,
 	i40e_aqc_opc_run_phy_activity		= 0x0626,
+	i40e_aqc_opc_set_phy_register		= 0x0628,
+	i40e_aqc_opc_get_phy_register		= 0x0629,
 
 	/* NVM commands */
 	i40e_aqc_opc_nvm_read			= 0x0701,
@@ -243,6 +239,7 @@ enum i40e_admin_queue_opc {
 	i40e_aqc_opc_nvm_update			= 0x0703,
 	i40e_aqc_opc_nvm_config_read		= 0x0704,
 	i40e_aqc_opc_nvm_config_write		= 0x0705,
+	i40e_aqc_opc_nvm_progress		= 0x0706,
 	i40e_aqc_opc_oem_post_update		= 0x0720,
 	i40e_aqc_opc_thermal_sensor		= 0x0721,
 
@@ -271,6 +268,7 @@ enum i40e_admin_queue_opc {
 	i40e_aqc_opc_get_cee_dcb_cfg	= 0x0A07,
 	i40e_aqc_opc_lldp_set_local_mib	= 0x0A08,
 	i40e_aqc_opc_lldp_stop_start_spec_agent	= 0x0A09,
+	i40e_aqc_opc_lldp_restore		= 0x0A0A,
 
 	/* Tunnel commands */
 	i40e_aqc_opc_add_udp_tunnel	= 0x0B00,
@@ -531,6 +529,7 @@ struct i40e_aqc_mac_address_read {
 #define I40E_AQC_PORT_ADDR_VALID	0x40
 #define I40E_AQC_WOL_ADDR_VALID		0x80
 #define I40E_AQC_MC_MAG_EN_VALID	0x100
+#define I40E_AQC_WOL_PRESERVE_STATUS	0x200
 #define I40E_AQC_ADDR_VALID_MASK	0x3F0
 	u8	reserved[6];
 	__le32	addr_high;
@@ -591,6 +590,7 @@ struct i40e_aqc_set_wol_filter {
 	__le16 cmd_flags;
 #define I40E_AQC_SET_WOL_FILTER				0x8000
 #define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL		0x4000
+#define I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR	0x2000
 #define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR		0
 #define I40E_AQC_SET_WOL_FILTER_ACTION_SET		1
 	__le16 valid_flags;
@@ -764,8 +764,52 @@ struct i40e_aqc_set_switch_config {
 /* flags used for both fields below */
 #define I40E_AQ_SET_SWITCH_CFG_PROMISC		0x0001
 #define I40E_AQ_SET_SWITCH_CFG_L2_FILTER	0x0002
+#define I40E_AQ_SET_SWITCH_CFG_HW_ATR_EVICT	0x0004
 	__le16	valid_flags;
-	u8	reserved[12];
+	/* The ethertype in switch_tag is dropped on ingress and used
+	 * internally by the switch. Set this to zero for the default
+	 * of 0x88a8 (802.1ad). Should be zero for firmware API
+	 * versions lower than 1.7.
+	 */
+	__le16	switch_tag;
+	/* The ethertypes in first_tag and second_tag are used to
+	 * match the outer and inner VLAN tags (respectively) when HW
+	 * double VLAN tagging is enabled via the set port parameters
+	 * AQ command. Otherwise these are both ignored. Set them to
+	 * zero for their defaults of 0x8100 (802.1Q). Should be zero
+	 * for firmware API versions lower than 1.7.
+	 */
+	__le16	first_tag;
+	__le16	second_tag;
+	/* Next byte is split into following:
+	 * Bit 7    : 0 : No action, 1: Switch to mode defined by bits 6:0
+	 * Bit 6    : 0 : Destination Port, 1: source port
+	 * Bit 5..4 : L4 type
+	 * 0: rsvd
+	 * 1: TCP
+	 * 2: UDP
+	 * 3: Both TCP and UDP
+	 * Bits 3:0 Mode
+	 * 0: default mode
+	 * 1: L4 port only mode
+	 * 2: non-tunneled mode
+	 * 3: tunneled mode
+	 */
+#define I40E_AQ_SET_SWITCH_BIT7_VALID		0x80
+
+#define I40E_AQ_SET_SWITCH_L4_SRC_PORT		0x40
+
+#define I40E_AQ_SET_SWITCH_L4_TYPE_RSVD		0x00
+#define I40E_AQ_SET_SWITCH_L4_TYPE_TCP		0x10
+#define I40E_AQ_SET_SWITCH_L4_TYPE_UDP		0x20
+#define I40E_AQ_SET_SWITCH_L4_TYPE_BOTH		0x30
+
+#define I40E_AQ_SET_SWITCH_MODE_DEFAULT		0x00
+#define I40E_AQ_SET_SWITCH_MODE_L4_PORT		0x01
+#define I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL	0x02
+#define I40E_AQ_SET_SWITCH_MODE_TUNNEL		0x03
+	u8	mode;
+	u8	rsvd5[5];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config);
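
To make the bit layout documented in the new mode byte concrete, here is an illustrative sketch (function name invented, command setup elided) composing that byte for L4-port filtering on TCP source ports:

	/* Compose the 'mode' byte of i40e_aqc_set_switch_config per the
	 * layout above: bit 7 validates bits 6:0, bit 6 selects source
	 * port, bits 5:4 pick the L4 type, bits 3:0 pick the mode.
	 */
	static u8 i40e_switch_mode_l4_src_tcp(void)
	{
		return I40E_AQ_SET_SWITCH_BIT7_VALID |	/* apply bits 6:0 */
		       I40E_AQ_SET_SWITCH_L4_SRC_PORT |	/* match source port */
		       I40E_AQ_SET_SWITCH_L4_TYPE_TCP |	/* L4 type: TCP */
		       I40E_AQ_SET_SWITCH_MODE_L4_PORT;	/* L4 port only mode */
	}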
@@ -1318,14 +1362,17 @@ struct i40e_aqc_add_remove_cloud_filters {
 #define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT	0
 #define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK	(0x3FF << \
 					I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
-	u8	reserved2[4];
+	u8	big_buffer_flag;
+#define I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER	1
+#define I40E_AQC_ADD_CLOUD_CMD_BB		1
+	u8	reserved2[3];
 	__le32	addr_high;
 	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
 
-struct i40e_aqc_add_remove_cloud_filters_element_data {
+struct i40e_aqc_cloud_filters_element_data {
 	u8	outer_mac[6];
 	u8	inner_mac[6];
 	__le16	inner_vlan;
@@ -1337,13 +1384,16 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
 		struct {
 			u8 data[16];
 		} v6;
+		struct {
+			__le16 data[8];
+		} raw_v6;
 	} ipaddr;
 	__le16	flags;
 #define I40E_AQC_ADD_CLOUD_FILTER_SHIFT			0
 #define I40E_AQC_ADD_CLOUD_FILTER_MASK	(0x3F << \
 					I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
 /* 0x0000 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_OIP			0x0001
+/* 0x0001 reserved */
 /* 0x0002 reserved */
 #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN		0x0003
 #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID	0x0004
@@ -1355,6 +1405,13 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
 #define I40E_AQC_ADD_CLOUD_FILTER_IMAC			0x000A
 #define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC	0x000B
 #define I40E_AQC_ADD_CLOUD_FILTER_IIP			0x000C
+/* 0x000D reserved */
+/* 0x000E reserved */
+/* 0x000F reserved */
+/* 0x0010 to 0x0017 is for custom filters */
+#define I40E_AQC_ADD_CLOUD_FILTER_IP_PORT		0x0010 /* Dest IP + L4 Port */
+#define I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT		0x0011 /* Dest MAC + L4 Port */
+#define I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT		0x0012 /* Dest MAC + VLAN + L4 Port */
 
 #define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE		0x0080
 #define I40E_AQC_ADD_CLOUD_VNK_SHIFT			6
@@ -1389,6 +1446,88 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
 	u8	response_reserved[7];
 };
 
+/* i40e_aqc_add_rm_cloud_filt_elem_ext is used when
+ * I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER flag is set.
+ */
+struct i40e_aqc_add_rm_cloud_filt_elem_ext {
+	struct i40e_aqc_cloud_filters_element_data element;
+	u16     general_fields[32];
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0	0
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1	1
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2	2
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0	3
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1	4
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2	5
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0	6
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1	7
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2	8
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0	9
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1	10
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2	11
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0	12
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1	13
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2	14
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0	15
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1	16
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2	17
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3	18
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4	19
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5	20
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6	21
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7	22
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0	23
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1	24
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2	25
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3	26
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4	27
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5	28
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6	29
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7	30
+};
+
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_cloud_filters_element_data);
+
+/* i40e_aqc_cloud_filters_element_bb is used when
+ * I40E_AQC_CLOUD_CMD_BB flag is set.
+ */
+struct i40e_aqc_cloud_filters_element_bb {
+	struct i40e_aqc_cloud_filters_element_data element;
+	u16     general_fields[32];
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0	0
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1	1
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2	2
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0	3
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1	4
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2	5
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0	6
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1	7
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2	8
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0	9
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1	10
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2	11
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0	12
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1	13
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2	14
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0	15
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1	16
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2	17
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3	18
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4	19
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5	20
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6	21
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7	22
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0	23
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1	24
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2	25
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3	26
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4	27
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5	28
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6	29
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7	30
+};
+
+I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_cloud_filters_element_bb);
+
 struct i40e_aqc_remove_cloud_filters_completion {
 	__le16 perfect_ovlan_used;
 	__le16 perfect_ovlan_free;
@@ -1400,6 +1539,61 @@ struct i40e_aqc_remove_cloud_filters_completion {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
 
+/* Replace filter Command 0x025F
+ * uses the i40e_aqc_replace_cloud_filters,
+ * and the generic indirect completion structure
+ */
+struct i40e_filter_data {
+	u8 filter_type;
+	u8 input[3];
+};
+
+I40E_CHECK_STRUCT_LEN(4, i40e_filter_data);
+
+struct i40e_aqc_replace_cloud_filters_cmd {
+	u8	valid_flags;
+#define I40E_AQC_REPLACE_L1_FILTER		0x0
+#define I40E_AQC_REPLACE_CLOUD_FILTER		0x1
+#define I40E_AQC_GET_CLOUD_FILTERS		0x2
+#define I40E_AQC_MIRROR_CLOUD_FILTER		0x4
+#define I40E_AQC_HIGH_PRIORITY_CLOUD_FILTER	0x8
+	u8	old_filter_type;
+	u8	new_filter_type;
+	u8	tr_bit;
+	u8	tr_bit2;
+	u8	reserved[3];
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_replace_cloud_filters_cmd);
+
+struct i40e_aqc_replace_cloud_filters_cmd_buf {
+	u8	data[32];
+/* Filter type INPUT codes*/
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX	3
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED	(1 << 7UL)
+
+/* Field Vector offsets */
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA		0
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH		6
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG		7
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN		8
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN		9
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN		10
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY		11
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC		12
+/* big FLU */
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA		14
+/* big FLU */
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA		15
+
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN		37
+	struct i40e_filter_data	filters[8];
+};
+
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_replace_cloud_filters_cmd_buf);
+
 /* Add Mirror Rule (indirect or direct 0x0260)
  * Delete Mirror Rule (indirect or direct 0x0261)
  * note: some rule types (4,5) do not use an external buffer.
@@ -1435,7 +1629,7 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
 
-/* Pipeline Personalization Profile */
+/* Dynamic Device Personalization */
 struct i40e_aqc_write_personalization_profile {
 	u8      flags;
 	u8      reserved[3];
@@ -1446,7 +1640,7 @@ struct i40e_aqc_write_personalization_profile {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile);
 
-struct i40e_aqc_write_ppp_resp {
+struct i40e_aqc_write_ddp_resp {
 	__le32 error_offset;
 	__le32 error_info;
 	__le32 addr_high;
@@ -1455,8 +1649,8 @@ struct i40e_aqc_write_ppp_resp {
 
 struct i40e_aqc_get_applied_profiles {
 	u8      flags;
-#define I40E_AQC_GET_PPP_GET_CONF	0x1
-#define I40E_AQC_GET_PPP_GET_RDPU_CONF	0x2
+#define I40E_AQC_GET_DDP_GET_CONF	0x1
+#define I40E_AQC_GET_DDP_GET_RDPU_CONF	0x2
 	u8      rsv[3];
 	__le32  reserved;
 	__le32  addr_high;
@@ -1726,6 +1920,8 @@ enum i40e_aq_phy_type {
 	I40E_PHY_TYPE_10GBASE_CR1_CU		= 0xB,
 	I40E_PHY_TYPE_10GBASE_AOC		= 0xC,
 	I40E_PHY_TYPE_40GBASE_AOC		= 0xD,
+	I40E_PHY_TYPE_UNRECOGNIZED		= 0xE,
+	I40E_PHY_TYPE_UNSUPPORTED		= 0xF,
 	I40E_PHY_TYPE_100BASE_TX		= 0x11,
 	I40E_PHY_TYPE_1000BASE_T		= 0x12,
 	I40E_PHY_TYPE_10GBASE_T			= 0x13,
@@ -1744,24 +1940,74 @@ enum i40e_aq_phy_type {
 	I40E_PHY_TYPE_25GBASE_CR		= 0x20,
 	I40E_PHY_TYPE_25GBASE_SR		= 0x21,
 	I40E_PHY_TYPE_25GBASE_LR		= 0x22,
-	I40E_PHY_TYPE_MAX
-};
-
+	I40E_PHY_TYPE_25GBASE_AOC		= 0x23,
+	I40E_PHY_TYPE_25GBASE_ACC		= 0x24,
+	I40E_PHY_TYPE_2_5GBASE_T		= 0x30,
+	I40E_PHY_TYPE_5GBASE_T			= 0x31,
+	I40E_PHY_TYPE_MAX,
+	I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP	= 0xFD,
+	I40E_PHY_TYPE_EMPTY			= 0xFE,
+	I40E_PHY_TYPE_DEFAULT			= 0xFF,
+};
+
+#define I40E_PHY_TYPES_BITMASK (BIT_ULL(I40E_PHY_TYPE_SGMII) | \
+				BIT_ULL(I40E_PHY_TYPE_1000BASE_KX) | \
+				BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4) | \
+				BIT_ULL(I40E_PHY_TYPE_10GBASE_KR) | \
+				BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4) | \
+				BIT_ULL(I40E_PHY_TYPE_XAUI) | \
+				BIT_ULL(I40E_PHY_TYPE_XFI) | \
+				BIT_ULL(I40E_PHY_TYPE_SFI) | \
+				BIT_ULL(I40E_PHY_TYPE_XLAUI) | \
+				BIT_ULL(I40E_PHY_TYPE_XLPPI) | \
+				BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU) | \
+				BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU) | \
+				BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC) | \
+				BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC) | \
+				BIT_ULL(I40E_PHY_TYPE_UNRECOGNIZED) | \
+				BIT_ULL(I40E_PHY_TYPE_UNSUPPORTED) | \
+				BIT_ULL(I40E_PHY_TYPE_100BASE_TX) | \
+				BIT_ULL(I40E_PHY_TYPE_1000BASE_T) | \
+				BIT_ULL(I40E_PHY_TYPE_10GBASE_T) | \
+				BIT_ULL(I40E_PHY_TYPE_10GBASE_SR) | \
+				BIT_ULL(I40E_PHY_TYPE_10GBASE_LR) | \
+				BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU) | \
+				BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1) | \
+				BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4) | \
+				BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4) | \
+				BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4) | \
+				BIT_ULL(I40E_PHY_TYPE_1000BASE_SX) | \
+				BIT_ULL(I40E_PHY_TYPE_1000BASE_LX) | \
+				BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL) | \
+				BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2) | \
+				BIT_ULL(I40E_PHY_TYPE_25GBASE_KR) | \
+				BIT_ULL(I40E_PHY_TYPE_25GBASE_CR) | \
+				BIT_ULL(I40E_PHY_TYPE_25GBASE_SR) | \
+				BIT_ULL(I40E_PHY_TYPE_25GBASE_LR) | \
+				BIT_ULL(I40E_PHY_TYPE_25GBASE_AOC) | \
+				BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC) | \
+				BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T) | \
+				BIT_ULL(I40E_PHY_TYPE_5GBASE_T))
+
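A short hedged sketch of how this bitmask can be used: since every enumerated PHY type is below 64, membership reduces to a single BIT_ULL test (the helper name is illustrative, not from the patch):

	/* True when an AQ-reported phy_type byte is one this header
	 * enumerates; guards the shift before testing the bitmask.
	 */
	static bool i40e_phy_type_known(u8 phy_type)
	{
		return phy_type < 64 &&
		       (I40E_PHY_TYPES_BITMASK & BIT_ULL(phy_type));
	}
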
+#define I40E_LINK_SPEED_2_5GB_SHIFT	0x0
 #define I40E_LINK_SPEED_100MB_SHIFT	0x1
 #define I40E_LINK_SPEED_1000MB_SHIFT	0x2
 #define I40E_LINK_SPEED_10GB_SHIFT	0x3
 #define I40E_LINK_SPEED_40GB_SHIFT	0x4
 #define I40E_LINK_SPEED_20GB_SHIFT	0x5
 #define I40E_LINK_SPEED_25GB_SHIFT	0x6
+#define I40E_LINK_SPEED_5GB_SHIFT	0x7
 
 enum i40e_aq_link_speed {
 	I40E_LINK_SPEED_UNKNOWN	= 0,
-	I40E_LINK_SPEED_100MB	= BIT(I40E_LINK_SPEED_100MB_SHIFT),
-	I40E_LINK_SPEED_1GB	= BIT(I40E_LINK_SPEED_1000MB_SHIFT),
-	I40E_LINK_SPEED_10GB	= BIT(I40E_LINK_SPEED_10GB_SHIFT),
-	I40E_LINK_SPEED_40GB	= BIT(I40E_LINK_SPEED_40GB_SHIFT),
-	I40E_LINK_SPEED_20GB	= BIT(I40E_LINK_SPEED_20GB_SHIFT),
-	I40E_LINK_SPEED_25GB	= BIT(I40E_LINK_SPEED_25GB_SHIFT),
+	I40E_LINK_SPEED_100MB	= (1 << I40E_LINK_SPEED_100MB_SHIFT),
+	I40E_LINK_SPEED_1GB	= (1 << I40E_LINK_SPEED_1000MB_SHIFT),
+	I40E_LINK_SPEED_2_5GB	= (1 << I40E_LINK_SPEED_2_5GB_SHIFT),
+	I40E_LINK_SPEED_5GB	= (1 << I40E_LINK_SPEED_5GB_SHIFT),
+	I40E_LINK_SPEED_10GB	= (1 << I40E_LINK_SPEED_10GB_SHIFT),
+	I40E_LINK_SPEED_40GB	= (1 << I40E_LINK_SPEED_40GB_SHIFT),
+	I40E_LINK_SPEED_20GB	= (1 << I40E_LINK_SPEED_20GB_SHIFT),
+	I40E_LINK_SPEED_25GB	= (1 << I40E_LINK_SPEED_25GB_SHIFT),
 };
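
As an aside, the speed bits decode naturally into Mb/s; a minimal sketch with an invented function name, assuming only the enum above:

	/* Map one i40e_aq_link_speed bit to Mb/s; 0 for unknown. */
	static u32 i40e_link_speed_mbps(enum i40e_aq_link_speed speed)
	{
		switch (speed) {
		case I40E_LINK_SPEED_100MB:	return 100;
		case I40E_LINK_SPEED_1GB:	return 1000;
		case I40E_LINK_SPEED_2_5GB:	return 2500;
		case I40E_LINK_SPEED_5GB:	return 5000;
		case I40E_LINK_SPEED_10GB:	return 10000;
		case I40E_LINK_SPEED_20GB:	return 20000;
		case I40E_LINK_SPEED_25GB:	return 25000;
		case I40E_LINK_SPEED_40GB:	return 40000;
		default:			return 0;
		}
	}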
 
 struct i40e_aqc_module_desc {
@@ -1787,20 +2033,27 @@ struct i40e_aq_get_phy_abilities_resp {
 #define I40E_AQ_PHY_FEC_ABILITY_KR	0x40
 #define I40E_AQ_PHY_FEC_ABILITY_RS	0x80
 	__le16	eee_capability;
+#define I40E_AQ_EEE_AUTO		0x0001
 #define I40E_AQ_EEE_100BASE_TX		0x0002
 #define I40E_AQ_EEE_1000BASE_T		0x0004
 #define I40E_AQ_EEE_10GBASE_T		0x0008
 #define I40E_AQ_EEE_1000BASE_KX		0x0010
 #define I40E_AQ_EEE_10GBASE_KX4		0x0020
 #define I40E_AQ_EEE_10GBASE_KR		0x0040
+#define I40E_AQ_EEE_2_5GBASE_T		0x0100
+#define I40E_AQ_EEE_5GBASE_T		0x0200
 	__le32	eeer_val;
 	u8	d3_lpan;
 #define I40E_AQ_SET_PHY_D3_LPAN_ENA	0x01
 	u8	phy_type_ext;
-#define I40E_AQ_PHY_TYPE_EXT_25G_KR	0X01
-#define I40E_AQ_PHY_TYPE_EXT_25G_CR	0X02
+#define I40E_AQ_PHY_TYPE_EXT_25G_KR	0x01
+#define I40E_AQ_PHY_TYPE_EXT_25G_CR	0x02
 #define I40E_AQ_PHY_TYPE_EXT_25G_SR	0x04
 #define I40E_AQ_PHY_TYPE_EXT_25G_LR	0x08
+#define I40E_AQ_PHY_TYPE_EXT_25G_AOC	0x10
+#define I40E_AQ_PHY_TYPE_EXT_25G_ACC	0x20
+#define I40E_AQ_PHY_TYPE_EXT_2_5GBASE_T	0x40
+#define I40E_AQ_PHY_TYPE_EXT_5GBASE_T	0x80
 	u8	fec_cfg_curr_mod_ext_info;
 #define I40E_AQ_ENABLE_FEC_KR		0x01
 #define I40E_AQ_ENABLE_FEC_RS		0x02
@@ -1834,10 +2087,6 @@ struct i40e_aq_set_phy_config { /* same bits as above in all */
 	__le32	eeer;
 	u8	low_power_ctrl;
 	u8	phy_type_ext;
-#define I40E_AQ_PHY_TYPE_EXT_25G_KR	0X01
-#define I40E_AQ_PHY_TYPE_EXT_25G_CR	0X02
-#define I40E_AQ_PHY_TYPE_EXT_25G_SR	0x04
-#define I40E_AQ_PHY_TYPE_EXT_25G_LR	0x08
 	u8	fec_config;
 #define I40E_AQ_SET_FEC_ABILITY_KR	BIT(0)
 #define I40E_AQ_SET_FEC_ABILITY_RS	BIT(1)
@@ -1855,20 +2104,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
 struct i40e_aq_set_mac_config {
 	__le16	max_frame_size;
 	u8	params;
-#define I40E_AQ_SET_MAC_CONFIG_CRC_EN		0x04
-#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK	0x78
-#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT	3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE	0x0
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX	0xF
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX	0x9
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX	0x8
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX	0x7
-#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX	0x6
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX	0x5
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX	0x4
-#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX	0x3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX	0x2
-#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX	0x1
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN			0x04
+#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK		0x78
+#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT		3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE		0x0
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX		0xF
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX		0x9
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX		0x8
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX		0x7
+#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX		0x6
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX		0x5
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX		0x4
+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX		0x3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX		0x2
+#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX		0x1
+#define I40E_AQ_SET_MAC_CONFIG_DROP_BLOCKING_PACKET_EN	0x80
 	u8	tx_timer_priority; /* bitmap */
 	__le16	tx_timer_value;
 	__le16	fc_refresh_threshold;
@@ -1934,19 +2184,31 @@ struct i40e_aqc_get_link_status {
 #define I40E_AQ_25G_SERDES_UCODE_ERR	0X04
 #define I40E_AQ_25G_NIMB_UCODE_ERR	0X05
 	u8	loopback; /* use defines from i40e_aqc_set_lb_mode */
+/* Since firmware API 1.7, the loopback field also carries power class info */
+#define I40E_AQ_LOOPBACK_MASK		0x07
+#define I40E_AQ_PWR_CLASS_SHIFT_LB	6
+#define I40E_AQ_PWR_CLASS_MASK_LB	(0x03 << I40E_AQ_PWR_CLASS_SHIFT_LB)
 	__le16	max_frame_size;
 	u8	config;
 #define I40E_AQ_CONFIG_FEC_KR_ENA	0x01
 #define I40E_AQ_CONFIG_FEC_RS_ENA	0x02
 #define I40E_AQ_CONFIG_CRC_ENA		0x04
 #define I40E_AQ_CONFIG_PACING_MASK	0x78
-	u8	power_desc;
+	union {
+		struct {
+			u8	power_desc;
 #define I40E_AQ_LINK_POWER_CLASS_1	0x00
 #define I40E_AQ_LINK_POWER_CLASS_2	0x01
 #define I40E_AQ_LINK_POWER_CLASS_3	0x02
 #define I40E_AQ_LINK_POWER_CLASS_4	0x03
 #define I40E_AQ_PWR_CLASS_MASK		0x03
-	u8	reserved[4];
+			u8	reserved[4];
+		};
+		struct {
+			u8	link_type[4];
+			u8	link_type_ext;
+		};
+	};
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
@@ -1983,11 +2245,28 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
 
 /* Set Loopback mode (0x0618) */
 struct i40e_aqc_set_lb_mode {
-	__le16	lb_mode;
+	u8	lb_level;
+#define I40E_AQ_LB_NONE	0
+#define I40E_AQ_LB_MAC	1
+#define I40E_AQ_LB_SERDES	2
+#define I40E_AQ_LB_PHY_INT	3
+#define I40E_AQ_LB_PHY_EXT	4
+#define I40E_AQ_LB_BASE_T_PCS	5
+#define I40E_AQ_LB_BASE_T_EXT	6
 #define I40E_AQ_LB_PHY_LOCAL	0x01
 #define I40E_AQ_LB_PHY_REMOTE	0x02
 #define I40E_AQ_LB_MAC_LOCAL	0x04
-	u8	reserved[14];
+	u8	lb_type;
+#define I40E_AQ_LB_LOCAL	0
+#define I40E_AQ_LB_FAR	0x01
+	u8	speed;
+#define I40E_AQ_LB_SPEED_NONE	0
+#define I40E_AQ_LB_SPEED_1G	1
+#define I40E_AQ_LB_SPEED_10G	2
+#define I40E_AQ_LB_SPEED_40G	3
+#define I40E_AQ_LB_SPEED_20G	4
+	u8	force_speed;
+	u8	reserved[12];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
@@ -2004,7 +2283,7 @@ struct i40e_aqc_set_phy_debug {
 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT	0x02
 /* Disable link manageability on a single port */
 #define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW	0x10
-/* Disable link manageability on all ports */
+/* Disabling link manageability on all ports needs both bits 4 and 5 set */
 #define I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW	0x20
 	u8	reserved[15];
 };
@@ -2017,26 +2296,71 @@ enum i40e_aq_phy_reg_type {
 	I40E_AQC_PHY_REG_EXERNAL_MODULE	= 0x3
 };
 
+#pragma pack(1)
 /* Run PHY Activity (0x0626) */
 struct i40e_aqc_run_phy_activity {
-	__le16  activity_id;
-	u8      flags;
-	u8      reserved1;
-	__le32  control;
-	__le32  data;
-	u8      reserved2[4];
+	u8	cmd_flags;
+	__le16	activity_id;
+#define I40E_AQ_RUN_PHY_ACT_ID_USR_DFND			0x10
+	u8	reserved;
+	union {
+		struct {
+			__le32  dnl_opcode;
+#define I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_STAT_DUR	0x801a
+#define I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_STAT	0x801b
+#define I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_DUR	0x1801b
+			__le32  data;
+			u8	reserved2[4];
+		} cmd;
+		struct {
+			__le32	cmd_status;
+#define I40E_AQ_RUN_PHY_ACT_CMD_STAT_SUCC		0x4
+#define I40E_AQ_RUN_PHY_ACT_CMD_STAT_MASK		0xFFFF
+			__le32	data0;
+			__le32	data1;
+		} resp;
+	} params;
 };
+#pragma pack()
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity);
 
+/* Set PHY Register command (0x0628) */
+/* Get PHY Register command (0x0629) */
+struct i40e_aqc_phy_register_access {
+	u8	phy_interface;
+#define I40E_AQ_PHY_REG_ACCESS_INTERNAL	0
+#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL	1
+#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE	2
+	u8	dev_addres;
+	u8	cmd_flags;
+#define I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE	0x01
+#define I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER	0x02
+#define I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT	2
+#define I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK	(0x3 << \
+		I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT)
+	u8	reserved1;
+	__le32	reg_address;
+	__le32	reg_value;
+	u8	reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
+
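A hedged sketch of filling the new 0x0629 (get PHY register) descriptor payload; actual submission through the admin queue is elided, and the field spelling 'dev_addres' matches the struct above:

	/* Prepare a get-PHY-register request for an external PHY;
	 * the caller still sends the descriptor and reads back
	 * reg_value from the completion.
	 */
	static void i40e_fill_phy_reg_read(struct i40e_aqc_phy_register_access *cmd,
					   u8 dev_addr, u32 reg)
	{
		cmd->phy_interface = I40E_AQ_PHY_REG_ACCESS_EXTERNAL;
		cmd->dev_addres = dev_addr;		/* MDIO device address */
		cmd->reg_address = cpu_to_le32(reg);	/* register to read */
	}
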
 /* NVM Read command (indirect 0x0701)
  * NVM Erase commands (direct 0x0702)
  * NVM Update commands (indirect 0x0703)
  */
 struct i40e_aqc_nvm_update {
 	u8	command_flags;
-#define I40E_AQ_NVM_LAST_CMD	0x01
-#define I40E_AQ_NVM_FLASH_ONLY	0x80
+#define I40E_AQ_NVM_LAST_CMD			0x01
+#define I40E_AQ_NVM_REARRANGE_TO_FLAT		0x20
+#define I40E_AQ_NVM_REARRANGE_TO_STRUCT		0x40
+#define I40E_AQ_NVM_FLASH_ONLY			0x80
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT	1
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK	0x03
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED	0x03
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL	0x01
 	u8	module_pointer;
 	__le16	length;
 	__le32	offset;
@@ -2049,8 +2373,8 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
 /* NVM Config Read (indirect 0x0704) */
 struct i40e_aqc_nvm_config_read {
 	__le16	cmd_flags;
-#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK	1 
-#define I40E_AQ_ANVM_READ_SINGLE_FEATURE		0 
+#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK	1
+#define I40E_AQ_ANVM_READ_SINGLE_FEATURE		0
 #define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES		1
 	__le16	element_count;
 	__le16	element_id;	/* Feature/field ID */
@@ -2075,9 +2399,9 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
 /* Used for 0x0704 as well as for 0x0705 commands */
 #define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT		1
 #define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
-				BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+				(1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
 #define I40E_AQ_ANVM_FEATURE		0
-#define I40E_AQ_ANVM_IMMEDIATE_FIELD	BIT(FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_IMMEDIATE_FIELD	(1 << FEATURE_OR_IMMEDIATE_SHIFT)
 struct i40e_aqc_nvm_config_data_feature {
 	__le16 feature_id;
 #define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY		0x01
@@ -2279,23 +2603,35 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
 /* Stop LLDP (direct 0x0A05) */
 struct i40e_aqc_lldp_stop {
 	u8	command;
-#define I40E_AQ_LLDP_AGENT_STOP		0x0
-#define I40E_AQ_LLDP_AGENT_SHUTDOWN	0x1
+#define I40E_AQ_LLDP_AGENT_STOP			0x0
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN		0x1
+#define I40E_AQ_LLDP_AGENT_STOP_PERSIST		0x2
 	u8	reserved[15];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
 
 /* Start LLDP (direct 0x0A06) */
-
 struct i40e_aqc_lldp_start {
 	u8	command;
-#define I40E_AQ_LLDP_AGENT_START	0x1
+#define I40E_AQ_LLDP_AGENT_START		0x1
+#define I40E_AQ_LLDP_AGENT_START_PERSIST	0x2
 	u8	reserved[15];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
 
+/* Set DCB (direct 0x0303) */
+struct i40e_aqc_set_dcb_parameters {
+	u8 command;
+#define I40E_AQ_DCB_SET_AGENT	0x1
+#define I40E_DCB_VALID		0x1
+	u8 valid_flags;
+	u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters);
+
 /* Get CEE DCBX Oper Config (0x0A07)
  * uses the generic descriptor struct
  * returns below as indirect response
@@ -2350,20 +2686,7 @@ struct i40e_aqc_get_cee_dcb_cfg_resp {
 	u8	oper_tc_bw[8];
 	u8	oper_pfc_en;
 	__le16	oper_app_prio;
-#define I40E_AQC_CEE_APP_FCOE_SHIFT	0x0
-#define I40E_AQC_CEE_APP_FCOE_MASK	(0x7 << I40E_AQC_CEE_APP_FCOE_SHIFT)
-#define I40E_AQC_CEE_APP_ISCSI_SHIFT	0x3
-#define I40E_AQC_CEE_APP_ISCSI_MASK	(0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT)
-#define I40E_AQC_CEE_APP_FIP_SHIFT	0x8
-#define I40E_AQC_CEE_APP_FIP_MASK	(0x7 << I40E_AQC_CEE_APP_FIP_SHIFT)
-#define I40E_AQC_CEE_APP_FIP_MASK	(0x7 << I40E_AQC_CEE_APP_FIP_SHIFT)
 	__le32	tlv_status;
-#define I40E_AQC_CEE_PG_STATUS_SHIFT	0x0
-#define I40E_AQC_CEE_PG_STATUS_MASK	(0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT)
-#define I40E_AQC_CEE_PFC_STATUS_SHIFT	0x3
-#define I40E_AQC_CEE_PFC_STATUS_MASK	(0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT)
-#define I40E_AQC_CEE_APP_STATUS_SHIFT	0x8
-#define I40E_AQC_CEE_APP_STATUS_MASK	(0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
 	u8	reserved[12];
 };
 
@@ -2374,11 +2697,12 @@ I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp);
  */
 struct i40e_aqc_lldp_set_local_mib {
 #define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT	0
-#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK	BIT(SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK	(1 << \
+					SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
 #define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB	0x0
 #define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT	(1)
-#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK \
-			BIT(SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK	(1 << \
+				SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
 #define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS		0x1
 	u8	type;
 	u8	reserved0;
@@ -2390,19 +2714,37 @@ struct i40e_aqc_lldp_set_local_mib {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib);
 
+struct i40e_aqc_lldp_set_local_mib_resp {
+#define SET_LOCAL_MIB_RESP_EVENT_TRIGGERED_MASK      0x01
+	u8  status;
+	u8  reserved[15];
+};
+
+I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_lldp_set_local_mib_resp);
+
 /*	Stop/Start LLDP Agent (direct 0x0A09)
  *	Used for stopping/starting specific LLDP agent. e.g. DCBx
  */
 struct i40e_aqc_lldp_stop_start_specific_agent {
 #define I40E_AQC_START_SPECIFIC_AGENT_SHIFT	0
 #define I40E_AQC_START_SPECIFIC_AGENT_MASK \
-				BIT(I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
+				(1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
 	u8	command;
 	u8	reserved[15];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop_start_specific_agent);
 
+/* Restore LLDP Agent factory settings (direct 0x0A0A) */
+struct i40e_aqc_lldp_restore {
+	u8	command;
+#define I40E_AQ_LLDP_AGENT_RESTORE_NOT		0x0
+#define I40E_AQ_LLDP_AGENT_RESTORE		0x1
+	u8	reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_restore);
+
 /* Add Udp Tunnel command and completion (direct 0x0B00) */
 struct i40e_aqc_add_udp_tunnel {
 	__le16	udp_port;
@@ -2449,7 +2791,7 @@ struct i40e_aqc_del_udp_tunnel_completion {
 I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
 
 struct i40e_aqc_get_set_rss_key {
-#define I40E_AQC_SET_RSS_KEY_VSI_VALID		BIT(15)
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID		(0x1 << 15)
 #define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT	0
 #define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK	(0x3FF << \
 					I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
@@ -2469,13 +2811,14 @@ struct i40e_aqc_get_set_rss_key_data {
 I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
 
 struct  i40e_aqc_get_set_rss_lut {
-#define I40E_AQC_SET_RSS_LUT_VSI_VALID		BIT(15)
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID		(0x1 << 15)
 #define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT	0
 #define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK	(0x3FF << \
 					I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
 	__le16	vsi_id;
 #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT	0
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK	BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK	(0x1 << \
+					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
 
 #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI	0
 #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF	1
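
For context, a sketch of building the vsi_id word for a per-VSI RSS LUT command, mirroring what i40e_aq_get_set_rss_lut() in i40e_common.c does internally (helper name invented):

	/* Pack the VSI number into its field and mark it valid. */
	static __le16 i40e_rss_lut_vsi_word(u16 vsi_id)
	{
		u16 v = (vsi_id << I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
			I40E_AQC_SET_RSS_LUT_VSI_ID_MASK;

		return cpu_to_le16(v | I40E_AQC_SET_RSS_LUT_VSI_VALID);
	}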
diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
index 926811ad4..d7feb645f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_alloc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
@@ -1,28 +1,5 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
 #ifndef _I40E_ALLOC_H_
 #define _I40E_ALLOC_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 1b1e2acbd..2877a1203 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -1,28 +1,5 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
 #include <linux/list.h>
 #include <linux/errno.h>
@@ -53,17 +30,23 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
 				       bool is_vf, u32 vf_id,
 				       u32 flag, u32 valid_flag);
 
+static int i40e_client_device_register(struct i40e_info *ldev);
+
+static void i40e_client_device_unregister(struct i40e_info *ldev);
+
 static struct i40e_ops i40e_lan_ops = {
 	.virtchnl_send = i40e_client_virtchnl_send,
 	.setup_qvlist = i40e_client_setup_qvlist,
 	.request_reset = i40e_client_request_reset,
 	.update_vsi_ctxt = i40e_client_update_vsi_ctxt,
+	.client_device_register = i40e_client_device_register,
+	.client_device_unregister = i40e_client_device_unregister,
 };
 
 /**
  * i40e_client_get_params - Get the params that can change at runtime
  * @vsi: the VSI with the message
- * @param: clinet param struct
+ * @params: client param struct
  *
  **/
 static
@@ -94,6 +77,10 @@ int i40e_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)
 	return 0;
 }
 
+static void i40e_client_device_release(struct device *dev)
+{
+}
+
 /**
  * i40e_notify_client_of_vf_msg - call the client vf message callback
  * @vsi: the VSI with the message
@@ -287,21 +274,27 @@ out:
 	return capable;
 }
 
+void i40e_client_update_msix_info(struct i40e_pf *pf)
+{
+	struct i40e_client_instance *cdev = pf->cinst;
+
+	if (!cdev || !cdev->client)
+		return;
+
+	cdev->lan_info.msix_count = pf->num_iwarp_msix;
+	cdev->lan_info.msix_entries = &pf->msix_entries[pf->iwarp_base_vector];
+}
+
 /**
  * i40e_client_add_instance - add a client instance struct to the instance list
  * @pf: pointer to the board struct
- * @client: pointer to a client struct in the client list.
- * @existing: if there was already an existing instance
- *
  **/
 static void i40e_client_add_instance(struct i40e_pf *pf)
 {
 	struct i40e_client_instance *cdev = NULL;
 	struct netdev_hw_addr *mac = NULL;
 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
-
-	if (!registered_client || pf->cinst)
-		return;
+	struct platform_device *platform_dev;
 
 	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
 	if (!cdev)
@@ -320,6 +313,12 @@ static void i40e_client_add_instance(struct i40e_pf *pf)
 	cdev->lan_info.fw_maj_ver = pf->hw.aq.fw_maj_ver;
 	cdev->lan_info.fw_min_ver = pf->hw.aq.fw_min_ver;
 	cdev->lan_info.fw_build = pf->hw.aq.fw_build;
+	platform_dev = &cdev->lan_info.platform_dev;
+	platform_dev->name = "i40e_rdma";
+	platform_dev->id = PLATFORM_DEVID_AUTO;
+	platform_dev->id_auto = true;
+	platform_dev->dev.release = i40e_client_device_release;
+	platform_dev->dev.parent = &pf->pdev->dev;
 	set_bit(__I40E_CLIENT_INSTANCE_NONE, &cdev->state);
 
 	if (i40e_client_get_params(vsi, &cdev->lan_info.params)) {
@@ -328,9 +327,6 @@ static void i40e_client_add_instance(struct i40e_pf *pf)
 		return;
 	}
 
-	cdev->lan_info.msix_count = pf->num_iwarp_msix;
-	cdev->lan_info.msix_entries = &pf->msix_entries[pf->iwarp_base_vector];
-
 	mac = list_first_entry(&cdev->lan_info.netdev->dev_addrs.list,
 			       struct netdev_hw_addr, list);
 	if (mac)
@@ -338,8 +334,12 @@ static void i40e_client_add_instance(struct i40e_pf *pf)
 	else
 		dev_err(&pf->pdev->dev, "MAC address list is empty!\n");
 
-	cdev->client = registered_client;
+	cdev->client = NULL;
 	pf->cinst = cdev;
+
+	cdev->lan_info.msix_count = pf->num_iwarp_msix;
+	cdev->lan_info.msix_entries = &pf->msix_entries[pf->iwarp_base_vector];
+	platform_device_register(platform_dev);
 }
 
 /**
@@ -360,14 +360,13 @@ void i40e_client_del_instance(struct i40e_pf *pf)
  **/
 void i40e_client_subtask(struct i40e_pf *pf)
 {
-	struct i40e_client *client = registered_client;
+	struct i40e_client *client;
 	struct i40e_client_instance *cdev;
 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
 	int ret = 0;
 
-	if (!(pf->flags & I40E_FLAG_SERVICE_CLIENT_REQUESTED))
+	if (!test_and_clear_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state))
 		return;
-	pf->flags &= ~I40E_FLAG_SERVICE_CLIENT_REQUESTED;
 	cdev = pf->cinst;
 
 	/* If we're down or resetting, just bail */
@@ -375,14 +374,16 @@ void i40e_client_subtask(struct i40e_pf *pf)
 	    test_bit(__I40E_CONFIG_BUSY, pf->state))
 		return;
 
-	if (!client || !cdev)
+	if (!cdev || !cdev->client)
 		return;
 
-	/* Here we handle client opens. If the client is down, but
-	 * the netdev is up, then open the client.
+	client = cdev->client;
+
+	/* Here we handle client opens. If the client is down, and
+	 * the netdev is registered, then open the client.
 	 */
 	if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
-		if (!test_bit(__I40E_VSI_DOWN, vsi->state) &&
+		if (vsi->netdev_registered &&
 		    client->ops && client->ops->open) {
 			set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
 			ret = client->ops->open(&cdev->lan_info, client);
@@ -393,17 +394,19 @@ void i40e_client_subtask(struct i40e_pf *pf)
 				i40e_client_del_instance(pf);
 			}
 		}
-	} else {
-	/* Likewise for client close. If the client is up, but the netdev
-	 * is down, then close the client.
-	 */
-		if (test_bit(__I40E_VSI_DOWN, vsi->state) &&
-		    client->ops && client->ops->close) {
-			clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
-			client->ops->close(&cdev->lan_info, client, false);
-			i40e_client_release_qvlist(&cdev->lan_info);
-		}
 	}
+
+	/* enable/disable PE TCP_ENA flag based on netdev down/up
+	 */
+	if (test_bit(__I40E_VSI_DOWN, vsi->state))
+		i40e_client_update_vsi_ctxt(&cdev->lan_info, client,
+					    0, 0, 0,
+					    I40E_CLIENT_VSI_FLAG_TCP_ENABLE);
+	else
+		i40e_client_update_vsi_ctxt(&cdev->lan_info, client,
+					    0, 0,
+					    I40E_CLIENT_VSI_FLAG_TCP_ENABLE,
+					    I40E_CLIENT_VSI_FLAG_TCP_ENABLE);
 }
 
 /**
@@ -436,17 +439,8 @@ int i40e_lan_add_device(struct i40e_pf *pf)
 		 pf->hw.pf_id, pf->hw.bus.bus_id,
 		 pf->hw.bus.device, pf->hw.bus.func);
 
-	/* If a client has already been registered, we need to add an instance
-	 * of it to our new LAN device.
-	 */
-	if (registered_client)
-		i40e_client_add_instance(pf);
-
-	/* Since in some cases register may have happened before a device gets
-	 * added, we can schedule a subtask to go initiate the clients if
-	 * they can be launched at probe time.
-	 */
-	pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
+	i40e_client_add_instance(pf);
+	set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
 	i40e_service_event_schedule(pf);
 
 out:
@@ -465,6 +459,8 @@ int i40e_lan_del_device(struct i40e_pf *pf)
 	struct i40e_device *ldev, *tmp;
 	int ret = -ENODEV;
 
+	platform_device_unregister(&pf->cinst->lan_info.platform_dev);
+
 	/* First, remove any client instance. */
 	i40e_client_del_instance(pf);
 
@@ -480,6 +476,7 @@ int i40e_lan_del_device(struct i40e_pf *pf)
 			break;
 		}
 	}
+
 	mutex_unlock(&i40e_device_mutex);
 	return ret;
 }
@@ -517,10 +514,7 @@ static void i40e_client_release(struct i40e_client *client)
 				 "Client %s instance for PF id %d closed\n",
 				 client->name, pf->hw.pf_id);
 		}
-		/* delete the client instance */
-		i40e_client_del_instance(pf);
-		dev_info(&pf->pdev->dev, "Deleted client instance of Client %s\n",
-			 client->name);
+		cdev->client = NULL;
 		clear_bit(__I40E_SERVICE_SCHED, pf->state);
 	}
 	mutex_unlock(&i40e_device_mutex);
@@ -539,9 +533,9 @@ static void i40e_client_prepare(struct i40e_client *client)
 	mutex_lock(&i40e_device_mutex);
 	list_for_each_entry(ldev, &i40e_devices, list) {
 		pf = ldev->pf;
-		i40e_client_add_instance(pf);
+		pf->cinst->client = registered_client;
 		/* Start the client subtask */
-		pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
+		set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
 		i40e_service_event_schedule(pf);
 	}
 	mutex_unlock(&i40e_device_mutex);
@@ -566,7 +560,7 @@ static int i40e_client_virtchnl_send(struct i40e_info *ldev,
 	i40e_status err;
 
 	err = i40e_aq_send_msg_to_vf(hw, vf_id, VIRTCHNL_OP_IWARP,
-				     0, msg, len, NULL);
+				     I40E_SUCCESS, msg, len, NULL);
 	if (err)
 		dev_err(&pf->pdev->dev, "Unable to send iWarp message to VF, error %d, aq status %d\n",
 			err, hw->aq.asq_last_status);
@@ -578,7 +572,7 @@ static int i40e_client_virtchnl_send(struct i40e_info *ldev,
  * i40e_client_setup_qvlist
  * @ldev: pointer to L2 context.
  * @client: Client pointer.
- * @qv_info: queue and vector list
+ * @qvlist_info: queue and vector list
  *
  * Return 0 on success or < 0 on error
  **/
@@ -653,7 +647,7 @@ err:
  * i40e_client_request_reset
  * @ldev: pointer to L2 context.
  * @client: Client pointer.
- * @level: reset level
+ * @reset_level: reset level
  **/
 static void i40e_client_request_reset(struct i40e_info *ldev,
 				      struct i40e_client *client,
@@ -717,13 +711,13 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
 		return -ENOENT;
 	}
 
-	if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE) &&
-	    (flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE)) {
+	if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE) &&
+	    (flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE)) {
 		ctxt.info.valid_sections =
 			cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
 		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
-	} else if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE) &&
-		  !(flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE)) {
+	} else if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE) &&
+		  !(flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE)) {
 		ctxt.info.valid_sections =
 			cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
 		ctxt.info.queueing_opt_flags &= ~I40E_AQ_VSI_QUE_OPT_TCP_ENA;
@@ -747,6 +741,66 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
 	return err;
 }
 
+static int i40e_client_device_register(struct i40e_info *ldev)
+{
+	struct i40e_client *client;
+	struct i40e_pf *pf;
+
+	if (!ldev) {
+		pr_err("Failed to reg client dev: ldev ptr NULL\n");
+		return -EINVAL;
+	}
+
+	client = ldev->client;
+	pf = ldev->pf;
+	if (!client) {
+		pr_err("Failed to reg client dev: client ptr NULL\n");
+		return -EINVAL;
+	}
+
+	if (!ldev->ops || !client->ops) {
+		pr_err("Failed to reg client dev: client dev peer_ops/ops NULL\n");
+		return -EINVAL;
+	}
+
+	if (client->version.major != I40E_CLIENT_VERSION_MAJOR ||
+	    client->version.minor != I40E_CLIENT_VERSION_MINOR) {
+		pr_err("i40e: Failed to register client %s due to mismatched client interface version\n",
+		       client->name);
+		pr_err("Client is using version: %02d.%02d.%02d while LAN driver supports %s\n",
+		       client->version.major, client->version.minor,
+		       client->version.build,
+		       i40e_client_interface_version_str);
+		return -EINVAL;
+	}
+
+	pf->cinst->client = ldev->client;
+	set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
+	i40e_service_event_schedule(pf);
+
+	return 0;
+}
+
+static void i40e_client_device_unregister(struct i40e_info *ldev)
+{
+	struct i40e_pf *pf = ldev->pf;
+	struct i40e_client_instance *cdev = pf->cinst;
+
+	while (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
+		usleep_range(500, 1000);
+
+	if (!cdev || !cdev->client || !cdev->client->ops ||
+	    !cdev->client->ops->close) {
+		dev_err(&pf->pdev->dev, "Cannot close client device\n");
+		return;
+	}
+	cdev->client->ops->close(&cdev->lan_info, cdev->client, false);
+	clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
+	i40e_client_release_qvlist(&cdev->lan_info);
+	pf->cinst->client = NULL;
+	clear_bit(__I40E_SERVICE_SCHED, pf->state);
+}
+
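+/* A condensed, illustrative sketch (not part of this patch) of how a
+ * peer RDMA driver might attach through the two new ops; 'ldev' is the
+ * i40e_info handed to the peer's probe routine, the peer_* names are
+ * hypothetical, and error handling is abbreviated.
+ *
+ *	static int peer_probe(struct i40e_info *ldev)
+ *	{
+ *		int err;
+ *
+ *		err = ldev->ops->client_device_register(ldev);
+ *		if (err)
+ *			return err;	// version mismatch or missing ops
+ *		// ... peer device setup ...
+ *		return 0;
+ *	}
+ *
+ *	static void peer_remove(struct i40e_info *ldev)
+ *	{
+ *		ldev->ops->client_device_unregister(ldev);
+ *	}
+ */
+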
 /**
  * i40e_register_client - Register a i40e client driver with the L2 driver
  * @client: pointer to the i40e_client struct
@@ -791,7 +845,7 @@ int i40e_register_client(struct i40e_client *client)
 
 	i40e_client_prepare(client);
 
-	pr_info("i40e: Registered client %s\n", client->name);
+	pr_info("i40e: Registered client %s\n",	client->name);
 out:
 	return ret;
 }
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h
index 15b21a531..40041ad26 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.h
@@ -1,32 +1,11 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
 #ifndef _I40E_CLIENT_H_
 #define _I40E_CLIENT_H_
 
+#include <linux/platform_device.h>
+
 #define I40E_CLIENT_STR_LENGTH 10
 
 /* Client interface version should be updated anytime there is a change in the
@@ -128,11 +107,14 @@ struct i40e_info {
 	u16 fw_maj_ver;                 /* firmware major version */
 	u16 fw_min_ver;                 /* firmware minor version */
 	u32 fw_build;                   /* firmware build number */
+
+	struct platform_device platform_dev;
+	struct i40e_client *client;
 };
 
 #define I40E_CLIENT_RESET_LEVEL_PF   1
 #define I40E_CLIENT_RESET_LEVEL_CORE 2
-#define I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE  BIT(1)
+#define I40E_CLIENT_VSI_FLAG_TCP_ENABLE  BIT(1)
 
 struct i40e_ops {
 	/* setup_q_vector_list enables queues with a particular vector */
@@ -155,6 +137,10 @@ struct i40e_ops {
 			       struct i40e_client *client,
 			       bool is_vf, u32 vf_id,
 			       u32 flag, u32 valid_flag);
+
+	int (*client_device_register)(struct i40e_info *ldev);
+
+	void (*client_device_unregister)(struct i40e_info *ldev);
 };
 
 struct i40e_client_ops {
@@ -211,7 +197,8 @@ struct i40e_client {
 #define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS	BIT(2)
 	u8 type;
 #define I40E_CLIENT_IWARP 0
-	const struct i40e_client_ops *ops; /* client ops provided by the client */
+	/* client ops provided by the client */
+	const struct i40e_client_ops *ops;
 };
 
 static inline bool i40e_client_is_registered(struct i40e_client *client)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 111426ba5..69fd8cc2e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1,33 +1,10 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
 #include "i40e_type.h"
 #include "i40e_adminq.h"
 #include "i40e_prototype.h"
-#include <linux/avf/virtchnl.h>
+#include "virtchnl.h"
 
 /**
  * i40e_set_mac_type - Sets MAC type
@@ -36,9 +13,9 @@
  * This function sets the mac type of the adapter based on the
  * vendor ID and device ID stored in the hw structure.
  **/
-static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
+i40e_status i40e_set_mac_type(struct i40e_hw *hw)
 {
-	i40e_status status = 0;
+	i40e_status status = I40E_SUCCESS;
 
 	if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
 		switch (hw->device_id) {
@@ -51,10 +28,16 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
 		case I40E_DEV_ID_QSFP_C:
 		case I40E_DEV_ID_10G_BASE_T:
 		case I40E_DEV_ID_10G_BASE_T4:
+		case I40E_DEV_ID_10G_BASE_T_BC:
+		case I40E_DEV_ID_10G_B:
+		case I40E_DEV_ID_10G_SFP:
+		case I40E_DEV_ID_5G_BASE_T_BC:
 		case I40E_DEV_ID_20G_KR2:
 		case I40E_DEV_ID_20G_KR2_A:
 		case I40E_DEV_ID_25G_B:
 		case I40E_DEV_ID_25G_SFP28:
+		case I40E_DEV_ID_X710_N3000:
+		case I40E_DEV_ID_XXV710_N3000:
 			hw->mac.type = I40E_MAC_XL710;
 			break;
 		case I40E_DEV_ID_KX_X722:
@@ -146,7 +129,7 @@ const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
 const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
 {
 	switch (stat_err) {
-	case 0:
+	case I40E_SUCCESS:
 		return "OK";
 	case I40E_ERR_NVM:
 		return "I40E_ERR_NVM";
@@ -278,6 +261,8 @@ const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
 		return "I40E_NOT_SUPPORTED";
 	case I40E_ERR_FIRMWARE_API_VERSION:
 		return "I40E_ERR_FIRMWARE_API_VERSION";
+	case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
+		return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
 	}
 
 	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
@@ -298,47 +283,48 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
 		   void *buffer, u16 buf_len)
 {
 	struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
-	u16 len;
+	u32 effective_mask = hw->debug_mask & mask;
 	u8 *buf = (u8 *)buffer;
+	u16 len;
+	char prefix[27];
 
-	if ((!(mask & hw->debug_mask)) || (desc == NULL))
+	if (!effective_mask || !desc)
 		return;
 
-	len = le16_to_cpu(aq_desc->datalen);
+	len = LE16_TO_CPU(aq_desc->datalen);
 
-	i40e_debug(hw, mask,
+	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
 		   "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
-		   le16_to_cpu(aq_desc->opcode),
-		   le16_to_cpu(aq_desc->flags),
-		   le16_to_cpu(aq_desc->datalen),
-		   le16_to_cpu(aq_desc->retval));
-	i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
-		   le32_to_cpu(aq_desc->cookie_high),
-		   le32_to_cpu(aq_desc->cookie_low));
-	i40e_debug(hw, mask, "\tparam (0,1)  0x%08X 0x%08X\n",
-		   le32_to_cpu(aq_desc->params.internal.param0),
-		   le32_to_cpu(aq_desc->params.internal.param1));
-	i40e_debug(hw, mask, "\taddr (h,l)   0x%08X 0x%08X\n",
-		   le32_to_cpu(aq_desc->params.external.addr_high),
-		   le32_to_cpu(aq_desc->params.external.addr_low));
-
-	if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+		   LE16_TO_CPU(aq_desc->opcode),
+		   LE16_TO_CPU(aq_desc->flags),
+		   LE16_TO_CPU(aq_desc->datalen),
+		   LE16_TO_CPU(aq_desc->retval));
+	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
+		   "\tcookie (h,l) 0x%08X 0x%08X\n",
+		   LE32_TO_CPU(aq_desc->cookie_high),
+		   LE32_TO_CPU(aq_desc->cookie_low));
+	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
+		   "\tparam (0,1)  0x%08X 0x%08X\n",
+		   LE32_TO_CPU(aq_desc->params.internal.param0),
+		   LE32_TO_CPU(aq_desc->params.internal.param1));
+	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
+		   "\taddr (h,l)   0x%08X 0x%08X\n",
+		   LE32_TO_CPU(aq_desc->params.external.addr_high),
+		   LE32_TO_CPU(aq_desc->params.external.addr_low));
+
+	if (buffer && (buf_len != 0) && (len != 0) &&
+	    (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
 		i40e_debug(hw, mask, "AQ CMD Buffer:\n");
 		if (buf_len < len)
 			len = buf_len;
-		/* write the full 16-byte chunks */
-		if (hw->debug_mask & mask) {
-			char prefix[27];
-
-			snprintf(prefix, sizeof(prefix),
-				 "i40e %02x:%02x.%x: \t0x",
-				 hw->bus.bus_id,
-				 hw->bus.device,
-				 hw->bus.func);
-
-			print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
-				       16, 1, buf, len, false);
-		}
+		snprintf(prefix, sizeof(prefix),
+			 "i40e %02x:%02x.%x: \t0x",
+			 hw->bus.bus_id,
+			 hw->bus.device,
+			 hw->bus.func);
+
+		print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
+			       16, 1, buf, len, false);
 	}
 }
 
@@ -352,9 +338,8 @@ bool i40e_check_asq_alive(struct i40e_hw *hw)
 {
 	if (hw->aq.asq.len)
 		return !!(rd32(hw, hw->aq.asq.len) &
-			  I40E_PF_ATQLEN_ATQENABLE_MASK);
-	else
-		return false;
+			I40E_PF_ATQLEN_ATQENABLE_MASK);
+	return false;
 }
 
 /**
@@ -377,7 +362,7 @@ i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
 					  i40e_aqc_opc_queue_shutdown);
 
 	if (unloading)
-		cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
+		cmd->driver_unloading = CPU_TO_LE32(I40E_AQ_DRIVER_UNLOADING);
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 
 	return status;
@@ -395,9 +380,9 @@ i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
  * Internal function to get or set RSS look up table
  **/
 static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
-					   u16 vsi_id, bool pf_lut,
-					   u8 *lut, u16 lut_size,
-					   bool set)
+						     u16 vsi_id, bool pf_lut,
+						     u8 *lut, u16 lut_size,
+						     bool set)
 {
 	i40e_status status;
 	struct i40e_aq_desc desc;
@@ -412,22 +397,22 @@ static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
 						  i40e_aqc_opc_get_rss_lut);
 
 	/* Indirect command */
-	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
-	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
 
 	cmd_resp->vsi_id =
-			cpu_to_le16((u16)((vsi_id <<
+			CPU_TO_LE16((u16)((vsi_id <<
 					  I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
 					  I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
-	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
+	cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
 
 	if (pf_lut)
-		cmd_resp->flags |= cpu_to_le16((u16)
+		cmd_resp->flags |= CPU_TO_LE16((u16)
 					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
 					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
 					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
 	else
-		cmd_resp->flags |= cpu_to_le16((u16)
+		cmd_resp->flags |= CPU_TO_LE16((u16)
 					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
 					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
 					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
@@ -448,7 +433,7 @@ static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
  * get the RSS lookup table, PF or VSI type
  **/
 i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
-				bool pf_lut, u8 *lut, u16 lut_size)
+					  bool pf_lut, u8 *lut, u16 lut_size)
 {
 	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
 				       false);
@@ -465,7 +450,7 @@ i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
  * set the RSS lookup table, PF or VSI type
  **/
 i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
-				bool pf_lut, u8 *lut, u16 lut_size)
+					  bool pf_lut, u8 *lut, u16 lut_size)
 {
 	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
 }
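
Both wrappers above funnel into the single indirect AQ helper. A hedged usage sketch, with the LUT size and queue count chosen purely for illustration:

/* Illustrative: spread hash buckets round-robin over 4 queues in a VSI LUT. */
static i40e_status example_program_rss_lut(struct i40e_hw *hw, u16 vsi_id)
{
	u8 lut[64];	/* LUT size is an assumption for this sketch */
	u16 i;

	for (i = 0; i < sizeof(lut); i++)
		lut[i] = i % 4;	/* queue index for each hash bucket */

	return i40e_aq_set_rss_lut(hw, vsi_id, false /* VSI LUT */, lut,
				   sizeof(lut));
}
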
@@ -498,14 +483,14 @@ static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
 						  i40e_aqc_opc_get_rss_key);
 
 	/* Indirect command */
-	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
-	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
 
 	cmd_resp->vsi_id =
-			cpu_to_le16((u16)((vsi_id <<
+			CPU_TO_LE16((u16)((vsi_id <<
 					  I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
 					  I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
-	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
+	cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
 
 	status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
 
@@ -520,8 +505,8 @@ static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
  *
  **/
 i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
-				u16 vsi_id,
-				struct i40e_aqc_get_set_rss_key_data *key)
+				      u16 vsi_id,
+				      struct i40e_aqc_get_set_rss_key_data *key)
 {
 	return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
 }
@@ -535,8 +520,8 @@ i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
  * set the RSS key per VSI
  **/
 i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
-				u16 vsi_id,
-				struct i40e_aqc_get_set_rss_key_data *key)
+				      u16 vsi_id,
+				      struct i40e_aqc_get_set_rss_key_data *key)
 {
 	return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
 }
@@ -920,7 +905,7 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
  **/
 i40e_status i40e_init_shared_code(struct i40e_hw *hw)
 {
-	i40e_status status = 0;
+	i40e_status status = I40E_SUCCESS;
 	u32 port, ari, func_rid;
 
 	i40e_set_mac_type(hw);
@@ -947,8 +932,16 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
 	else
 		hw->pf_id = (u8)(func_rid & 0x7);
 
-	if (hw->mac.type == I40E_MAC_X722)
-		hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
+	/* NVMUpdate features structure initialization */
+	hw->nvmupd_features.major = I40E_NVMUPD_FEATURES_API_VER_MAJOR;
+	hw->nvmupd_features.minor = I40E_NVMUPD_FEATURES_API_VER_MINOR;
+	hw->nvmupd_features.size = sizeof(hw->nvmupd_features);
+	i40e_memset(hw->nvmupd_features.features, 0x0,
+		    I40E_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN *
+		    sizeof(*hw->nvmupd_features.features),
+		    I40E_NONDMA_MEM);
+
+	hw->nvmupd_features.features[0] = I40E_NVMUPD_FEATURE_FLAT_NVM_SUPPORT;
 
 	status = i40e_init_nvm(hw);
 	return status;
@@ -972,11 +965,11 @@ static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
 	i40e_status status;
 
 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
-	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
+	desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF);
 
 	status = i40e_asq_send_command(hw, &desc, addrs,
 				       sizeof(*addrs), cmd_details);
-	*flags = le16_to_cpu(cmd_data->command_flags);
+	*flags = LE16_TO_CPU(cmd_data->command_flags);
 
 	return status;
 }
@@ -999,9 +992,9 @@ i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
 
 	i40e_fill_default_direct_cmd_desc(&desc,
 					  i40e_aqc_opc_mac_address_write);
-	cmd_data->command_flags = cpu_to_le16(flags);
-	cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
-	cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
+	cmd_data->command_flags = CPU_TO_LE16(flags);
+	cmd_data->mac_sah = CPU_TO_LE16((u16)mac_addr[0] << 8 | mac_addr[1]);
+	cmd_data->mac_sal = CPU_TO_LE32(((u32)mac_addr[2] << 24) |
 					((u32)mac_addr[3] << 16) |
 					((u32)mac_addr[4] << 8) |
 					mac_addr[5]);
@@ -1060,7 +1053,7 @@ i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
 /**
  * i40e_pre_tx_queue_cfg - pre tx queue configure
  * @hw: pointer to the HW structure
- * @queue: target PF queue index
+ * @queue: target pf queue index
  * @enable: state change request
  *
  * Handles hw requirement to indicate intention to enable
@@ -1098,28 +1091,28 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
  *  Reads the part number string from the EEPROM.
  **/
 i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
-				 u32 pba_num_size)
+					    u32 pba_num_size)
 {
-	i40e_status status = 0;
+	i40e_status status = I40E_SUCCESS;
 	u16 pba_word = 0;
 	u16 pba_size = 0;
 	u16 pba_ptr = 0;
 	u16 i = 0;
 
 	status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
-	if (status || (pba_word != 0xFAFA)) {
+	if ((status != I40E_SUCCESS) || (pba_word != 0xFAFA)) {
 		hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
 		return status;
 	}
 
 	status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
-	if (status) {
+	if (status != I40E_SUCCESS) {
 		hw_dbg(hw, "Failed to read PBA Block pointer.\n");
 		return status;
 	}
 
 	status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
-	if (status) {
+	if (status != I40E_SUCCESS) {
 		hw_dbg(hw, "Failed to read PBA Block size.\n");
 		return status;
 	}
@@ -1135,7 +1128,7 @@ i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
 
 	for (i = 0; i < pba_size; i++) {
 		status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
-		if (status) {
+		if (status != I40E_SUCCESS) {
 			hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
 			return status;
 		}
@@ -1169,6 +1162,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
 		break;
 	case I40E_PHY_TYPE_100BASE_TX:
 	case I40E_PHY_TYPE_1000BASE_T:
+	case I40E_PHY_TYPE_2_5GBASE_T:
+	case I40E_PHY_TYPE_5GBASE_T:
 	case I40E_PHY_TYPE_10GBASE_T:
 		media = I40E_MEDIA_TYPE_BASET;
 		break;
@@ -1180,6 +1175,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
 	case I40E_PHY_TYPE_40GBASE_AOC:
 	case I40E_PHY_TYPE_10GBASE_AOC:
 	case I40E_PHY_TYPE_25GBASE_CR:
+	case I40E_PHY_TYPE_25GBASE_AOC:
+	case I40E_PHY_TYPE_25GBASE_ACC:
 		media = I40E_MEDIA_TYPE_DA;
 		break;
 	case I40E_PHY_TYPE_1000BASE_KX:
@@ -1203,7 +1200,29 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
 	return media;
 }
 
-#define I40E_PF_RESET_WAIT_COUNT_A0	200
+/**
+ * i40e_poll_globr - Poll for Global Reset completion
+ * @hw: pointer to the hardware structure
+ * @retry_limit: how many times to retry before failure
+ **/
+static i40e_status i40e_poll_globr(struct i40e_hw *hw,
+					     u32 retry_limit)
+{
+	u32 cnt, reg = 0;
+
+	for (cnt = 0; cnt < retry_limit; cnt++) {
+		reg = rd32(hw, I40E_GLGEN_RSTAT);
+		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
+			return I40E_SUCCESS;
+		msleep(100);
+	}
+
+	hw_dbg(hw, "Global reset failed.\n");
+	hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);
+
+	return I40E_ERR_RESET_FAILED;
+}
+
 #define I40E_PF_RESET_WAIT_COUNT	200
 /**
  * i40e_pf_reset - Reset the PF
@@ -1224,13 +1243,10 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
 	 * couple counts longer to be sure we don't just miss the end.
 	 */
 	grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
-		    I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
-		    I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
+			I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
+			I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
 
-	/* It can take upto 15 secs for GRST steady state.
-	 * Bump it to 16 secs max to be safe.
-	 */
-	grst_del = grst_del * 20;
+	grst_del = min(grst_del * 20, 160U);
 
 	for (cnt = 0; cnt < grst_del; cnt++) {
 		reg = rd32(hw, I40E_GLGEN_RSTAT);
@@ -1266,20 +1282,24 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
 	 * we don't need to do the PF Reset
 	 */
 	if (!cnt) {
-		if (hw->revision_id == 0)
-			cnt = I40E_PF_RESET_WAIT_COUNT_A0;
-		else
-			cnt = I40E_PF_RESET_WAIT_COUNT;
+		u32 reg2 = 0;
+
 		reg = rd32(hw, I40E_PFGEN_CTRL);
 		wr32(hw, I40E_PFGEN_CTRL,
 		     (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
-		for (; cnt; cnt--) {
+		for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
 			reg = rd32(hw, I40E_PFGEN_CTRL);
 			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
 				break;
+			reg2 = rd32(hw, I40E_GLGEN_RSTAT);
+			if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
+				break;
 			usleep_range(1000, 2000);
 		}
-		if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
+		if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
+			if (i40e_poll_globr(hw, grst_del) != I40E_SUCCESS)
+				return I40E_ERR_RESET_FAILED;
+		} else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
 			hw_dbg(hw, "PF reset polling failed to complete.\n");
 			return I40E_ERR_RESET_FAILED;
 		}
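
For orientation: with the 100 ms sleep these loops use, the min(grst_del * 20, 160U) clamp above bounds the global-reset wait at 160 iterations, which is the 16 second ceiling the deleted comment used to spell out. The poll-then-fallback flow reduces to a pattern like this sketch (names and limits illustrative):

/* Illustrative: poll until a status bit clears, bounded by max_iters. */
static bool example_poll_bit_clear(struct i40e_hw *hw, u32 reg_offset,
				   u32 mask, u32 max_iters)
{
	u32 i;

	for (i = 0; i < max_iters; i++) {
		if (!(rd32(hw, reg_offset) & mask))
			return true;	/* bit cleared, reset completed */
		msleep(100);		/* 100 ms per iteration */
	}

	return false;			/* timed out */
}
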
@@ -1287,7 +1307,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
 
 	i40e_clear_pxe_mode(hw);
 
-	return 0;
+	return I40E_SUCCESS;
 }
 
 /**
@@ -1308,18 +1328,18 @@ void i40e_clear_hw(struct i40e_hw *hw)
 	u32 val;
 	u32 eol = 0x7ff;
 
-	/* get number of interrupts, queues, and VFs */
+	/* get number of interrupts, queues, and vfs */
 	val = rd32(hw, I40E_GLPCI_CNF2);
 	num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
-		     I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
+			I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
 	num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
-		     I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
+			I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
 
 	val = rd32(hw, I40E_PFLAN_QALLOC);
 	base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
-		     I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
+			I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
 	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
-	    I40E_PFLAN_QALLOC_LASTQ_SHIFT;
+			I40E_PFLAN_QALLOC_LASTQ_SHIFT;
 	if (val & I40E_PFLAN_QALLOC_VALID_MASK)
 		num_queues = (j - base_queue) + 1;
 	else
@@ -1327,9 +1347,9 @@ void i40e_clear_hw(struct i40e_hw *hw)
 
 	val = rd32(hw, I40E_PF_VT_PFALLOC);
 	i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
-	    I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
+			I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
 	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
-	    I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
+			I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
 	if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
 		num_vfs = (j - i) + 1;
 	else
@@ -1392,20 +1412,8 @@ void i40e_clear_hw(struct i40e_hw *hw)
  **/
 void i40e_clear_pxe_mode(struct i40e_hw *hw)
 {
-	u32 reg;
-
 	if (i40e_check_asq_alive(hw))
 		i40e_aq_clear_pxe_mode(hw, NULL);
-
-	/* Clear single descriptor fetch/write-back mode */
-	reg = rd32(hw, I40E_GLLAN_RCTL_0);
-
-	if (hw->revision_id == 0) {
-		/* As a work around clear PXE_MODE instead of setting it */
-		wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
-	} else {
-		wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
-	}
 }
 
 /**
@@ -1420,9 +1428,9 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
 	u32 gpio_val = 0;
 	u32 port;
 
-	if (!hw->func_caps.led[idx])
+	if (!I40E_IS_X710TL_DEVICE(hw->device_id) &&
+	    !hw->func_caps.led[idx])
 		return 0;
-
 	gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
 	port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
 		I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
@@ -1441,8 +1449,15 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
 #define I40E_FILTER_ACTIVITY 0xE
 #define I40E_LINK_ACTIVITY 0xC
 #define I40E_MAC_ACTIVITY 0xD
+#define I40E_FW_LED BIT(4)
+#define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \
+			     I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
+
 #define I40E_LED0 22
 
+#define I40E_PIN_FUNC_SDP 0x0
+#define I40E_PIN_FUNC_LED 0x1
+
 /**
  * i40e_led_get - return current on/off mode
  * @hw: pointer to the hw struct
@@ -1468,7 +1483,7 @@ u32 i40e_led_get(struct i40e_hw *hw)
 			continue;
 
 		/* ignore gpio LED src mode entries related to the activity
-		 * LEDs
+		 *  LEDs
 		 */
 		current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
 				>> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
@@ -1476,6 +1491,7 @@ u32 i40e_led_get(struct i40e_hw *hw)
 		case I40E_COMBINED_ACTIVITY:
 		case I40E_FILTER_ACTIVITY:
 		case I40E_MAC_ACTIVITY:
+		case I40E_LINK_ACTIVITY:
 			continue;
 		default:
 			break;
@@ -1503,8 +1519,10 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
 	u32 current_mode = 0;
 	int i;
 
-	if (mode & 0xfffffff0)
+	if (mode & ~I40E_LED_MODE_VALID) {
 		hw_dbg(hw, "invalid mode passed in %X\n", mode);
+		return;
+	}
 
 	/* as per the documentation GPIO 22-29 are the LED
 	 * GPIO pins named LED0..LED7
@@ -1524,19 +1542,30 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
 		case I40E_COMBINED_ACTIVITY:
 		case I40E_FILTER_ACTIVITY:
 		case I40E_MAC_ACTIVITY:
+		case I40E_LINK_ACTIVITY:
 			continue;
 		default:
 			break;
 		}
 
+		if (I40E_IS_X710TL_DEVICE(hw->device_id)) {
+			u32 pin_func = 0;
+
+			if (mode & I40E_FW_LED)
+				pin_func = I40E_PIN_FUNC_SDP;
+			else
+				pin_func = I40E_PIN_FUNC_LED;
+
+			gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK;
+			gpio_val |= ((pin_func <<
+				     I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) &
+				     I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK);
+		}
 		gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
 		/* this & is a bit of paranoia, but serves as a range check */
 		gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
 			     I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
 
-		if (mode == I40E_LINK_ACTIVITY)
-			blink = false;
-
 		if (blink)
 			gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
 		else
@@ -1566,35 +1595,61 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
 {
 	struct i40e_aq_desc desc;
 	i40e_status status;
+	u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
 	u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
 
 	if (!abilities)
 		return I40E_ERR_PARAM;
 
-	i40e_fill_default_direct_cmd_desc(&desc,
-					  i40e_aqc_opc_get_phy_abilities);
+	do {
+		i40e_fill_default_direct_cmd_desc(&desc,
+					       i40e_aqc_opc_get_phy_abilities);
 
-	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
-	if (abilities_size > I40E_AQ_LARGE_BUF)
-		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+		if (abilities_size > I40E_AQ_LARGE_BUF)
+			desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
 
-	if (qualified_modules)
-		desc.params.external.param0 |=
-			cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
+		if (qualified_modules)
+			desc.params.external.param0 |=
+			CPU_TO_LE32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
 
-	if (report_init)
-		desc.params.external.param0 |=
-			cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
+		if (report_init)
+			desc.params.external.param0 |=
+			CPU_TO_LE32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
 
-	status = i40e_asq_send_command(hw, &desc, abilities, abilities_size,
-				       cmd_details);
+		status = i40e_asq_send_command(hw, &desc, abilities,
+					       abilities_size, cmd_details);
 
-	if (hw->aq.asq_last_status == I40E_AQ_RC_EIO)
-		status = I40E_ERR_UNKNOWN_PHY;
+		switch (hw->aq.asq_last_status) {
+		case I40E_AQ_RC_EIO:
+			status = I40E_ERR_UNKNOWN_PHY;
+			break;
+		case I40E_AQ_RC_EAGAIN:
+			usleep_range(1000, 2000);
+			total_delay++;
+			status = I40E_ERR_TIMEOUT;
+			break;
+		/* also covers I40E_AQ_RC_OK */
+		default:
+			break;
+		}
+
+	} while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
+		(total_delay < max_delay));
+
+	if (status != I40E_SUCCESS)
+		return status;
 
 	if (report_init) {
-		hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
-		hw->phy.phy_types |= ((u64)abilities->phy_type_ext << 32);
+		if (hw->mac.type == I40E_MAC_XL710 &&
+		    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+		    hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
+			status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+		} else {
+			hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type);
+			hw->phy.phy_types |=
+					((u64)abilities->phy_type_ext << 32);
+		}
 	}
 
 	return status;
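
A hedged usage sketch for the retrying capability query above; the parameter order is an assumption from the surrounding declarations, and report_init=true triggers the hw->phy.phy_types refresh shown at the end of the function:

/* Illustrative: fetch current abilities once and read back the PHY types. */
static i40e_status example_read_phy_types(struct i40e_hw *hw, u64 *phy_types)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
	i40e_status status;

	status = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
					      NULL);
	if (status != I40E_SUCCESS)
		return status;

	*phy_types = hw->phy.phy_types;	/* refreshed as a side effect */
	return I40E_SUCCESS;
}
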
@@ -1612,14 +1667,14 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
  * of the PHY Config parameters. This status will be indicated by the
  * command response.
  **/
-enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
+i40e_status i40e_aq_set_phy_config(struct i40e_hw *hw,
 				struct i40e_aq_set_phy_config *config,
 				struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aq_set_phy_config *cmd =
-			(struct i40e_aq_set_phy_config *)&desc.params.raw;
-	enum i40e_status_code status;
+		(struct i40e_aq_set_phy_config *)&desc.params.raw;
+	i40e_status status;
 
 	if (!config)
 		return I40E_ERR_PARAM;
@@ -1637,16 +1692,18 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
 /**
  * i40e_set_fc
  * @hw: pointer to the hw struct
+ * @aq_failures: buffer to return AdminQ failure information
+ * @atomic_restart: whether to enable atomic link restart
  *
  * Set the requested flow control mode using set_phy_config.
  **/
-enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+i40e_status i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
 				  bool atomic_restart)
 {
 	enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
 	struct i40e_aq_get_phy_abilities_resp abilities;
 	struct i40e_aq_set_phy_config config;
-	enum i40e_status_code status;
+	i40e_status status;
 	u8 pause_mask = 0x0;
 
 	*aq_failures = 0x0;
@@ -1674,7 +1731,7 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
 		return status;
 	}
 
-	memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
+	memset(&config, 0, sizeof(config));
 	/* clear the old pause settings */
 	config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
 			   ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
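
i40e_set_fc() is a read-modify-write: it fetches the current abilities, clears the old pause bits as above, and pushes the result back through set_phy_config. A short usage sketch, with the requested mode chosen for illustration:

/* Illustrative: request symmetric (RX+TX) pause, restarting link atomically. */
static i40e_status example_enable_flow_control(struct i40e_hw *hw)
{
	u8 aq_failures = 0;
	i40e_status status;

	hw->fc.requested_mode = I40E_FC_FULL;
	status = i40e_set_fc(hw, &aq_failures, true /* atomic_restart */);
	if (status != I40E_SUCCESS)
		i40e_debug(hw, I40E_DEBUG_LINK,
			   "set_fc failed, aq_failures=0x%x\n", aq_failures);

	return status;
}
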
@@ -1723,7 +1780,7 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
  * Tell the firmware that the driver is taking over from PXE
  **/
 i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
-				struct i40e_asq_cmd_details *cmd_details)
+			struct i40e_asq_cmd_details *cmd_details)
 {
 	i40e_status status;
 	struct i40e_aq_desc desc;
@@ -1751,8 +1808,7 @@ i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
  * Sets up the link and restarts the Auto-Negotiation over the link.
  **/
 i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
-					bool enable_link,
-					struct i40e_asq_cmd_details *cmd_details)
+		bool enable_link, struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_set_link_restart_an *cmd =
@@ -1800,15 +1856,16 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
 		command_flags = I40E_AQ_LSE_ENABLE;
 	else
 		command_flags = I40E_AQ_LSE_DISABLE;
-	resp->command_flags = cpu_to_le16(command_flags);
+	resp->command_flags = CPU_TO_LE16(command_flags);
 
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
-	if (status)
+	if (status != I40E_SUCCESS)
 		goto aq_get_link_info_exit;
 
 	/* save off old link status information */
-	hw->phy.link_info_old = *hw_link_info;
+	i40e_memcpy(&hw->phy.link_info_old, hw_link_info,
+		    sizeof(*hw_link_info), I40E_NONDMA_TO_NONDMA);
 
 	/* update link status */
 	hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
@@ -1819,8 +1876,8 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
 	hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
 						 I40E_AQ_CONFIG_FEC_RS_ENA);
 	hw_link_info->ext_info = resp->ext_info;
-	hw_link_info->loopback = resp->loopback;
-	hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
+	hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK;
+	hw_link_info->max_frame_size = LE16_TO_CPU(resp->max_frame_size);
 	hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
 
 	/* update fc info */
@@ -1840,7 +1897,7 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
 	else
 		hw_link_info->crc_enable = false;
 
-	if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED))
+	if (resp->command_flags & CPU_TO_LE16(I40E_AQ_LSE_IS_ENABLED))
 		hw_link_info->lse_enable = true;
 	else
 		hw_link_info->lse_enable = false;
@@ -1850,9 +1907,20 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
 	     hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
 		hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
 
+	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE &&
+	    hw->mac.type != I40E_MAC_X722) {
+		__le32 tmp;
+
+		i40e_memcpy(&tmp, resp->link_type, sizeof(tmp),
+			    I40E_NONDMA_TO_NONDMA);
+		hw->phy.phy_types = LE32_TO_CPU(tmp);
+		hw->phy.phy_types |= ((u64)resp->link_type_ext << 32);
+	}
+
 	/* save link status information */
 	if (link)
-		*link = *hw_link_info;
+		i40e_memcpy(link, hw_link_info, sizeof(*hw_link_info),
+			    I40E_NONDMA_TO_NONDMA);
 
 	/* flag cleared so helper functions don't call AQ again */
 	hw->phy.get_link_info = false;
@@ -1870,8 +1938,8 @@ aq_get_link_info_exit:
  * Set link interrupt mask.
  **/
 i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
-				     u16 mask,
-				     struct i40e_asq_cmd_details *cmd_details)
+				u16 mask,
+				struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_set_phy_int_mask *cmd =
@@ -1881,7 +1949,7 @@ i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
 	i40e_fill_default_direct_cmd_desc(&desc,
 					  i40e_aqc_opc_set_phy_int_mask);
 
-	cmd->event_mask = cpu_to_le16(mask);
+	cmd->event_mask = CPU_TO_LE16(mask);
 
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
@@ -1897,7 +1965,7 @@ i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
  * Reset the external PHY.
  **/
 i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
-				  struct i40e_asq_cmd_details *cmd_details)
+				struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_set_phy_debug *cmd =
@@ -1937,23 +2005,24 @@ i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
 	i40e_fill_default_direct_cmd_desc(&desc,
 					  i40e_aqc_opc_add_vsi);
 
-	cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid);
+	cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->uplink_seid);
 	cmd->connection_type = vsi_ctx->connection_type;
 	cmd->vf_id = vsi_ctx->vf_num;
-	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
+	cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
 
-	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
 
-	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
-				    sizeof(vsi_ctx->info), cmd_details);
+	status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
+					      sizeof(vsi_ctx->info),
+					      cmd_details, true);
 
-	if (status)
+	if (status != I40E_SUCCESS)
 		goto aq_add_vsi_exit;
 
-	vsi_ctx->seid = le16_to_cpu(resp->seid);
-	vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
-	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
-	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
+	vsi_ctx->seid = LE16_TO_CPU(resp->seid);
+	vsi_ctx->vsi_number = LE16_TO_CPU(resp->vsi_number);
+	vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used);
+	vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
 
 aq_add_vsi_exit:
 	return status;
@@ -1966,8 +2035,8 @@ aq_add_vsi_exit:
  * @cmd_details: pointer to command details structure or NULL
  **/
 i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
-				    u16 seid,
-				    struct i40e_asq_cmd_details *cmd_details)
+				u16 seid,
+				struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
@@ -1976,11 +2045,11 @@ i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
 	i40e_status status;
 
 	i40e_fill_default_direct_cmd_desc(&desc,
-					  i40e_aqc_opc_set_vsi_promiscuous_modes);
+					i40e_aqc_opc_set_vsi_promiscuous_modes);
 
-	cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
-	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
-	cmd->seid = cpu_to_le16(seid);
+	cmd->promiscuous_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT);
+	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT);
+	cmd->seid = CPU_TO_LE16(seid);
 
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
@@ -1994,8 +2063,8 @@ i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
  * @cmd_details: pointer to command details structure or NULL
  **/
 i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
-				      u16 seid,
-				      struct i40e_asq_cmd_details *cmd_details)
+				u16 seid,
+				struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
@@ -2004,11 +2073,11 @@ i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
 	i40e_status status;
 
 	i40e_fill_default_direct_cmd_desc(&desc,
-					  i40e_aqc_opc_set_vsi_promiscuous_modes);
+					i40e_aqc_opc_set_vsi_promiscuous_modes);
 
-	cmd->promiscuous_flags = cpu_to_le16(0);
-	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
-	cmd->seid = cpu_to_le16(seid);
+	cmd->promiscuous_flags = CPU_TO_LE16(0);
+	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT);
+	cmd->seid = CPU_TO_LE16(seid);
 
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
@@ -2045,15 +2114,16 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
 			flags |= I40E_AQC_SET_VSI_PROMISC_TX;
 	}
 
-	cmd->promiscuous_flags = cpu_to_le16(flags);
+	cmd->promiscuous_flags = CPU_TO_LE16(flags);
 
-	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
 	if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) ||
-	    (hw->aq.api_maj_ver > 1))
-		cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX);
+	     (hw->aq.api_maj_ver > 1))
+		cmd->valid_flags |= CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_TX);
 
-	cmd->seid = cpu_to_le16(seid);
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+	cmd->seid = CPU_TO_LE16(seid);
+	status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
+					      cmd_details, true);
 
 	return status;
 }
@@ -2080,11 +2150,49 @@ i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
 	if (set)
 		flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
 
-	cmd->promiscuous_flags = cpu_to_le16(flags);
+	cmd->promiscuous_flags = CPU_TO_LE16(flags);
+
+	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
+
+	cmd->seid = CPU_TO_LE16(seid);
+	status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
+					      cmd_details, true);
+
+	return status;
+}
+
+/**
+ * i40e_aq_set_vsi_full_promiscuous
+ * @hw: pointer to the hw struct
+ * @seid: VSI number
+ * @set: set promiscuous enable/disable
+ * @cmd_details: pointer to command details structure or NULL
+**/
+i40e_status i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw,
+				u16 seid, bool set,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+	i40e_status status;
+	u16 flags = 0;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+		i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+	if (set)
+		flags = I40E_AQC_SET_VSI_PROMISC_UNICAST   |
+			I40E_AQC_SET_VSI_PROMISC_MULTICAST |
+			I40E_AQC_SET_VSI_PROMISC_BROADCAST;
+
+	cmd->promiscuous_flags = CPU_TO_LE16(flags);
 
-	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
+	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST   |
+				       I40E_AQC_SET_VSI_PROMISC_MULTICAST |
+				       I40E_AQC_SET_VSI_PROMISC_BROADCAST);
 
-	cmd->seid = cpu_to_le16(seid);
+	cmd->seid = CPU_TO_LE16(seid);
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
 	return status;
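
The new helper above sets unicast, multicast and broadcast promiscuous in one AQ command instead of three. A hedged usage sketch:

/* Illustrative: toggle full promiscuous on a VSI, e.g. around a debug run. */
static i40e_status example_toggle_promisc(struct i40e_hw *hw, u16 vsi_seid,
					  bool on)
{
	return i40e_aq_set_vsi_full_promiscuous(hw, vsi_seid, on, NULL);
}
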
@@ -2098,29 +2206,29 @@ i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
  * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
  * @cmd_details: pointer to command details structure or NULL
  **/
-enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
-							 u16 seid, bool enable,
-							 u16 vid,
+i40e_status i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+				u16 seid, bool enable, u16 vid,
 				struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
-	enum i40e_status_code status;
+	i40e_status status;
 	u16 flags = 0;
 
 	i40e_fill_default_direct_cmd_desc(&desc,
-					  i40e_aqc_opc_set_vsi_promiscuous_modes);
+					i40e_aqc_opc_set_vsi_promiscuous_modes);
 
 	if (enable)
 		flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
 
-	cmd->promiscuous_flags = cpu_to_le16(flags);
-	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
-	cmd->seid = cpu_to_le16(seid);
-	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+	cmd->promiscuous_flags = CPU_TO_LE16(flags);
+	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
+	cmd->seid = CPU_TO_LE16(seid);
+	cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
 
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+	status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
+					      cmd_details, true);
 
 	return status;
 }
@@ -2133,29 +2241,29 @@ enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
  * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
  * @cmd_details: pointer to command details structure or NULL
  **/
-enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
-							 u16 seid, bool enable,
-							 u16 vid,
+i40e_status i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+				u16 seid, bool enable, u16 vid,
 				struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
-	enum i40e_status_code status;
+	i40e_status status;
 	u16 flags = 0;
 
 	i40e_fill_default_direct_cmd_desc(&desc,
-					  i40e_aqc_opc_set_vsi_promiscuous_modes);
+					i40e_aqc_opc_set_vsi_promiscuous_modes);
 
 	if (enable)
 		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
 
-	cmd->promiscuous_flags = cpu_to_le16(flags);
-	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
-	cmd->seid = cpu_to_le16(seid);
-	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+	cmd->promiscuous_flags = CPU_TO_LE16(flags);
+	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+	cmd->seid = CPU_TO_LE16(seid);
+	cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
 
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+	status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
+					      cmd_details, true);
 
 	return status;
 }
@@ -2184,10 +2292,10 @@ i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
 	if (enable)
 		flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;
 
-	cmd->promiscuous_flags = cpu_to_le16(flags);
-	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
-	cmd->seid = cpu_to_le16(seid);
-	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+	cmd->promiscuous_flags = CPU_TO_LE16(flags);
+	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+	cmd->seid = CPU_TO_LE16(seid);
+	cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
 
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
@@ -2217,13 +2325,13 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
 
 	if (set_filter)
 		cmd->promiscuous_flags
-			    |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+			    |= CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
 	else
 		cmd->promiscuous_flags
-			    &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+			    &= CPU_TO_LE16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
 
-	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
-	cmd->seid = cpu_to_le16(seid);
+	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+	cmd->seid = CPU_TO_LE16(seid);
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
 	return status;
@@ -2237,8 +2345,8 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
  * @cmd_details: pointer to command details structure or NULL
  **/
 i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
-				       u16 seid, bool enable,
-				       struct i40e_asq_cmd_details *cmd_details)
+				u16 seid, bool enable,
+				struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
@@ -2251,9 +2359,9 @@ i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
 	if (enable)
 		flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
 
-	cmd->promiscuous_flags = cpu_to_le16(flags);
-	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN);
-	cmd->seid = cpu_to_le16(seid);
+	cmd->promiscuous_flags = CPU_TO_LE16(flags);
+	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_VLAN);
+	cmd->seid = CPU_TO_LE16(seid);
 
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
@@ -2281,20 +2389,20 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
 	i40e_fill_default_direct_cmd_desc(&desc,
 					  i40e_aqc_opc_get_vsi_parameters);
 
-	cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
+	cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->seid);
 
-	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
 
 	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
 				    sizeof(vsi_ctx->info), NULL);
 
-	if (status)
+	if (status != I40E_SUCCESS)
 		goto aq_get_vsi_params_exit;
 
-	vsi_ctx->seid = le16_to_cpu(resp->seid);
-	vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
-	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
-	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
+	vsi_ctx->seid = LE16_TO_CPU(resp->seid);
+	vsi_ctx->vsi_number = LE16_TO_CPU(resp->vsi_number);
+	vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used);
+	vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
 
 aq_get_vsi_params_exit:
 	return status;
@@ -2322,15 +2430,16 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
 
 	i40e_fill_default_direct_cmd_desc(&desc,
 					  i40e_aqc_opc_update_vsi_parameters);
-	cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
+	cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->seid);
 
-	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
 
-	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
-				    sizeof(vsi_ctx->info), cmd_details);
+	status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
+					      sizeof(vsi_ctx->info),
+					      cmd_details, true);
 
-	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
-	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
+	vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used);
+	vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
 
 	return status;
 }
@@ -2357,13 +2466,13 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
 
 	i40e_fill_default_direct_cmd_desc(&desc,
 					  i40e_aqc_opc_get_switch_config);
-	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
 	if (buf_size > I40E_AQ_LARGE_BUF)
-		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
-	scfg->seid = cpu_to_le16(*start_seid);
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+	scfg->seid = CPU_TO_LE16(*start_seid);
 
 	status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
-	*start_seid = le16_to_cpu(scfg->seid);
+	*start_seid = LE16_TO_CPU(scfg->seid);
 
 	return status;
 }
@@ -2372,26 +2481,31 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
  * i40e_aq_set_switch_config
  * @hw: pointer to the hardware structure
  * @flags: bit flag values to set
+ * @mode: cloud filter mode
  * @valid_flags: which bit flags to set
  * @cmd_details: pointer to command details structure or NULL
  *
  * Set switch configuration bits
  **/
-enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
-						u16 flags,
-						u16 valid_flags,
+i40e_status i40e_aq_set_switch_config(struct i40e_hw *hw,
+				u16 flags, u16 valid_flags, u8 mode,
 				struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_set_switch_config *scfg =
 		(struct i40e_aqc_set_switch_config *)&desc.params.raw;
-	enum i40e_status_code status;
+	i40e_status status;
 
 	i40e_fill_default_direct_cmd_desc(&desc,
 					  i40e_aqc_opc_set_switch_config);
-	scfg->flags = cpu_to_le16(flags);
-	scfg->valid_flags = cpu_to_le16(valid_flags);
-
+	scfg->flags = CPU_TO_LE16(flags);
+	scfg->valid_flags = CPU_TO_LE16(valid_flags);
+	scfg->mode = mode;
+	if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
+		scfg->switch_tag = CPU_TO_LE16(hw->switch_tag);
+		scfg->first_tag = CPU_TO_LE16(hw->first_tag);
+		scfg->second_tag = CPU_TO_LE16(hw->second_tag);
+	}
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
 	return status;
@@ -2424,17 +2538,17 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
 
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
-	if (!status) {
-		if (fw_major_version)
-			*fw_major_version = le16_to_cpu(resp->fw_major);
-		if (fw_minor_version)
-			*fw_minor_version = le16_to_cpu(resp->fw_minor);
-		if (fw_build)
-			*fw_build = le32_to_cpu(resp->fw_build);
-		if (api_major_version)
-			*api_major_version = le16_to_cpu(resp->api_major);
-		if (api_minor_version)
-			*api_minor_version = le16_to_cpu(resp->api_minor);
+	if (status == I40E_SUCCESS) {
+		if (fw_major_version != NULL)
+			*fw_major_version = LE16_TO_CPU(resp->fw_major);
+		if (fw_minor_version != NULL)
+			*fw_minor_version = LE16_TO_CPU(resp->fw_minor);
+		if (fw_build != NULL)
+			*fw_build = LE32_TO_CPU(resp->fw_build);
+		if (api_major_version != NULL)
+			*api_major_version = LE16_TO_CPU(resp->api_major);
+		if (api_minor_version != NULL)
+			*api_minor_version = LE16_TO_CPU(resp->api_minor);
 	}
 
 	return status;
@@ -2463,7 +2577,7 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
 
 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
 
-	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
+	desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
 	cmd->driver_major_ver = dv->major_version;
 	cmd->driver_minor_ver = dv->minor_version;
 	cmd->driver_build_ver = dv->build_version;
@@ -2486,18 +2600,18 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
  * @link_up: pointer to bool (true/false = linkup/linkdown)
  *
  * Variable link_up true if link is up, false if link is down.
- * The variable link_up is invalid if returned value of status != 0
+ * The variable link_up is invalid if the returned status is not I40E_SUCCESS
  *
  * Side effect: LinkStatusEvent reporting becomes enabled
  **/
 i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
 {
-	i40e_status status = 0;
+	i40e_status status = I40E_SUCCESS;
 
 	if (hw->phy.get_link_info) {
 		status = i40e_update_link_info(hw);
 
-		if (status)
+		if (status != I40E_SUCCESS)
 			i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
 				   status);
 	}
@@ -2514,7 +2628,7 @@ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
 i40e_status i40e_update_link_info(struct i40e_hw *hw)
 {
 	struct i40e_aq_get_phy_abilities_resp abilities;
-	i40e_status status = 0;
+	i40e_status status = I40E_SUCCESS;
 
 	status = i40e_aq_get_link_info(hw, true, NULL, NULL);
 	if (status)
@@ -2529,14 +2643,20 @@ i40e_status i40e_update_link_info(struct i40e_hw *hw)
 		if (status)
 			return status;
 
-		hw->phy.link_info.req_fec_info =
-			abilities.fec_cfg_curr_mod_ext_info &
-			(I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS);
+		if (abilities.fec_cfg_curr_mod_ext_info &
+		    I40E_AQ_ENABLE_FEC_AUTO)
+			hw->phy.link_info.req_fec_info =
+				(I40E_AQ_REQUEST_FEC_KR |
+				 I40E_AQ_REQUEST_FEC_RS);
+		else
+			hw->phy.link_info.req_fec_info =
+				abilities.fec_cfg_curr_mod_ext_info &
+				(I40E_AQ_REQUEST_FEC_KR |
+				 I40E_AQ_REQUEST_FEC_RS);
 
-		memcpy(hw->phy.link_info.module_type, &abilities.module_type,
-		       sizeof(hw->phy.link_info.module_type));
+		i40e_memcpy(hw->phy.link_info.module_type, &abilities.module_type,
+			sizeof(hw->phy.link_info.module_type), I40E_NONDMA_TO_NONDMA);
 	}
-
 	return status;
 }
 
@@ -2574,8 +2694,8 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
 
 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
 
-	cmd->uplink_seid = cpu_to_le16(uplink_seid);
-	cmd->downlink_seid = cpu_to_le16(downlink_seid);
+	cmd->uplink_seid = CPU_TO_LE16(uplink_seid);
+	cmd->downlink_seid = CPU_TO_LE16(downlink_seid);
 	cmd->enable_tcs = enabled_tc;
 	if (!uplink_seid)
 		veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
@@ -2588,12 +2708,12 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
 	if (!enable_stats)
 		veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS;
 
-	cmd->veb_flags = cpu_to_le16(veb_flags);
+	cmd->veb_flags = CPU_TO_LE16(veb_flags);
 
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
 	if (!status && veb_seid)
-		*veb_seid = le16_to_cpu(resp->veb_seid);
+		*veb_seid = LE16_TO_CPU(resp->veb_seid);
 
 	return status;
 }
@@ -2629,22 +2749,22 @@ i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
 
 	i40e_fill_default_direct_cmd_desc(&desc,
 					  i40e_aqc_opc_get_veb_parameters);
-	cmd_resp->seid = cpu_to_le16(veb_seid);
+	cmd_resp->seid = CPU_TO_LE16(veb_seid);
 
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 	if (status)
 		goto get_veb_exit;
 
 	if (switch_id)
-		*switch_id = le16_to_cpu(cmd_resp->switch_id);
+		*switch_id = LE16_TO_CPU(cmd_resp->switch_id);
 	if (statistic_index)
-		*statistic_index = le16_to_cpu(cmd_resp->statistic_index);
+		*statistic_index = LE16_TO_CPU(cmd_resp->statistic_index);
 	if (vebs_used)
-		*vebs_used = le16_to_cpu(cmd_resp->vebs_used);
+		*vebs_used = LE16_TO_CPU(cmd_resp->vebs_used);
 	if (vebs_free)
-		*vebs_free = le16_to_cpu(cmd_resp->vebs_free);
+		*vebs_free = LE16_TO_CPU(cmd_resp->vebs_free);
 	if (floating) {
-		u16 flags = le16_to_cpu(cmd_resp->veb_flags);
+		u16 flags = LE16_TO_CPU(cmd_resp->veb_flags);
 
 		if (flags & I40E_AQC_ADD_VEB_FLOATING)
 			*floating = true;
@@ -2684,22 +2804,22 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
 
 	/* prep the rest of the request */
 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
-	cmd->num_addresses = cpu_to_le16(count);
-	cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+	cmd->num_addresses = CPU_TO_LE16(count);
+	cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
 	cmd->seid[1] = 0;
 	cmd->seid[2] = 0;
 
 	for (i = 0; i < count; i++)
 		if (is_multicast_ether_addr(mv_list[i].mac_addr))
 			mv_list[i].flags |=
-			       cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
+			    CPU_TO_LE16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
 
-	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
 	if (buf_size > I40E_AQ_LARGE_BUF)
-		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
 
-	status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
-				       cmd_details);
+	status = i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
+					      cmd_details, true);
 
 	return status;
 }
@@ -2731,17 +2851,17 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
 
 	/* prep the rest of the request */
 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
-	cmd->num_addresses = cpu_to_le16(count);
-	cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+	cmd->num_addresses = CPU_TO_LE16(count);
+	cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
 	cmd->seid[1] = 0;
 	cmd->seid[2] = 0;
 
-	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
 	if (buf_size > I40E_AQ_LARGE_BUF)
-		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
 
-	status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
-				       cmd_details);
+	status = i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
+					      cmd_details, true);
 
 	return status;
 }
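
A hedged sketch of the add side for a single filter; I40E_AQC_MACVLAN_ADD_PERFECT_MATCH and ether_addr_copy() are assumed from the driver headers and the kernel's etherdevice helpers respectively:

/* Illustrative: add one perfect-match MAC filter (no VLAN) to a VSI. */
static i40e_status example_add_mac_filter(struct i40e_hw *hw, u16 vsi_seid,
					  const u8 *mac)
{
	struct i40e_aqc_add_macvlan_element_data elem = {0};

	ether_addr_copy(elem.mac_addr, mac);
	elem.flags = CPU_TO_LE16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);

	return i40e_aq_add_macvlan(hw, vsi_seid, &elem, 1, NULL);
}
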
@@ -2757,17 +2877,17 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
  * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
  * @cmd_details: pointer to command details structure or NULL
  * @rule_id: Rule ID returned from FW
- * @rule_used: Number of rules used in internal switch
- * @rule_free: Number of rules free in internal switch
+ * @rules_used: Number of rules used in internal switch
+ * @rules_free: Number of rules free in internal switch
  *
  * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
  * VEBs/VEPA elements only
  **/
 static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
-				u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
-				u16 count, __le16 *mr_list,
-				struct i40e_asq_cmd_details *cmd_details,
-				u16 *rule_id, u16 *rules_used, u16 *rules_free)
+			u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
+			u16 count, __le16 *mr_list,
+			struct i40e_asq_cmd_details *cmd_details,
+			u16 *rule_id, u16 *rules_used, u16 *rules_free)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_add_delete_mirror_rule *cmd =
@@ -2781,29 +2901,29 @@ static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
 
 	/* prep the rest of the request */
 	i40e_fill_default_direct_cmd_desc(&desc, opcode);
-	cmd->seid = cpu_to_le16(sw_seid);
-	cmd->rule_type = cpu_to_le16(rule_type &
+	cmd->seid = CPU_TO_LE16(sw_seid);
+	cmd->rule_type = CPU_TO_LE16(rule_type &
 				     I40E_AQC_MIRROR_RULE_TYPE_MASK);
-	cmd->num_entries = cpu_to_le16(count);
+	cmd->num_entries = CPU_TO_LE16(count);
 	/* Dest VSI for add, rule_id for delete */
-	cmd->destination = cpu_to_le16(id);
+	cmd->destination = CPU_TO_LE16(id);
 	if (mr_list) {
-		desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
+		desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF |
 						I40E_AQ_FLAG_RD));
 		if (buf_size > I40E_AQ_LARGE_BUF)
-			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+			desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
 	}
 
 	status = i40e_asq_send_command(hw, &desc, mr_list, buf_size,
 				       cmd_details);
-	if (!status ||
+	if (status == I40E_SUCCESS ||
 	    hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) {
 		if (rule_id)
-			*rule_id = le16_to_cpu(resp->rule_id);
+			*rule_id = LE16_TO_CPU(resp->rule_id);
 		if (rules_used)
-			*rules_used = le16_to_cpu(resp->mirror_rules_used);
+			*rules_used = LE16_TO_CPU(resp->mirror_rules_used);
 		if (rules_free)
-			*rules_free = le16_to_cpu(resp->mirror_rules_free);
+			*rules_free = LE16_TO_CPU(resp->mirror_rules_free);
 	}
 	return status;
 }
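
A hedged sketch of adding one rule through the wrapper documented below; the rule-type constant and parameter order are assumptions based on the kernel-doc, not confirmed by this hunk:

/* Illustrative: mirror one VSI's ingress traffic to a destination VSI. */
static i40e_status example_add_ingress_mirror(struct i40e_hw *hw, u16 sw_seid,
					      u16 src_vsi_seid, u16 dst_vsi,
					      u16 *rule_id)
{
	__le16 mr_list[1] = { CPU_TO_LE16(src_vsi_seid) };
	u16 rules_used = 0, rules_free = 0;

	return i40e_aq_add_mirrorrule(hw, sw_seid,
				      I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS,
				      dst_vsi, 1, mr_list, NULL, rule_id,
				      &rules_used, &rules_free);
}
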
@@ -2818,8 +2938,8 @@ static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
  * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
  * @cmd_details: pointer to command details structure or NULL
  * @rule_id: Rule ID returned from FW
- * @rule_used: Number of rules used in internal switch
- * @rule_free: Number of rules free in internal switch
+ * @rules_used: Number of rules used in internal switch
+ * @rules_free: Number of rules free in internal switch
  *
  * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
  **/
@@ -2849,8 +2969,8 @@ i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
  *		add_mirrorrule.
  * @mr_list: list of mirrored VLAN IDs to be removed
  * @cmd_details: pointer to command details structure or NULL
- * @rule_used: Number of rules used in internal switch
- * @rule_free: Number of rules free in internal switch
+ * @rules_used: Number of rules used in internal switch
+ * @rules_free: Number of rules free in internal switch
  *
  * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
  **/
@@ -2877,7 +2997,7 @@ i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
 /**
  * i40e_aq_send_msg_to_vf
  * @hw: pointer to the hardware structure
- * @vfid: VF id to send msg
+ * @vfid: vf id to send msg
  * @v_opcode: opcodes for VF-PF communication
  * @v_retval: return error code
  * @msg: pointer to the msg buffer
@@ -2896,16 +3016,16 @@ i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
 	i40e_status status;
 
 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
-	cmd->id = cpu_to_le32(vfid);
-	desc.cookie_high = cpu_to_le32(v_opcode);
-	desc.cookie_low = cpu_to_le32(v_retval);
-	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
+	cmd->id = CPU_TO_LE32(vfid);
+	desc.cookie_high = CPU_TO_LE32(v_opcode);
+	desc.cookie_low = CPU_TO_LE32(v_retval);
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_SI);
 	if (msglen) {
-		desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
+		desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF |
 						I40E_AQ_FLAG_RD));
 		if (msglen > I40E_AQ_LARGE_BUF)
-			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
-		desc.datalen = cpu_to_le16(msglen);
+			desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+		desc.datalen = CPU_TO_LE16(msglen);
 	}
 	status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
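
A hedged sketch of the common PF-side reply path using the sender above; a zero-length message carries only the opcode and return value in the descriptor cookies:

/* Illustrative: acknowledge a VF request with no payload. */
static i40e_status example_ack_vf(struct i40e_hw *hw, u16 vfid, u32 v_opcode)
{
	return i40e_aq_send_msg_to_vf(hw, vfid, v_opcode, 0 /* success */,
				      NULL, 0, NULL);
}
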
 
@@ -2935,13 +3055,13 @@ i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
 
 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
 
-	cmd_resp->address = cpu_to_le32(reg_addr);
+	cmd_resp->address = CPU_TO_LE32(reg_addr);
 
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
-	if (!status) {
-		*reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) |
-			   (u64)le32_to_cpu(cmd_resp->value_low);
+	if (status == I40E_SUCCESS) {
+		*reg_val = ((u64)LE32_TO_CPU(cmd_resp->value_high) << 32) |
+			   (u64)LE32_TO_CPU(cmd_resp->value_low);
 	}
 
 	return status;
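
Usage of the AQ register read is a one-liner; a sketch, useful where a register must be read through firmware rather than by direct MMIO:

/* Illustrative: read a 64-bit register value via the AdminQ debug command. */
static i40e_status example_aq_read_reg(struct i40e_hw *hw, u32 reg_addr,
				       u64 *val)
{
	return i40e_aq_debug_read_register(hw, reg_addr, val, NULL);
}
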
@@ -2957,8 +3077,8 @@ i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
  * Write to a register using the admin queue commands
  **/
 i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
-					u32 reg_addr, u64 reg_val,
-					struct i40e_asq_cmd_details *cmd_details)
+				u32 reg_addr, u64 reg_val,
+				struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_debug_reg_read_write *cmd =
@@ -2967,9 +3087,9 @@ i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
 
 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
 
-	cmd->address = cpu_to_le32(reg_addr);
-	cmd->value_high = cpu_to_le32((u32)(reg_val >> 32));
-	cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF));
+	cmd->address = CPU_TO_LE32(reg_addr);
+	cmd->value_high = CPU_TO_LE32((u32)(reg_val >> 32));
+	cmd->value_low = CPU_TO_LE32((u32)(reg_val & 0xFFFFFFFF));
 
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
@@ -3000,9 +3120,9 @@ i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
 
 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
 
-	cmd_resp->resource_id = cpu_to_le16(resource);
-	cmd_resp->access_type = cpu_to_le16(access);
-	cmd_resp->resource_number = cpu_to_le32(sdp_number);
+	cmd_resp->resource_id = CPU_TO_LE16(resource);
+	cmd_resp->access_type = CPU_TO_LE16(access);
+	cmd_resp->resource_number = CPU_TO_LE32(sdp_number);
 
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 	/* The completion specifies the maximum time in ms that the driver
@@ -3011,8 +3131,8 @@ i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
 	 * busy return value and the timeout field indicates the maximum time
 	 * the current owner of the resource has to free it.
 	 */
-	if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
-		*timeout = le32_to_cpu(cmd_resp->timeout);
+	if (status == I40E_SUCCESS || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
+		*timeout = LE32_TO_CPU(cmd_resp->timeout);
 
 	return status;
 }
@@ -3038,8 +3158,8 @@ i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
 
 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
 
-	cmd->resource_id = cpu_to_le16(resource);
-	cmd->resource_number = cpu_to_le32(sdp_number);
+	cmd->resource_id = CPU_TO_LE16(resource);
+	cmd->resource_number = CPU_TO_LE32(sdp_number);
 
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
@@ -3080,12 +3200,12 @@ i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
 	if (last_command)
 		cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
 	cmd->module_pointer = module_pointer;
-	cmd->offset = cpu_to_le32(offset);
-	cmd->length = cpu_to_le16(length);
+	cmd->offset = CPU_TO_LE32(offset);
+	cmd->length = CPU_TO_LE16(length);
 
-	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
 	if (length > I40E_AQ_LARGE_BUF)
-		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
 
 	status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
 
@@ -3093,6 +3213,100 @@ i40e_aq_read_nvm_exit:
 	return status;
 }
 
+/**
+ * i40e_aq_read_nvm_config - read an nvm config block
+ * @hw: pointer to the hw struct
+ * @cmd_flags: NVM access admin command bits
+ * @field_id: field or feature id
+ * @data: buffer for result
+ * @buf_size: buffer size
+ * @element_count: pointer to count of elements read by FW
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_read_nvm_config(struct i40e_hw *hw,
+				u8 cmd_flags, u32 field_id, void *data,
+				u16 buf_size, u16 *element_count,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_nvm_config_read *cmd =
+		(struct i40e_aqc_nvm_config_read *)&desc.params.raw;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_read);
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF));
+	if (buf_size > I40E_AQ_LARGE_BUF)
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+	cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
+	cmd->element_id = CPU_TO_LE16((u16)(0xffff & field_id));
+	if (cmd_flags & I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK)
+		cmd->element_id_msw = CPU_TO_LE16((u16)(field_id >> 16));
+	else
+		cmd->element_id_msw = 0;
+
+	status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details);
+
+	if (!status && element_count)
+		*element_count = LE16_TO_CPU(cmd->element_count);
+
+	return status;
+}
+
+/**
+ * i40e_aq_write_nvm_config - write an nvm config block
+ * @hw: pointer to the hw struct
+ * @cmd_flags: NVM access admin command bits
+ * @data: buffer for result
+ * @buf_size: buffer size
+ * @element_count: count of elements to be written
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_write_nvm_config(struct i40e_hw *hw,
+				u8 cmd_flags, void *data, u16 buf_size,
+				u16 element_count,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_nvm_config_write *cmd =
+		(struct i40e_aqc_nvm_config_write *)&desc.params.raw;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_write);
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	if (buf_size > I40E_AQ_LARGE_BUF)
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+	cmd->element_count = CPU_TO_LE16(element_count);
+	cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
+	status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details);
+
+	return status;
+}
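
A hedged usage sketch for the NVM config read added above; passing zero cmd_flags (a plain, non-immediate field read) is an assumption for the sketch:

/* Illustrative: read one NVM config field into a caller-supplied buffer. */
static i40e_status example_read_nvm_field(struct i40e_hw *hw, u32 field_id,
					  void *buf, u16 buf_size)
{
	u16 elem_count = 0;

	return i40e_aq_read_nvm_config(hw, 0, field_id, buf, buf_size,
				       &elem_count, NULL);
}
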
+
+/**
+ * i40e_aq_oem_post_update - triggers an OEM specific flow after update
+ * @hw: pointer to the hw struct
+ * @buff: buffer for result
+ * @buff_size: buffer size
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_oem_post_update(struct i40e_hw *hw,
+				void *buff, u16 buff_size,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_oem_post_update);
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+	if (status && LE16_TO_CPU(desc.retval) == I40E_AQ_RC_ESRCH)
+		status = I40E_ERR_NOT_IMPLEMENTED;
+
+	return status;
+}
+
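+/* Illustrative usage sketch, not part of this patch: probe for an OEM
+ * post-update flow. I40E_ERR_NOT_IMPLEMENTED (mapped from ESRCH above)
+ * means the NVM carries no such flow and can be safely ignored.
+ *
+ *	i40e_status ret = i40e_aq_oem_post_update(hw, NULL, 0, NULL);
+ *
+ *	if (ret && ret != I40E_ERR_NOT_IMPLEMENTED)
+ *		i40e_debug(hw, I40E_DEBUG_ALL, "OEM post update failed\n");
+ */
+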
 /**
  * i40e_aq_erase_nvm
  * @hw: pointer to the hw struct
@@ -3105,8 +3319,8 @@ i40e_aq_read_nvm_exit:
  * Erase the NVM sector using the admin queue commands
  **/
 i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
-			      u32 offset, u16 length, bool last_command,
-			      struct i40e_asq_cmd_details *cmd_details)
+				u32 offset, u16 length, bool last_command,
+				struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_nvm_update *cmd =
@@ -3125,8 +3339,8 @@ i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
 	if (last_command)
 		cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
 	cmd->module_pointer = module_pointer;
-	cmd->offset = cpu_to_le32(offset);
-	cmd->length = cpu_to_le16(length);
+	cmd->offset = CPU_TO_LE32(offset);
+	cmd->length = CPU_TO_LE16(length);
 
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
@@ -3151,29 +3365,33 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
 	u32 valid_functions, num_functions;
 	u32 number, logical_id, phys_id;
 	struct i40e_hw_capabilities *p;
+	i40e_status status;
+	u16 id, ocp_cfg_word0;
 	u8 major_rev;
 	u32 i = 0;
-	u16 id;
 
 	cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
 
 	if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
-		p = &hw->dev_caps;
+		p = (struct i40e_hw_capabilities *)&hw->dev_caps;
 	else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
-		p = &hw->func_caps;
+		p = (struct i40e_hw_capabilities *)&hw->func_caps;
 	else
 		return;
 
 	for (i = 0; i < cap_count; i++, cap++) {
-		id = le16_to_cpu(cap->id);
-		number = le32_to_cpu(cap->number);
-		logical_id = le32_to_cpu(cap->logical_id);
-		phys_id = le32_to_cpu(cap->phys_id);
+		id = LE16_TO_CPU(cap->id);
+		number = LE32_TO_CPU(cap->number);
+		logical_id = LE32_TO_CPU(cap->logical_id);
+		phys_id = LE32_TO_CPU(cap->phys_id);
 		major_rev = cap->major_rev;
 
 		switch (id) {
 		case I40E_AQ_CAP_ID_SWITCH_MODE:
 			p->switch_mode = number;
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: Switch mode = %d\n",
+				   p->switch_mode);
 			break;
 		case I40E_AQ_CAP_ID_MNG_MODE:
 			p->management_mode = number;
@@ -3185,38 +3403,67 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
 			} else {
 				p->mng_protocols_over_mctp = 0;
 			}
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: Management Mode = %d\n",
+				   p->management_mode);
 			break;
 		case I40E_AQ_CAP_ID_NPAR_ACTIVE:
 			p->npar_enable = number;
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: NPAR enable = %d\n",
+				   p->npar_enable);
 			break;
 		case I40E_AQ_CAP_ID_OS2BMC_CAP:
 			p->os2bmc = number;
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: OS2BMC = %d\n", p->os2bmc);
 			break;
 		case I40E_AQ_CAP_ID_FUNCTIONS_VALID:
 			p->valid_functions = number;
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: Valid Functions = %d\n",
+				   p->valid_functions);
 			break;
 		case I40E_AQ_CAP_ID_SRIOV:
 			if (number == 1)
 				p->sr_iov_1_1 = true;
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: SR-IOV = %d\n",
+				   p->sr_iov_1_1);
 			break;
 		case I40E_AQ_CAP_ID_VF:
 			p->num_vfs = number;
 			p->vf_base_id = logical_id;
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: VF count = %d\n",
+				   p->num_vfs);
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: VF base_id = %d\n",
+				   p->vf_base_id);
 			break;
 		case I40E_AQ_CAP_ID_VMDQ:
 			if (number == 1)
 				p->vmdq = true;
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: VMDQ = %d\n", p->vmdq);
 			break;
 		case I40E_AQ_CAP_ID_8021QBG:
 			if (number == 1)
 				p->evb_802_1_qbg = true;
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: 802.1Qbg = %d\n", number);
 			break;
 		case I40E_AQ_CAP_ID_8021QBR:
 			if (number == 1)
 				p->evb_802_1_qbh = true;
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: 802.1Qbh = %d\n", number);
 			break;
 		case I40E_AQ_CAP_ID_VSI:
 			p->num_vsis = number;
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: VSI count = %d\n",
+				   p->num_vsis);
 			break;
 		case I40E_AQ_CAP_ID_DCB:
 			if (number == 1) {
@@ -3224,27 +3471,56 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
 				p->enabled_tcmap = logical_id;
 				p->maxtc = phys_id;
 			}
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: DCB = %d\n", p->dcb);
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: TC Mapping = %d\n",
+				   logical_id);
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: TC Max = %d\n", p->maxtc);
 			break;
 		case I40E_AQ_CAP_ID_FCOE:
 			if (number == 1)
 				p->fcoe = true;
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: FCOE = %d\n", p->fcoe);
 			break;
 		case I40E_AQ_CAP_ID_ISCSI:
 			if (number == 1)
 				p->iscsi = true;
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: iSCSI = %d\n", p->iscsi);
 			break;
 		case I40E_AQ_CAP_ID_RSS:
 			p->rss = true;
 			p->rss_table_size = number;
 			p->rss_table_entry_width = logical_id;
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: RSS = %d\n", p->rss);
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: RSS table size = %d\n",
+				   p->rss_table_size);
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: RSS table width = %d\n",
+				   p->rss_table_entry_width);
 			break;
 		case I40E_AQ_CAP_ID_RXQ:
 			p->num_rx_qp = number;
 			p->base_queue = phys_id;
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: Rx QP = %d\n", number);
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: base_queue = %d\n",
+				   p->base_queue);
 			break;
 		case I40E_AQ_CAP_ID_TXQ:
 			p->num_tx_qp = number;
 			p->base_queue = phys_id;
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: Tx QP = %d\n", number);
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: base_queue = %d\n",
+				   p->base_queue);
 			break;
 		case I40E_AQ_CAP_ID_MSIX:
 			p->num_msix_vectors = number;
@@ -3254,6 +3530,9 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
 			break;
 		case I40E_AQ_CAP_ID_VF_MSIX:
 			p->num_msix_vectors_vf = number;
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: MSIX VF vector count = %d\n",
+				   p->num_msix_vectors_vf);
 			break;
 		case I40E_AQ_CAP_ID_FLEX10:
 			if (major_rev == 1) {
@@ -3270,41 +3549,72 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
 			}
 			p->flex10_mode = logical_id;
 			p->flex10_status = phys_id;
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: Flex10 mode = %d\n",
+				   p->flex10_mode);
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: Flex10 status = %d\n",
+				   p->flex10_status);
 			break;
 		case I40E_AQ_CAP_ID_CEM:
 			if (number == 1)
 				p->mgmt_cem = true;
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: CEM = %d\n", p->mgmt_cem);
 			break;
 		case I40E_AQ_CAP_ID_IWARP:
 			if (number == 1)
 				p->iwarp = true;
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: iWARP = %d\n", p->iwarp);
 			break;
 		case I40E_AQ_CAP_ID_LED:
 			if (phys_id < I40E_HW_CAP_MAX_GPIO)
 				p->led[phys_id] = true;
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: LED - PIN %d\n", phys_id);
 			break;
 		case I40E_AQ_CAP_ID_SDP:
 			if (phys_id < I40E_HW_CAP_MAX_GPIO)
 				p->sdp[phys_id] = true;
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: SDP - PIN %d\n", phys_id);
 			break;
 		case I40E_AQ_CAP_ID_MDIO:
 			if (number == 1) {
 				p->mdio_port_num = phys_id;
 				p->mdio_port_mode = logical_id;
 			}
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: MDIO port number = %d\n",
+				   p->mdio_port_num);
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: MDIO port mode = %d\n",
+				   p->mdio_port_mode);
 			break;
 		case I40E_AQ_CAP_ID_1588:
 			if (number == 1)
 				p->ieee_1588 = true;
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: IEEE 1588 = %d\n",
+				   p->ieee_1588);
 			break;
 		case I40E_AQ_CAP_ID_FLOW_DIRECTOR:
 			p->fd = true;
 			p->fd_filters_guaranteed = number;
 			p->fd_filters_best_effort = logical_id;
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: Flow Director = 1\n");
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: Guaranteed FD filters = %d\n",
+				   p->fd_filters_guaranteed);
 			break;
 		case I40E_AQ_CAP_ID_WSR_PROT:
 			p->wr_csr_prot = (u64)number;
 			p->wr_csr_prot |= (u64)logical_id << 32;
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: wr_csr_prot = 0x%llX\n\n",
+				   (p->wr_csr_prot & 0xffff));
 			break;
 		case I40E_AQ_CAP_ID_NVM_MGMT:
 			if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
@@ -3312,6 +3622,19 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
 			if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
 				p->update_disabled = true;
 			break;
+		case I40E_AQ_CAP_ID_WOL_AND_PROXY:
+			hw->num_wol_proxy_filters = (u16)number;
+			hw->wol_proxy_vsi_seid = (u16)logical_id;
+			p->apm_wol_support = phys_id & I40E_WOL_SUPPORT_MASK;
+			if (phys_id & I40E_ACPI_PROGRAMMING_METHOD_MASK)
+				p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK;
+			else
+				p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_HW_FVL;
+			p->proxy_support = (phys_id & I40E_PROXY_SUPPORT_MASK) ? 1 : 0;
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: WOL proxy filters = %d\n",
+				   hw->num_wol_proxy_filters);
+			break;
 		default:
 			break;
 		}
@@ -3320,11 +3643,8 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
 	if (p->fcoe)
 		i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
 
-	/* Software override ensuring FCoE is disabled if npar or mfp
-	 * mode because it is not supported in these modes.
-	 */
-	if (p->npar_enable || p->flex10_enable)
-		p->fcoe = false;
+	/* Always disable FCoE if compiled without the I40E_FCOE_ENA flag */
+	p->fcoe = false;
 
 	/* count the enabled ports (aka the "not disabled" ports) */
 	hw->num_ports = 0;
@@ -3340,6 +3660,26 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
 			hw->num_ports++;
 	}
 
+	/* OCP cards case: if a mezz is removed the ethernet port is at
+	 * disabled state in PRTGEN_CNF register. Additional NVM read is
+	 * needed in order to check if we are dealing with OCP card.
+	 * Those cards have 4 PFs at minimum, so using PRTGEN_CNF for counting
+	 * physical ports results in wrong partition id calculation and thus
+	 * not supporting WoL.
+	 */
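+	/* Note: i40e_aq_read_nvm() takes a byte offset, while shadow RAM
+	 * pointers such as I40E_SR_OCP_CFG_WORD0 are word indexes, hence
+	 * the "2 *" conversion below.
+	 */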
+	if (hw->mac.type == I40E_MAC_X722) {
+		if (i40e_acquire_nvm(hw, I40E_RESOURCE_READ) == I40E_SUCCESS) {
+			status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR,
+						  2 * I40E_SR_OCP_CFG_WORD0,
+						  sizeof(ocp_cfg_word0),
+						  &ocp_cfg_word0, true, NULL);
+			if (status == I40E_SUCCESS &&
+			    (ocp_cfg_word0 & I40E_SR_OCP_ENABLED))
+				hw->num_ports = 4;
+			i40e_release_nvm(hw);
+		}
+	}
+
 	valid_functions = p->valid_functions;
 	num_functions = 0;
 	while (valid_functions) {
@@ -3380,7 +3720,7 @@ i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
 {
 	struct i40e_aqc_list_capabilites *cmd;
 	struct i40e_aq_desc desc;
-	i40e_status status = 0;
+	i40e_status status = I40E_SUCCESS;
 
 	cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
 
@@ -3392,17 +3732,17 @@ i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
 
 	i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
 
-	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
 	if (buff_size > I40E_AQ_LARGE_BUF)
-		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
 
 	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
-	*data_size = le16_to_cpu(desc.datalen);
+	*data_size = LE16_TO_CPU(desc.datalen);
 
 	if (status)
 		goto exit;
 
-	i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count),
+	i40e_parse_discover_capabilities(hw, buff, LE32_TO_CPU(cmd->count),
 					 list_type_opc);
 
 exit:
@@ -3417,14 +3757,15 @@ exit:
  * @length: length of the section to be written (in bytes from the offset)
  * @data: command buffer (size [bytes] = length)
  * @last_command: tells if this is the last command in a series
+ * @preservation_flags: Preservation mode flags
  * @cmd_details: pointer to command details structure or NULL
  *
  * Update the NVM using the admin queue commands
  **/
 i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
-			       u32 offset, u16 length, void *data,
-			       bool last_command,
-			       struct i40e_asq_cmd_details *cmd_details)
+				u32 offset, u16 length, void *data,
+				bool last_command, u8 preservation_flags,
+				struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_nvm_update *cmd =
@@ -3442,13 +3783,23 @@ i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
 	/* If this is the last command in a series, set the proper flag. */
 	if (last_command)
 		cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+	if (hw->mac.type == I40E_MAC_X722) {
+		if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
+			cmd->command_flags |=
+				(I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
+				 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
+		else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
+			cmd->command_flags |=
+				(I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
+				 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
+	}
 	cmd->module_pointer = module_pointer;
-	cmd->offset = cpu_to_le32(offset);
-	cmd->length = cpu_to_le16(length);
+	cmd->offset = CPU_TO_LE32(offset);
+	cmd->length = CPU_TO_LE16(length);
 
-	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
 	if (length > I40E_AQ_LARGE_BUF)
-		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
 
 	status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
 
@@ -3457,58 +3808,152 @@ i40e_aq_update_nvm_exit:
 }
 
 /**
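+/* Illustrative usage sketch, not part of this patch: a single-command NVM
+ * write asking firmware to preserve all preservable fields. Module and
+ * offset values are placeholders; per the code above, the preservation
+ * flags are only encoded for X722 parts.
+ *
+ *	ret = i40e_aq_update_nvm(hw, 0, 0, len, data, true,
+ *				 I40E_NVM_PRESERVATION_FLAGS_ALL, NULL);
+ */
+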
- * i40e_aq_get_lldp_mib
+ * i40e_aq_rearrange_nvm
  * @hw: pointer to the hw struct
- * @bridge_type: type of bridge requested
- * @mib_type: Local, Remote or both Local and Remote MIBs
- * @buff: pointer to a user supplied buffer to store the MIB block
- * @buff_size: size of the buffer (in bytes)
- * @local_len : length of the returned Local LLDP MIB
- * @remote_len: length of the returned Remote LLDP MIB
+ * @rearrange_nvm: defines direction of rearrangement
  * @cmd_details: pointer to command details structure or NULL
  *
- * Requests the complete LLDP MIB (entire packet).
+ * Rearrange NVM structure, available only for transition FW
  **/
-i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
-				u8 mib_type, void *buff, u16 buff_size,
-				u16 *local_len, u16 *remote_len,
+i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
+				u8 rearrange_nvm,
 				struct i40e_asq_cmd_details *cmd_details)
 {
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_lldp_get_mib *cmd =
-		(struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
-	struct i40e_aqc_lldp_get_mib *resp =
-		(struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+	struct i40e_aqc_nvm_update *cmd;
 	i40e_status status;
+	struct i40e_aq_desc desc;
 
-	if (buff_size == 0 || !buff)
-		return I40E_ERR_PARAM;
-
-	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
-	/* Indirect Command */
-	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+	cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
 
-	cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
-	cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
-		       I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
 
-	desc.datalen = cpu_to_le16(buff_size);
+	rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT |
+			 I40E_AQ_NVM_REARRANGE_TO_STRUCT);
 
-	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+	if (!rearrange_nvm) {
+		status = I40E_ERR_PARAM;
+		goto i40e_aq_rearrange_nvm_exit;
+	}
+
+	cmd->command_flags |= rearrange_nvm;
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+i40e_aq_rearrange_nvm_exit:
+	return status;
+}
+
+/**
+ * i40e_aq_nvm_progress
+ * @hw: pointer to the hw struct
+ * @progress: pointer to progress returned from AQ
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Gets progress of flash rearrangement process
+ **/
+i40e_status i40e_aq_nvm_progress(struct i40e_hw *hw, u8 *progress,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	i40e_status status;
+	struct i40e_aq_desc desc;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_progress);
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+	*progress = desc.params.raw[0];
+	return status;
+}
+
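+/* Illustrative usage sketch, not part of this patch: request a flat NVM
+ * layout, then poll the rearrangement. Treating a zero progress value as
+ * completion and the 100 ms cadence are assumptions for the example.
+ *
+ *	u8 progress = 0;
+ *
+ *	if (i40e_aq_rearrange_nvm(hw, I40E_AQ_NVM_REARRANGE_TO_FLAT,
+ *				  NULL) == I40E_SUCCESS)
+ *		while (i40e_aq_nvm_progress(hw, &progress, NULL) ==
+ *		       I40E_SUCCESS && progress)
+ *			msleep(100);
+ */
+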
+/**
+ * i40e_aq_get_lldp_mib
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge requested
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buff: pointer to a user supplied buffer to store the MIB block
+ * @buff_size: size of the buffer (in bytes)
+ * @local_len : length of the returned Local LLDP MIB
+ * @remote_len: length of the returned Remote LLDP MIB
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Requests the complete LLDP MIB (entire packet).
+ **/
+i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+				u8 mib_type, void *buff, u16 buff_size,
+				u16 *local_len, u16 *remote_len,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_lldp_get_mib *cmd =
+		(struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+	struct i40e_aqc_lldp_get_mib *resp =
+		(struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+	i40e_status status;
+
+	if (buff_size == 0 || !buff)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
+	/* Indirect Command */
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+
+	cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
+	cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+		       I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+
+	desc.datalen = CPU_TO_LE16(buff_size);
+
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
 	if (buff_size > I40E_AQ_LARGE_BUF)
-		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
 
 	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
 	if (!status) {
 		if (local_len != NULL)
-			*local_len = le16_to_cpu(resp->local_len);
+			*local_len = LE16_TO_CPU(resp->local_len);
 		if (remote_len != NULL)
-			*remote_len = le16_to_cpu(resp->remote_len);
+			*remote_len = LE16_TO_CPU(resp->remote_len);
 	}
 
 	return status;
 }
 
+ /**
+ * i40e_aq_set_lldp_mib - Set the LLDP MIB
+ * @hw: pointer to the hw struct
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buff: pointer to a user supplied buffer to store the MIB block
+ * @buff_size: size of the buffer (in bytes)
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set the LLDP MIB.
+ **/
+i40e_status i40e_aq_set_lldp_mib(struct i40e_hw *hw,
+				u8 mib_type, void *buff, u16 buff_size,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_lldp_set_local_mib *cmd =
+		(struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
+	i40e_status status;
+
+	if (buff_size == 0 || !buff)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+				i40e_aqc_opc_lldp_set_local_mib);
+	/* Indirect Command */
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	if (buff_size > I40E_AQ_LARGE_BUF)
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+	desc.datalen = CPU_TO_LE16(buff_size);
+
+	cmd->type = mib_type;
+	cmd->length = CPU_TO_LE16(buff_size);
+	cmd->address_high = CPU_TO_LE32(upper_32_bits((u64)buff));
+	cmd->address_low = CPU_TO_LE32(lower_32_bits((u64)buff));
+
+	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+	return status;
+}
+
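+/* Illustrative usage sketch, not part of this patch: push a locally built
+ * MIB block to firmware. mib_type 0 (local MIB) is an assumption for the
+ * example, and lldpmib/mib_len are caller-side placeholders.
+ *
+ *	ret = i40e_aq_set_lldp_mib(hw, 0, lldpmib, mib_len, NULL);
+ *	if (ret)
+ *		i40e_debug(hw, I40E_DEBUG_ALL, "set local MIB failed\n");
+ */
+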
 /**
  * i40e_aq_cfg_lldp_mib_change_event
  * @hw: pointer to the hw struct
@@ -3537,15 +3982,55 @@ i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
 	return status;
 }
 
+/**
+ * i40e_aq_restore_lldp
+ * @hw: pointer to the hw struct
+ * @setting: pointer to factory setting variable or NULL
+ * @restore: True if factory settings should be restored
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Restore LLDP Agent factory settings if @restore set to True. In other case
+ * only returns factory setting in AQ response.
+ **/
+enum i40e_status_code
+i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
+		     struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_lldp_restore *cmd =
+		(struct i40e_aqc_lldp_restore *)&desc.params.raw;
+	i40e_status status;
+
+	if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
+		i40e_debug(hw, I40E_DEBUG_ALL,
+			   "Restore LLDP not supported by current FW version.\n");
+		return I40E_ERR_DEVICE_NOT_SUPPORTED;
+	}
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
+
+	if (restore)
+		cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	if (setting)
+		*setting = cmd->command & 1;
+
+	return status;
+}
+
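+/* Illustrative usage sketch, not part of this patch: read the factory
+ * LLDP setting first (restore == false only reports it), then apply it
+ * with a second call. Error handling is elided.
+ *
+ *	u8 factory = 0;
+ *
+ *	if (i40e_aq_restore_lldp(hw, &factory, false, NULL) == I40E_SUCCESS)
+ *		i40e_aq_restore_lldp(hw, NULL, true, NULL);
+ */
+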
 /**
  * i40e_aq_stop_lldp
  * @hw: pointer to the hw struct
  * @shutdown_agent: True if LLDP Agent needs to be Shutdown
+ * @persist: True if stop of LLDP should be persistent across power cycles
  * @cmd_details: pointer to command details structure or NULL
  *
  * Stop or Shutdown the embedded LLDP Agent
  **/
 i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+				bool persist,
 				struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
@@ -3558,6 +4043,14 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
 	if (shutdown_agent)
 		cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
 
+	if (persist) {
+		if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
+			cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST;
+		else
+			i40e_debug(hw, I40E_DEBUG_ALL,
+				   "Persistent Stop LLDP not supported by current FW version.\n");
+	}
+
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
 	return status;
@@ -3566,11 +4059,13 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
 /**
  * i40e_aq_start_lldp
  * @hw: pointer to the hw struct
+ * @persist: True if start of LLDP should be persistent across power cycles
  * @cmd_details: pointer to command details structure or NULL
  *
  * Start the embedded LLDP Agent on all ports.
  **/
 i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
+				bool persist,
 				struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
@@ -3582,6 +4077,45 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
 
 	cmd->command = I40E_AQ_LLDP_AGENT_START;
 
+	if (persist) {
+		if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
+			cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST;
+		else
+			i40e_debug(hw, I40E_DEBUG_ALL,
+				   "Persistent Start LLDP not supported by current FW version.\n");
+	}
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
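+/* Illustrative usage sketch, not part of this patch: stop the FW LLDP
+ * agent persistently, do some caller-side reconfiguration, then start it
+ * again. On FW without I40E_HW_FLAG_FW_LLDP_PERSISTENT the persist flag
+ * is logged and ignored, as the helpers above show.
+ *
+ *	i40e_aq_stop_lldp(hw, true, true, NULL);
+ *	... caller-side reconfiguration ...
+ *	i40e_aq_start_lldp(hw, true, NULL);
+ */
+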
+/**
+ * i40e_aq_set_dcb_parameters
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ * @dcb_enable: True if DCB configuration needs to be applied
+ *
+ **/
+enum i40e_status_code
+i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
+			   struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_set_dcb_parameters *cmd =
+		(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
+	i40e_status status;
+
+	if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
+		return I40E_ERR_DEVICE_NOT_SUPPORTED;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_set_dcb_parameters);
+
+	if (dcb_enable) {
+		cmd->valid_flags = I40E_DCB_VALID;
+		cmd->command = I40E_AQ_DCB_SET_AGENT;
+	}
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
 	return status;
@@ -3597,8 +4131,8 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
  * Get CEE DCBX mode operational configuration from firmware
  **/
 i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
-				       void *buff, u16 buff_size,
-				       struct i40e_asq_cmd_details *cmd_details)
+				void *buff, u16 buff_size,
+				struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
 	i40e_status status;
@@ -3608,24 +4142,53 @@ i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
 
 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
 
-	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
 	status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size,
 				       cmd_details);
 
 	return status;
 }
 
+/**
+ * i40e_aq_start_stop_dcbx - Start/Stop DCBx service in FW
+ * @hw: pointer to the hw struct
+ * @start_agent: True if DCBx Agent needs to be Started
+ *				False if DCBx Agent needs to be Stopped
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Start/Stop the embedded dcbx Agent
+ **/
+i40e_status i40e_aq_start_stop_dcbx(struct i40e_hw *hw,
+				bool start_agent,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_lldp_stop_start_specific_agent *cmd =
+		(struct i40e_aqc_lldp_stop_start_specific_agent *)
+				&desc.params.raw;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+				i40e_aqc_opc_lldp_stop_start_spec_agent);
+
+	if (start_agent)
+		cmd->command = I40E_AQC_START_SPECIFIC_AGENT_MASK;
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
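+/* Illustrative usage sketch, not part of this patch: start the FW DCBx
+ * agent; passing false sends the same command without the start bit,
+ * which stops the agent.
+ *
+ *	if (i40e_aq_start_stop_dcbx(hw, true, NULL) != I40E_SUCCESS)
+ *		i40e_debug(hw, I40E_DEBUG_ALL, "DCBx start failed\n");
+ */
+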
 /**
  * i40e_aq_add_udp_tunnel
  * @hw: pointer to the hw struct
  * @udp_port: the UDP port to add in Host byte order
- * @header_len: length of the tunneling header length in DWords
  * @protocol_index: protocol index type
  * @filter_index: pointer to filter index
  * @cmd_details: pointer to command details structure or NULL
  *
  * Note: Firmware expects the udp_port value to be in Little Endian format,
- * and this function will call cpu_to_le16 to convert from Host byte order to
+ * and this function will call CPU_TO_LE16 to convert from Host byte order to
  * Little Endian order.
  **/
 i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
@@ -3642,7 +4205,7 @@ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
 
 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
 
-	cmd->udp_port = cpu_to_le16(udp_port);
+	cmd->udp_port = CPU_TO_LE16(udp_port);
 	cmd->protocol_type = protocol_index;
 
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -3676,6 +4239,45 @@ i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
 	return status;
 }
 
+/**
+ * i40e_aq_get_switch_resource_alloc (0x0204)
+ * @hw: pointer to the hw struct
+ * @num_entries: pointer to u8 to store the number of resource entries returned
+ * @buf: pointer to a user supplied buffer.  This buffer must be large enough
+ *        to store the resource information for all resource types.  Each
+ *        resource type is an i40e_aqc_switch_resource_alloc_data structure.
+ * @count: number of resource entries the buffer can hold
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Query the resources allocated to a function.
+ **/
+i40e_status i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw,
+			u8 *num_entries,
+			struct i40e_aqc_switch_resource_alloc_element_resp *buf,
+			u16 count,
+			struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_get_switch_resource_alloc *cmd_resp =
+		(struct i40e_aqc_get_switch_resource_alloc *)&desc.params.raw;
+	i40e_status status;
+	u16 length = count * sizeof(*buf);
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					i40e_aqc_opc_get_switch_resource_alloc);
+
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+	if (length > I40E_AQ_LARGE_BUF)
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+	status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details);
+
+	if (!status && num_entries)
+		*num_entries = cmd_resp->num_entries;
+
+	return status;
+}
+
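+/* Illustrative usage sketch, not part of this patch: query the per-type
+ * resource allocation; the array size of 16 is an arbitrary choice for
+ * the example.
+ *
+ *	struct i40e_aqc_switch_resource_alloc_element_resp buf[16] = {};
+ *	u8 n = 0;
+ *
+ *	if (i40e_aq_get_switch_resource_alloc(hw, &n, buf, ARRAY_SIZE(buf),
+ *					      NULL) == I40E_SUCCESS)
+ *		pr_info("%u switch resource entries\n", n);
+ */
+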
 /**
  * i40e_aq_delete_element - Delete switch element
  * @hw: pointer to the hw struct
@@ -3697,9 +4299,9 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
 
 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
 
-	cmd->seid = cpu_to_le16(seid);
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+	cmd->seid = CPU_TO_LE16(seid);
+	status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
+					      cmd_details, true);
 
 	return status;
 }
@@ -3709,6 +4311,14 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
  * @hw: pointer to the hw struct
  * @cmd_details: pointer to command details structure or NULL
  *
+ * When LLDP is handled in PF this command is used by the PF
+ * to notify EMP that a DCB setting is modified.
+ * When LLDP is handled in EMP this command is used by the PF
+ * to notify EMP whenever one of the following parameters get
+ * modified:
+ *   - PFCLinkDelayAllowance in PRTDCB_GENC.PFCLDA
+ *   - PCIRTT in PRTDCB_GENC.PCIRTT
+ *   - Maximum Frame Size for non-FCoE TCs set by PRTDCB_TDPUC.MAX_TXFRAME.
  * EMP will return when the shared RPB settings have been
  * recomputed and modified. The retval field in the descriptor
  * will be set to 0 when RPB is modified.
@@ -3772,15 +4382,15 @@ static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
 	i40e_fill_default_direct_cmd_desc(&desc, opcode);
 
 	/* Indirect command */
-	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
 	if (cmd_param_flag)
-		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
 	if (buff_size > I40E_AQ_LARGE_BUF)
-		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
 
-	desc.datalen = cpu_to_le16(buff_size);
+	desc.datalen = CPU_TO_LE16(buff_size);
 
-	cmd->vsi_seid = cpu_to_le16(seid);
+	cmd->vsi_seid = CPU_TO_LE16(seid);
 
 	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
 
@@ -3807,8 +4417,8 @@ i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
 	i40e_fill_default_direct_cmd_desc(&desc,
 					  i40e_aqc_opc_configure_vsi_bw_limit);
 
-	cmd->vsi_seid = cpu_to_le16(seid);
-	cmd->credit = cpu_to_le16(credit);
+	cmd->vsi_seid = CPU_TO_LE16(seid);
+	cmd->credit = CPU_TO_LE16(credit);
 	cmd->max_credit = max_credit;
 
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -3838,6 +4448,7 @@ i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
  * @hw: pointer to the hw struct
  * @seid: seid of the switching component connected to Physical Port
  * @ets_data: Buffer holding ETS parameters
+ * @opcode: Tx scheduler AQ command opcode
  * @cmd_details: pointer to command details structure or NULL
  **/
 i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
@@ -3961,7 +4572,7 @@ i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
  * The function checks for the valid filter/context sizes being
  * passed for FCoE and PE.
  *
- * Returns 0 if the values passed are valid and within
+ * Returns I40E_SUCCESS if the values passed are valid and within
  * range else returns an error.
  **/
 static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
@@ -3970,6 +4581,7 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
 	u32 fcoe_cntx_size, fcoe_filt_size;
 	u32 pe_cntx_size, pe_filt_size;
 	u32 fcoe_fmax;
+
 	u32 val;
 
 	/* Validate FCoE settings passed */
@@ -4044,7 +4656,7 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
 	if (fcoe_filt_size + fcoe_cntx_size >  fcoe_fmax)
 		return I40E_ERR_INVALID_SIZE;
 
-	return 0;
+	return I40E_SUCCESS;
 }
 
 /**
@@ -4059,7 +4671,7 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
 i40e_status i40e_set_filter_control(struct i40e_hw *hw,
 				struct i40e_filter_control_settings *settings)
 {
-	i40e_status ret = 0;
+	i40e_status ret = I40E_SUCCESS;
 	u32 hash_lut_size = 0;
 	u32 val;
 
@@ -4111,7 +4723,7 @@ i40e_status i40e_set_filter_control(struct i40e_hw *hw,
 
 	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
 
-	return 0;
+	return I40E_SUCCESS;
 }
 
 /**
@@ -4151,7 +4763,7 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
 	if (is_add) {
 		i40e_fill_default_direct_cmd_desc(&desc,
 				i40e_aqc_opc_add_control_packet_filter);
-		cmd->queue = cpu_to_le16(queue);
+		cmd->queue = CPU_TO_LE16(queue);
 	} else {
 		i40e_fill_default_direct_cmd_desc(&desc,
 				i40e_aqc_opc_remove_control_packet_filter);
@@ -4160,17 +4772,17 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
 	if (mac_addr)
 		ether_addr_copy(cmd->mac, mac_addr);
 
-	cmd->etype = cpu_to_le16(ethtype);
-	cmd->flags = cpu_to_le16(flags);
-	cmd->seid = cpu_to_le16(vsi_seid);
+	cmd->etype = CPU_TO_LE16(ethtype);
+	cmd->flags = CPU_TO_LE16(flags);
+	cmd->seid = CPU_TO_LE16(vsi_seid);
 
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
 	if (!status && stats) {
-		stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used);
-		stats->etype_used = le16_to_cpu(resp->etype_used);
-		stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free);
-		stats->etype_free = le16_to_cpu(resp->etype_free);
+		stats->mac_etype_used = LE16_TO_CPU(resp->mac_etype_used);
+		stats->etype_used = LE16_TO_CPU(resp->etype_used);
+		stats->mac_etype_free = LE16_TO_CPU(resp->mac_etype_free);
+		stats->etype_free = LE16_TO_CPU(resp->etype_free);
 	}
 
 	return status;
@@ -4181,10 +4793,10 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
  * @hw: pointer to the hw struct
  * @seid: VSI seid to add ethertype filter from
  **/
-#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
 void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
 						    u16 seid)
 {
+#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
 	u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
 		   I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
 		   I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
@@ -4199,89 +4811,361 @@ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
 }
 
 /**
- * i40e_aq_alternate_read
+ * i40e_fix_up_geneve_vni - adjust Geneve VNI for HW issue
+ * @filters: list of cloud filters
+ * @filter_count: length of list
+ *
+ * There's an issue in the device where the Geneve VNI layout needs
+ * to be shifted 1 byte over from the VxLAN VNI
+ **/
+static void i40e_fix_up_geneve_vni(
+	struct i40e_aqc_cloud_filters_element_data *filters,
+	u8 filter_count)
+{
+	struct i40e_aqc_cloud_filters_element_data *f = filters;
+	int i;
+
+	for (i = 0; i < filter_count; i++) {
+		u16 tnl_type;
+		u32 ti;
+
+		tnl_type = (LE16_TO_CPU(f[i].flags) &
+			   I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
+			   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
+		if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
+			ti = LE32_TO_CPU(f[i].tenant_id);
+			f[i].tenant_id = CPU_TO_LE32(ti << 8);
+		}
+	}
+}
+
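+/* Worked example of the fixup above: a Geneve VNI of 0x12 arrives in
+ * tenant_id using the VxLAN layout (LE32 0x00000012) and is shifted to
+ * 0x00001200 so the device sees the byte positions it expects; VxLAN
+ * entries pass through untouched.
+ */
+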
+/**
+ * i40e_aq_add_cloud_filters
  * @hw: pointer to the hardware structure
- * @reg_addr0: address of first dword to be read
- * @reg_val0: pointer for data read from 'reg_addr0'
- * @reg_addr1: address of second dword to be read
- * @reg_val1: pointer for data read from 'reg_addr1'
+ * @seid: VSI seid to add cloud filters from
+ * @filters: Buffer which contains the filters to be added
+ * @filter_count: number of filters contained in the buffer
  *
- * Read one or two dwords from alternate structure. Fields are indicated
- * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer
- * is not passed then only register at 'reg_addr0' is read.
+ * Set the cloud filters for a given VSI.  The contents of the
+ * i40e_aqc_cloud_filters_element_data are filled
+ * in by the caller of the function.
  *
  **/
-static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
-					  u32 reg_addr0, u32 *reg_val0,
-					  u32 reg_addr1, u32 *reg_val1)
+i40e_status i40e_aq_add_cloud_filters(struct i40e_hw *hw,
+	u16 seid,
+	struct i40e_aqc_cloud_filters_element_data *filters,
+	u8 filter_count)
 {
 	struct i40e_aq_desc desc;
-	struct i40e_aqc_alternate_write *cmd_resp =
-		(struct i40e_aqc_alternate_write *)&desc.params.raw;
+	struct i40e_aqc_add_remove_cloud_filters *cmd =
+	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
 	i40e_status status;
+	u16 buff_len;
 
-	if (!reg_val0)
-		return I40E_ERR_PARAM;
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_add_cloud_filters);
 
-	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
-	cmd_resp->address0 = cpu_to_le32(reg_addr0);
-	cmd_resp->address1 = cpu_to_le32(reg_addr1);
+	buff_len = filter_count * sizeof(*filters);
+	desc.datalen = CPU_TO_LE16(buff_len);
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	cmd->num_filters = filter_count;
+	cmd->seid = CPU_TO_LE16(seid);
 
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+	i40e_fix_up_geneve_vni(filters, filter_count);
 
-	if (!status) {
-		*reg_val0 = le32_to_cpu(cmd_resp->data0);
+	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+	return status;
+}
+
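+/* Illustrative usage sketch, not part of this patch: add a single cloud
+ * filter on a VSI. The fields to fill depend on the filter type; see
+ * i40e_aqc_cloud_filters_element_data for the layout. vsi_seid and the
+ * filter contents are caller-side placeholders.
+ *
+ *	struct i40e_aqc_cloud_filters_element_data f = {};
+ *
+ *	... fill f.flags, f.tenant_id, f.queue_number as needed ...
+ *	ret = i40e_aq_add_cloud_filters(hw, vsi_seid, &f, 1);
+ */
+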
+/**
+ * i40e_aq_add_cloud_filters_bb
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to add cloud filters from
+ * @filters: Buffer which contains the filters in big buffer to be added
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Set the cloud filters for a given VSI.  The contents of the
+ * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
+ * function.
+ *
+ **/
+enum i40e_status_code
+i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+			     struct i40e_aqc_cloud_filters_element_bb *filters,
+			     u8 filter_count)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_add_remove_cloud_filters *cmd =
+	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+	i40e_status status;
+	u16 buff_len;
+	int i;
 
-		if (reg_val1)
-			*reg_val1 = le32_to_cpu(cmd_resp->data1);
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_add_cloud_filters);
+
+	buff_len = filter_count * sizeof(*filters);
+	desc.datalen = CPU_TO_LE16(buff_len);
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	cmd->num_filters = filter_count;
+	cmd->seid = CPU_TO_LE16(seid);
+	cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
+
+	for (i = 0; i < filter_count; i++) {
+		u16 tnl_type;
+		u32 ti;
+
+		tnl_type = (LE16_TO_CPU(filters[i].element.flags) &
+			   I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
+			   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
+
+		/* Due to hardware eccentricities, the VNI for Geneve is shifted
+		 * one more byte further than normally used for Tenant ID in
+		 * other tunnel types.
+		 */
+		if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
+			ti = LE32_TO_CPU(filters[i].element.tenant_id);
+			filters[i].element.tenant_id = CPU_TO_LE32(ti << 8);
+		}
 	}
 
+	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
 	return status;
 }
 
 /**
- * i40e_aq_resume_port_tx
+ * i40e_aq_rem_cloud_filters
  * @hw: pointer to the hardware structure
- * @cmd_details: pointer to command details structure or NULL
+ * @seid: VSI seid to remove cloud filters from
+ * @filters: Buffer which contains the filters to be removed
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Remove the cloud filters for a given VSI.  The contents of the
+ * i40e_aqc_cloud_filters_element_data are filled in by the caller
+ * of the function.
  *
- * Resume port's Tx traffic
  **/
-i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
-				   struct i40e_asq_cmd_details *cmd_details)
+enum i40e_status_code
+i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
+			  struct i40e_aqc_cloud_filters_element_data *filters,
+			  u8 filter_count)
 {
 	struct i40e_aq_desc desc;
+	struct i40e_aqc_add_remove_cloud_filters *cmd =
+	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
 	i40e_status status;
+	u16 buff_len;
 
-	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_remove_cloud_filters);
 
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+	buff_len = filter_count * sizeof(*filters);
+	desc.datalen = CPU_TO_LE16(buff_len);
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	cmd->num_filters = filter_count;
+	cmd->seid = CPU_TO_LE16(seid);
+
+	i40e_fix_up_geneve_vni(filters, filter_count);
+
+	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
 
 	return status;
 }
 
 /**
- * i40e_set_pci_config_data - store PCI bus info
- * @hw: pointer to hardware structure
- * @link_status: the link status word from PCI config space
+ * i40e_aq_rem_cloud_filters_bb
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to remove cloud filters from
+ * @filters: Buffer which contains the filters in big buffer to be removed
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Remove the big buffer cloud filters for a given VSI.  The contents of the
+ * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
+ * function.
  *
- * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
  **/
-void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
+enum i40e_status_code
+i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+			     struct i40e_aqc_cloud_filters_element_bb *filters,
+			     u8 filter_count)
 {
-	hw->bus.type = i40e_bus_type_pci_express;
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_add_remove_cloud_filters *cmd =
+	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+	i40e_status status;
+	u16 buff_len;
+	int i;
 
-	switch (link_status & PCI_EXP_LNKSTA_NLW) {
-	case PCI_EXP_LNKSTA_NLW_X1:
-		hw->bus.width = i40e_bus_width_pcie_x1;
-		break;
-	case PCI_EXP_LNKSTA_NLW_X2:
-		hw->bus.width = i40e_bus_width_pcie_x2;
-		break;
-	case PCI_EXP_LNKSTA_NLW_X4:
-		hw->bus.width = i40e_bus_width_pcie_x4;
-		break;
-	case PCI_EXP_LNKSTA_NLW_X8:
-		hw->bus.width = i40e_bus_width_pcie_x8;
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_remove_cloud_filters);
+
+	buff_len = filter_count * sizeof(*filters);
+	desc.datalen = CPU_TO_LE16(buff_len);
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	cmd->num_filters = filter_count;
+	cmd->seid = CPU_TO_LE16(seid);
+	cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
+
+	for (i = 0; i < filter_count; i++) {
+		u16 tnl_type;
+		u32 ti;
+
+		tnl_type = (LE16_TO_CPU(filters[i].element.flags) &
+			   I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
+			   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
+
+		/* Due to hardware eccentricities, the VNI for Geneve is shifted
+		 * one more byte further than normally used for Tenant ID in
+		 * other tunnel types.
+		 */
+		if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
+			ti = LE32_TO_CPU(filters[i].element.tenant_id);
+			filters[i].element.tenant_id = CPU_TO_LE32(ti << 8);
+		}
+	}
+
+	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+	return status;
+}
+
+/**
+ * i40e_aq_replace_cloud_filters - Replace cloud filter command
+ * @hw: pointer to the hw struct
+ * @filters: pointer to the i40e_aqc_replace_cloud_filter_cmd struct
+ * @cmd_buf: pointer to the i40e_aqc_replace_cloud_filter_cmd_buf struct
+ *
+ **/
+enum i40e_status_code
+i40e_aq_replace_cloud_filters(struct i40e_hw *hw,
+	struct i40e_aqc_replace_cloud_filters_cmd *filters,
+	struct i40e_aqc_replace_cloud_filters_cmd_buf *cmd_buf)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_replace_cloud_filters_cmd *cmd =
+		(struct i40e_aqc_replace_cloud_filters_cmd *)&desc.params.raw;
+	i40e_status status = I40E_SUCCESS;
+	int i = 0;
+
+	/* X722 doesn't support this command */
+	if (hw->mac.type == I40E_MAC_X722)
+		return I40E_ERR_DEVICE_NOT_SUPPORTED;
+
+	/* need FW version greater than 6.00 */
+	if (hw->aq.fw_maj_ver < 6)
+		return I40E_NOT_SUPPORTED;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_replace_cloud_filters);
+
+	desc.datalen = CPU_TO_LE16(32);
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	cmd->old_filter_type = filters->old_filter_type;
+	cmd->new_filter_type = filters->new_filter_type;
+	cmd->valid_flags = filters->valid_flags;
+	cmd->tr_bit = filters->tr_bit;
+	cmd->tr_bit2 = filters->tr_bit2;
+
+	status = i40e_asq_send_command(hw, &desc, cmd_buf,
+		sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf), NULL);
+
+	/* unpack the raw response data into per-filter entries */
+	for (i = 0; i < 32; i += 4) {
+		cmd_buf->filters[i / 4].filter_type = cmd_buf->data[i];
+		cmd_buf->filters[i / 4].input[0] = cmd_buf->data[i + 1];
+		cmd_buf->filters[i / 4].input[1] = cmd_buf->data[i + 2];
+		cmd_buf->filters[i / 4].input[2] = cmd_buf->data[i + 3];
+	}
+
+	return status;
+}
+
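+/* Illustrative usage sketch, not part of this patch: swap one cloud
+ * filter type definition for another and read back the resulting table.
+ * Field values are placeholders; per the checks above, X722 and FW older
+ * than 6.00 reject this command.
+ *
+ *	struct i40e_aqc_replace_cloud_filters_cmd cmd = {};
+ *	struct i40e_aqc_replace_cloud_filters_cmd_buf buf = {};
+ *
+ *	cmd.old_filter_type = old_type;
+ *	cmd.new_filter_type = new_type;
+ *	ret = i40e_aq_replace_cloud_filters(hw, &cmd, &buf);
+ */
+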
+/**
+ * i40e_aq_alternate_read
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be read
+ * @reg_val0: pointer for data read from 'reg_addr0'
+ * @reg_addr1: address of second dword to be read
+ * @reg_val1: pointer for data read from 'reg_addr1'
+ *
+ * Read one or two dwords from alternate structure. Fields are indicated
+ * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer
+ * is not passed then only register at 'reg_addr0' is read.
+ *
+ **/
+i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
+				u32 reg_addr0, u32 *reg_val0,
+				u32 reg_addr1, u32 *reg_val1)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_alternate_write *cmd_resp =
+		(struct i40e_aqc_alternate_write *)&desc.params.raw;
+	i40e_status status;
+
+	if (reg_val0 == NULL)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
+	cmd_resp->address0 = CPU_TO_LE32(reg_addr0);
+	cmd_resp->address1 = CPU_TO_LE32(reg_addr1);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+	if (status == I40E_SUCCESS) {
+		*reg_val0 = LE32_TO_CPU(cmd_resp->data0);
+
+		if (reg_val1 != NULL)
+			*reg_val1 = LE32_TO_CPU(cmd_resp->data1);
+	}
+
+	return status;
+}
+
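+/* Illustrative usage sketch, not part of this patch: read two adjacent
+ * alternate-structure dwords and combine them, as the alt-RAM bandwidth
+ * helpers in this file do. reg is a caller-side placeholder.
+ *
+ *	u32 lo = 0, hi = 0;
+ *
+ *	if (i40e_aq_alternate_read(hw, reg, &lo, reg + 1,
+ *				   &hi) == I40E_SUCCESS)
+ *		val = ((u64)hi << 32) | lo;
+ */
+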
+/**
+ * i40e_aq_resume_port_tx
+ * @hw: pointer to the hardware structure
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Resume port's Tx traffic
+ **/
+i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_set_pci_config_data - store PCI bus info
+ * @hw: pointer to hardware structure
+ * @link_status: the link status word from PCI config space
+ *
+ * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
+ **/
+void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
+{
+	hw->bus.type = i40e_bus_type_pci_express;
+
+	switch (link_status & PCI_EXP_LNKSTA_NLW) {
+	case PCI_EXP_LNKSTA_NLW_X1:
+		hw->bus.width = i40e_bus_width_pcie_x1;
+		break;
+	case PCI_EXP_LNKSTA_NLW_X2:
+		hw->bus.width = i40e_bus_width_pcie_x2;
+		break;
+	case PCI_EXP_LNKSTA_NLW_X4:
+		hw->bus.width = i40e_bus_width_pcie_x4;
+		break;
+	case PCI_EXP_LNKSTA_NLW_X8:
+		hw->bus.width = i40e_bus_width_pcie_x8;
 		break;
 	default:
 		hw->bus.width = i40e_bus_width_unknown;
@@ -4315,15 +5199,16 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
  * @ret_buff_size: actual buffer size returned
  * @ret_next_table: next block to read
  * @ret_next_index: next index to read
+ * @cmd_details: pointer to command details structure or NULL
  *
  * Dump internal FW/HW data for debug purposes.
  *
  **/
 i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
-			       u8 table_id, u32 start_index, u16 buff_size,
-			       void *buff, u16 *ret_buff_size,
-			       u8 *ret_next_table, u32 *ret_next_index,
-			       struct i40e_asq_cmd_details *cmd_details)
+				u8 table_id, u32 start_index, u16 buff_size,
+				void *buff, u16 *ret_buff_size,
+				u8 *ret_next_table, u32 *ret_next_index,
+				struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_debug_dump_internals *cmd =
@@ -4338,24 +5223,24 @@ i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
 	i40e_fill_default_direct_cmd_desc(&desc,
 					  i40e_aqc_opc_debug_dump_internals);
 	/* Indirect Command */
-	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
 	if (buff_size > I40E_AQ_LARGE_BUF)
-		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
 
 	cmd->cluster_id = cluster_id;
 	cmd->table_id = table_id;
-	cmd->idx = cpu_to_le32(start_index);
+	cmd->idx = CPU_TO_LE32(start_index);
 
-	desc.datalen = cpu_to_le16(buff_size);
+	desc.datalen = CPU_TO_LE16(buff_size);
 
 	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
 	if (!status) {
-		if (ret_buff_size)
-			*ret_buff_size = le16_to_cpu(desc.datalen);
-		if (ret_next_table)
+		if (ret_buff_size != NULL)
+			*ret_buff_size = LE16_TO_CPU(desc.datalen);
+		if (ret_next_table != NULL)
 			*ret_next_table = resp->table_id;
-		if (ret_next_index)
-			*ret_next_index = le32_to_cpu(resp->idx);
+		if (ret_next_index != NULL)
+			*ret_next_index = LE32_TO_CPU(resp->idx);
 	}
 
 	return status;
@@ -4372,8 +5257,8 @@ i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
  * Read bw from the alternate ram for the given pf
  **/
 i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
-				      u32 *max_bw, u32 *min_bw,
-				      bool *min_valid, bool *max_valid)
+					u32 *max_bw, u32 *min_bw,
+					bool *min_valid, bool *max_valid)
 {
 	i40e_status status;
 	u32 max_bw_addr, min_bw_addr;
@@ -4420,19 +5305,15 @@ i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
 	u16 bwd_size = sizeof(*bw_data);
 
 	i40e_fill_default_direct_cmd_desc(&desc,
-					  i40e_aqc_opc_configure_partition_bw);
+				i40e_aqc_opc_configure_partition_bw);
 
 	/* Indirect command */
-	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
-	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
 
-	if (bwd_size > I40E_AQ_LARGE_BUF)
-		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+	desc.datalen = CPU_TO_LE16(bwd_size);
 
-	desc.datalen = cpu_to_le16(bwd_size);
-
-	status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size,
-				       cmd_details);
+	status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, cmd_details);
 
 	return status;
 }
@@ -4441,13 +5322,13 @@ i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
  * i40e_read_phy_register_clause22
  * @hw: pointer to the HW structure
  * @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
  * @value: PHY register value
  *
  * Reads specified PHY register value
  **/
 i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
-					    u16 reg, u8 phy_addr, u16 *value)
+					u16 reg, u8 phy_addr, u16 *value)
 {
 	i40e_status status = I40E_ERR_TIMEOUT;
 	u8 port_num = (u8)hw->func_caps.mdio_port_num;
@@ -4463,7 +5344,7 @@ i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
 	do {
 		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
 		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
-			status = 0;
+			status = I40E_SUCCESS;
 			break;
 		}
 		udelay(10);
@@ -4486,13 +5367,13 @@ i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
  * i40e_write_phy_register_clause22
  * @hw: pointer to the HW structure
  * @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
  * @value: PHY register value
  *
  * Writes specified PHY register value
  **/
 i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
-					     u16 reg, u8 phy_addr, u16 value)
+					u16 reg, u8 phy_addr, u16 value)
 {
 	i40e_status status = I40E_ERR_TIMEOUT;
 	u8 port_num = (u8)hw->func_caps.mdio_port_num;
@@ -4512,7 +5393,7 @@ i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
 	do {
 		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
 		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
-			status = 0;
+			status = I40E_SUCCESS;
 			break;
 		}
 		udelay(10);
@@ -4527,7 +5408,7 @@ i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
  * @hw: pointer to the HW structure
  * @page: registers page number
  * @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
  * @value: PHY register value
  *
  * Reads specified PHY register value
@@ -4536,9 +5417,9 @@ i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
 				u8 page, u16 reg, u8 phy_addr, u16 *value)
 {
 	i40e_status status = I40E_ERR_TIMEOUT;
-	u32 command = 0;
+	u32 command = 0;
 	u16 retry = 1000;
-	u8 port_num = hw->func_caps.mdio_port_num;
+	u8 port_num = (u8)hw->func_caps.mdio_port_num;
 
 	command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
 		  (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
@@ -4551,10 +5432,10 @@ i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
 	do {
 		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
 		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
-			status = 0;
+			status = I40E_SUCCESS;
 			break;
 		}
-		usleep_range(10, 20);
+		udelay(10);
 		retry--;
 	} while (retry);
 
@@ -4576,10 +5457,10 @@ i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
 	do {
 		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
 		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
-			status = 0;
+			status = I40E_SUCCESS;
 			break;
 		}
-		usleep_range(10, 20);
+		udelay(10);
 		retry--;
 	} while (retry);
 
@@ -4601,7 +5482,7 @@ phy_read_end:
  * @hw: pointer to the HW structure
  * @page: registers page number
  * @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
  * @value: PHY register value
  *
  * Writes value to specified PHY register
@@ -4610,9 +5491,9 @@ i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
 				u8 page, u16 reg, u8 phy_addr, u16 value)
 {
 	i40e_status status = I40E_ERR_TIMEOUT;
-	u32 command = 0;
+	u32 command = 0;
 	u16 retry = 1000;
-	u8 port_num = hw->func_caps.mdio_port_num;
+	u8 port_num = (u8)hw->func_caps.mdio_port_num;
 
 	command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
 		  (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
@@ -4625,10 +5506,10 @@ i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
 	do {
 		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
 		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
-			status = 0;
+			status = I40E_SUCCESS;
 			break;
 		}
-		usleep_range(10, 20);
+		udelay(10);
 		retry--;
 	} while (retry);
 	if (status) {
@@ -4652,10 +5533,10 @@ i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
 	do {
 		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
 		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
-			status = 0;
+			status = I40E_SUCCESS;
 			break;
 		}
-		usleep_range(10, 20);
+		udelay(10);
 		retry--;
 	} while (retry);
 
@@ -4668,28 +5549,30 @@ phy_write_end:
  * @hw: pointer to the HW structure
  * @page: registers page number
  * @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
  * @value: PHY register value
  *
  * Writes value to specified PHY register
  **/
 i40e_status i40e_write_phy_register(struct i40e_hw *hw,
-				    u8 page, u16 reg, u8 phy_addr, u16 value)
+				u8 page, u16 reg, u8 phy_addr, u16 value)
 {
 	i40e_status status;
 
 	switch (hw->device_id) {
 	case I40E_DEV_ID_1G_BASE_T_X722:
-		status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
-							  value);
+		status = i40e_write_phy_register_clause22(hw,
+			reg, phy_addr, value);
 		break;
 	case I40E_DEV_ID_10G_BASE_T:
 	case I40E_DEV_ID_10G_BASE_T4:
+	case I40E_DEV_ID_10G_BASE_T_BC:
+	case I40E_DEV_ID_5G_BASE_T_BC:
 	case I40E_DEV_ID_10G_BASE_T_X722:
 	case I40E_DEV_ID_25G_B:
 	case I40E_DEV_ID_25G_SFP28:
-		status = i40e_write_phy_register_clause45(hw, page, reg,
-							  phy_addr, value);
+		status = i40e_write_phy_register_clause45(hw,
+			page, reg, phy_addr, value);
 		break;
 	default:
 		status = I40E_ERR_UNKNOWN_PHY;
@@ -4704,13 +5587,13 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw,
  * @hw: pointer to the HW structure
  * @page: registers page number
  * @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
  * @value: PHY register value
  *
  * Reads specified PHY register value
  **/
 i40e_status i40e_read_phy_register(struct i40e_hw *hw,
-				   u8 page, u16 reg, u8 phy_addr, u16 *value)
+				u8 page, u16 reg, u8 phy_addr, u16 *value)
 {
 	i40e_status status;
 
@@ -4721,6 +5604,8 @@ i40e_status i40e_read_phy_register(struct i40e_hw *hw,
 		break;
 	case I40E_DEV_ID_10G_BASE_T:
 	case I40E_DEV_ID_10G_BASE_T4:
+	case I40E_DEV_ID_10G_BASE_T_BC:
+	case I40E_DEV_ID_5G_BASE_T_BC:
 	case I40E_DEV_ID_10G_BASE_T_X722:
 	case I40E_DEV_ID_25G_B:
 	case I40E_DEV_ID_25G_SFP28:
@@ -4739,13 +5624,12 @@ i40e_status i40e_read_phy_register(struct i40e_hw *hw,
  * i40e_get_phy_address
  * @hw: pointer to the HW structure
 * @dev_num: PHY port num whose address we want
- * @phy_addr: Returned PHY address
  *
  * Gets PHY address for current port
  **/
 u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
 {
-	u8 port_num = hw->func_caps.mdio_port_num;
+	u8 port_num = (u8)hw->func_caps.mdio_port_num;
 	u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num));
 
 	return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f;
@@ -4760,11 +5644,11 @@ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
  * Blinks PHY link LED
  **/
 i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
-				    u32 time, u32 interval)
+					      u32 time, u32 interval)
 {
-	i40e_status status = 0;
+	i40e_status status = I40E_SUCCESS;
 	u32 i;
-	u16 led_ctl;
+	u16 led_ctl = 0;
 	u16 gpio_led_port;
 	u16 led_reg;
 	u16 led_addr = I40E_PHY_LED_PROV_REG_1;
@@ -4825,6 +5709,64 @@ phy_blinking_end:
 	return status;
 }
 
+/**
+ * i40e_led_get_reg - read LED register
+ * @hw: pointer to the HW structure
+ * @led_addr: LED register address
+ * @reg_val: read register value
+ **/
+static i40e_status i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
+					      u32 *reg_val)
+{
+	i40e_status status;
+	u8 phy_addr = 0;
+
+	*reg_val = 0;
+	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+		status = i40e_aq_get_phy_register(hw,
+						I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+						I40E_PHY_COM_REG_PAGE, true,
+						I40E_PHY_LED_PROV_REG_1,
+						reg_val, NULL);
+	} else {
+		phy_addr = i40e_get_phy_address(hw, hw->port);
+		status = i40e_read_phy_register_clause45(hw,
+							 I40E_PHY_COM_REG_PAGE,
+							 led_addr, phy_addr,
+							 (u16 *)reg_val);
+	}
+	return status;
+}
+
+/**
+ * i40e_led_set_reg - write LED register
+ * @hw: pointer to the HW structure
+ * @led_addr: LED register address
+ * @reg_val: register value to write
+ **/
+static i40e_status i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
+					      u32 reg_val)
+{
+	i40e_status status;
+	u8 phy_addr = 0;
+
+	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+		status = i40e_aq_set_phy_register(hw,
+						I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+						I40E_PHY_COM_REG_PAGE, true,
+						I40E_PHY_LED_PROV_REG_1,
+						reg_val, NULL);
+	} else {
+		phy_addr = i40e_get_phy_address(hw, hw->port);
+		status = i40e_write_phy_register_clause45(hw,
+							  I40E_PHY_COM_REG_PAGE,
+							  led_addr, phy_addr,
+							  (u16)reg_val);
+	}
+
+	return status;
+}
+
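The two helpers above fold the AQ-capable and MDIO-only paths behind one interface, so LED code no longer needs to know how the external PHY is reached. A minimal read-modify-write sketch, using only symbols defined above (the caller itself is hypothetical, not part of this patch):

static i40e_status i40e_led_force_on(struct i40e_hw *hw, u16 led_addr)
{
	i40e_status status;
	u32 reg_val;

	/* Read via AQ if the FW supports it, else via MDIO clause 45 */
	status = i40e_led_get_reg(hw, led_addr, &reg_val);
	if (status)
		return status;

	/* Set the manual-on bit and write the register back */
	reg_val |= I40E_PHY_LED_MANUAL_ON;
	return i40e_led_set_reg(hw, led_addr, reg_val);
}
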
 /**
  * i40e_led_get_phy - return current on/off mode
  * @hw: pointer to the hw struct
@@ -4833,21 +5775,27 @@ phy_blinking_end:
  *
  **/
 i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
-			     u16 *val)
+				       u16 *val)
 {
-	i40e_status status = 0;
+	i40e_status status = I40E_SUCCESS;
 	u16 gpio_led_port;
+	u32 reg_val_aq;
+	u16 temp_addr;
 	u8 phy_addr = 0;
 	u16 reg_val;
-	u16 temp_addr;
-	u8 port_num;
-	u32 i;
 
+	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+		status = i40e_aq_get_phy_register(hw,
+						I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+						I40E_PHY_COM_REG_PAGE, true,
+						I40E_PHY_LED_PROV_REG_1,
+						&reg_val_aq, NULL);
+		if (status == I40E_SUCCESS)
+			*val = (u16)reg_val_aq;
+		return status;
+	}
 	temp_addr = I40E_PHY_LED_PROV_REG_1;
-	i = rd32(hw, I40E_PFGEN_PORTNUM);
-	port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
-	phy_addr = i40e_get_phy_address(hw, port_num);
-
+	phy_addr = i40e_get_phy_address(hw, hw->port);
 	for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
 	     temp_addr++) {
 		status = i40e_read_phy_register_clause45(hw,
@@ -4869,62 +5817,240 @@ i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
  * i40e_led_set_phy
  * @hw: pointer to the HW structure
  * @on: true or false
+ * @led_addr: address of led register to use
  * @mode: original val plus bit for set or ignore
+ *
  * Set led's on or off when controlled by the PHY
  *
  **/
 i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
-			     u16 led_addr, u32 mode)
+				       u16 led_addr, u32 mode)
 {
-	i40e_status status = 0;
-	u16 led_ctl = 0;
-	u16 led_reg = 0;
-	u8 phy_addr = 0;
-	u8 port_num;
-	u32 i;
+	i40e_status status = I40E_SUCCESS;
+	u32 led_ctl = 0;
+	u32 led_reg = 0;
 
-	i = rd32(hw, I40E_PFGEN_PORTNUM);
-	port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
-	phy_addr = i40e_get_phy_address(hw, port_num);
-	status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
-						 led_addr, phy_addr, &led_reg);
+	status = i40e_led_get_reg(hw, led_addr, &led_reg);
 	if (status)
 		return status;
 	led_ctl = led_reg;
 	if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
 		led_reg = 0;
-		status = i40e_write_phy_register_clause45(hw,
-							  I40E_PHY_COM_REG_PAGE,
-							  led_addr, phy_addr,
-							  led_reg);
+		status = i40e_led_set_reg(hw, led_addr, led_reg);
 		if (status)
 			return status;
 	}
-	status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
-						 led_addr, phy_addr, &led_reg);
+	status = i40e_led_get_reg(hw, led_addr, &led_reg);
 	if (status)
 		goto restore_config;
 	if (on)
 		led_reg = I40E_PHY_LED_MANUAL_ON;
 	else
 		led_reg = 0;
-	status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
-						  led_addr, phy_addr, led_reg);
+	status = i40e_led_set_reg(hw, led_addr, led_reg);
 	if (status)
 		goto restore_config;
 	if (mode & I40E_PHY_LED_MODE_ORIG) {
 		led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
-		status = i40e_write_phy_register_clause45(hw,
-						 I40E_PHY_COM_REG_PAGE,
-						 led_addr, phy_addr, led_ctl);
+		status = i40e_led_set_reg(hw, led_addr, led_ctl);
 	}
 	return status;
+
 restore_config:
-	status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
-						  led_addr, phy_addr, led_ctl);
+	status = i40e_led_set_reg(hw, led_addr, led_ctl);
 	return status;
 }
 
+/**
+ * i40e_get_phy_lpi_status - read LPI status from PHY or MAC register
+ * @hw: pointer to the hw struct
+ * @stat: pointer to structure with status of rx and tx lpi
+ *
+ * Read LPI state directly from external PHY register or from MAC
+ * register, depending on device ID and current link speed.
+ */
+i40e_status i40e_get_phy_lpi_status(struct i40e_hw *hw,
+					      struct i40e_hw_port_stats *stat)
+{
+	i40e_status ret = I40E_SUCCESS;
+	u32 val;
+
+	stat->rx_lpi_status = 0;
+	stat->tx_lpi_status = 0;
+
+	if ((hw->device_id == I40E_DEV_ID_10G_BASE_T_BC ||
+	     hw->device_id == I40E_DEV_ID_5G_BASE_T_BC) &&
+	    (hw->phy.link_info.link_speed == I40E_LINK_SPEED_2_5GB ||
+	     hw->phy.link_info.link_speed == I40E_LINK_SPEED_5GB)) {
+		ret = i40e_aq_get_phy_register(hw,
+					       I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+					       I40E_BCM_PHY_PCS_STATUS1_PAGE,
+					       true,
+					       I40E_BCM_PHY_PCS_STATUS1_REG,
+					       &val, NULL);
+
+		if (ret != I40E_SUCCESS)
+			return ret;
+
+		stat->rx_lpi_status = !!(val & I40E_BCM_PHY_PCS_STATUS1_RX_LPI);
+		stat->tx_lpi_status = !!(val & I40E_BCM_PHY_PCS_STATUS1_TX_LPI);
+
+		return ret;
+	}
+
+	val = rd32(hw, I40E_PRTPM_EEE_STAT);
+	stat->rx_lpi_status = (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
+			       I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
+	stat->tx_lpi_status = (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
+			       I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
+
+	return ret;
+}
+
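The !! idiom collapses the masked PCS bits to 0/1, so the PHY path and the MAC-register path report status in the same form. A hypothetical caller sketch:

	struct i40e_hw_port_stats stats = {};

	if (i40e_get_phy_lpi_status(hw, &stats) == I40E_SUCCESS)
		pr_info("i40e: LPI status rx=%u tx=%u\n",
			stats.rx_lpi_status, stats.tx_lpi_status);
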
+/**
+ * i40e_get_lpi_counters - read LPI counters from EEE statistics
+ * @hw: pointer to the hw struct
+ * @tx_counter: pointer to memory for TX LPI counter
+ * @rx_counter: pointer to memory for RX LPI counter
+ * @is_clear:   returns true if counters are clear after read
+ *
+ * Read Low Power Idle (LPI) mode counters from Energy Efficient
+ * Ethernet (EEE) statistics.
+ **/
+i40e_status i40e_get_lpi_counters(struct i40e_hw *hw,
+					    u32 *tx_counter, u32 *rx_counter,
+					    bool *is_clear)
+{
+	/* only X710-T*L requires special handling of counters
+	 * for other devices we just read the MAC registers
+	 */
+	if ((hw->device_id == I40E_DEV_ID_10G_BASE_T_BC ||
+	     hw->device_id == I40E_DEV_ID_5G_BASE_T_BC) &&
+	     hw->phy.link_info.link_speed != I40E_LINK_SPEED_1GB) {
+		i40e_status retval;
+		u32 cmd_status;
+
+		*is_clear = false;
+		retval = i40e_aq_run_phy_activity(hw,
+				I40E_AQ_RUN_PHY_ACT_ID_USR_DFND,
+				I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_STAT,
+				&cmd_status, tx_counter, rx_counter, NULL);
+
+		if (cmd_status != I40E_AQ_RUN_PHY_ACT_CMD_STAT_SUCC)
+			retval = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+		return retval;
+	}
+
+	*is_clear = true;
+	*tx_counter = rd32(hw, I40E_PRTPM_TLPIC);
+	*rx_counter = rd32(hw, I40E_PRTPM_RLPIC);
+
+	return I40E_SUCCESS;
+}
+
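The is_clear flag tells the caller which counter semantics it got: the MAC registers are clear-on-read, while the X710-T*L DNL path returns accumulating values. i40e_lpi_stat_update() below branches on exactly this. A hypothetical direct caller:

	u32 tx_lpi, rx_lpi;
	bool cleared_on_read;

	if (i40e_get_lpi_counters(hw, &tx_lpi, &rx_lpi,
				  &cleared_on_read) == I40E_SUCCESS)
		pr_info("i40e: LPI events tx=%u rx=%u (%s)\n", tx_lpi, rx_lpi,
			cleared_on_read ? "clear-on-read" : "accumulating");
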
+/**
+ * i40e_get_lpi_duration - read LPI time duration from EEE statistics
+ * @hw: pointer to the hw struct
+ * @stat: pointer to structure with status of rx and tx lpi
+ * @tx_duration: pointer to memory for TX LPI time duration
+ * @rx_duration: pointer to memory for RX LPI time duration
+ *
+ * Read Low Power Idle (LPI) mode time duration from Energy Efficient
+ * Ethernet (EEE) statistics.
+ */
+i40e_status i40e_get_lpi_duration(struct i40e_hw *hw,
+					    struct i40e_hw_port_stats *stat,
+					    u64 *tx_duration, u64 *rx_duration)
+{
+	u32 tx_time_dur, rx_time_dur;
+	i40e_status retval;
+	u32 cmd_status;
+
+	if (hw->device_id != I40E_DEV_ID_10G_BASE_T_BC &&
+	    hw->device_id != I40E_DEV_ID_5G_BASE_T_BC)
+		return I40E_ERR_NOT_IMPLEMENTED;
+
+	retval = i40e_aq_run_phy_activity
+		(hw, I40E_AQ_RUN_PHY_ACT_ID_USR_DFND,
+		I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_DUR,
+		&cmd_status, &tx_time_dur, &rx_time_dur, NULL);
+
+	if (retval)
+		return retval;
+	if ((cmd_status & I40E_AQ_RUN_PHY_ACT_CMD_STAT_MASK) !=
+	    I40E_AQ_RUN_PHY_ACT_CMD_STAT_SUCC)
+		return I40E_ERR_ADMIN_QUEUE_ERROR;
+
+	if (hw->phy.link_info.link_speed == I40E_LINK_SPEED_1GB &&
+	    !tx_time_dur && !rx_time_dur &&
+	    stat->tx_lpi_status && stat->rx_lpi_status) {
+		retval = i40e_aq_run_phy_activity
+			(hw, I40E_AQ_RUN_PHY_ACT_ID_USR_DFND,
+			I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_STAT_DUR,
+			&cmd_status,
+			&tx_time_dur, &rx_time_dur, NULL);
+
+		if (retval)
+			return retval;
+		if ((cmd_status & I40E_AQ_RUN_PHY_ACT_CMD_STAT_MASK) !=
+		    I40E_AQ_RUN_PHY_ACT_CMD_STAT_SUCC)
+			return I40E_ERR_ADMIN_QUEUE_ERROR;
+		tx_time_dur = 0;
+		rx_time_dur = 0;
+	}
+
+	*tx_duration = tx_time_dur;
+	*rx_duration = rx_time_dur;
+
+	return retval;
+}
+
+/**
+ * i40e_lpi_stat_update - update LPI counters with values relative to offset
+ * @hw: pointer to the hw struct
+ * @offset_loaded: flag indicating whether the offsets were already captured
+ * @tx_offset: pointer to offset of TX LPI counter
+ * @tx_stat: pointer to value of TX LPI counter
+ * @rx_offset: pointer to offset of RX LPI counter
+ * @rx_stat: pointer to value of RX LPI counter
+ *
+ * Update Low Power Idle (LPI) mode counters relative to the passed
+ * offsets.
+ **/
+i40e_status i40e_lpi_stat_update(struct i40e_hw *hw,
+					   bool offset_loaded, u64 *tx_offset,
+					   u64 *tx_stat, u64 *rx_offset,
+					   u64 *rx_stat)
+{
+	i40e_status retval;
+	u32 tx_counter, rx_counter;
+	bool is_clear;
+
+	retval = i40e_get_lpi_counters(hw, &tx_counter, &rx_counter, &is_clear);
+	if (retval)
+		goto err;
+
+	if (is_clear) {
+		*tx_stat += tx_counter;
+		*rx_stat += rx_counter;
+	} else {
+		if (!offset_loaded) {
+			*tx_offset = tx_counter;
+			*rx_offset = rx_counter;
+		}
+
+		*tx_stat = (tx_counter >= *tx_offset) ?
+			(u32)(tx_counter - *tx_offset) :
+			(u32)((tx_counter + BIT_ULL(32)) - *tx_offset);
+		*rx_stat = (rx_counter >= *rx_offset) ?
+			(u32)(rx_counter - *rx_offset) :
+			(u32)((rx_counter + BIT_ULL(32)) - *rx_offset);
+	}
+err:
+	return retval;
+}
+
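The offset arithmetic above tolerates one wrap of the 32-bit hardware counters: when the current reading is below the stored offset, a full 2^32 period is added back before subtracting. A worked example with hypothetical values:

	u64 tx_offset = 0xFFFFFFF0;	/* baseline captured before the wrap */
	u32 tx_counter = 0x00000010;	/* hardware counter after wrapping */
	u64 delta = (tx_counter >= tx_offset) ?
		(u32)(tx_counter - tx_offset) :
		(u32)((tx_counter + BIT_ULL(32)) - tx_offset);
	/* delta == 0x20: 32 LPI events since the baseline, despite the wrap */
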
 /**
  * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register
  * @hw: pointer to the hw struct
@@ -4944,17 +6070,17 @@ i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
 		(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
 	i40e_status status;
 
-	if (!reg_val)
+	if (reg_val == NULL)
 		return I40E_ERR_PARAM;
 
 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);
 
-	cmd_resp->address = cpu_to_le32(reg_addr);
+	cmd_resp->address = CPU_TO_LE32(reg_addr);
 
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
-	if (status == 0)
-		*reg_val = le32_to_cpu(cmd_resp->value);
+	if (status == I40E_SUCCESS)
+		*reg_val = LE32_TO_CPU(cmd_resp->value);
 
 	return status;
 }
@@ -4966,7 +6092,7 @@ i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
  **/
 u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
 {
-	i40e_status status = 0;
+	i40e_status status = I40E_SUCCESS;
 	bool use_register;
 	int retry = 5;
 	u32 val = 0;
@@ -5012,10 +6138,11 @@ i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
 
 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
 
-	cmd->address = cpu_to_le32(reg_addr);
-	cmd->value = cpu_to_le32(reg_val);
+	cmd->address = CPU_TO_LE32(reg_addr);
+	cmd->value = CPU_TO_LE32(reg_val);
 
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+	status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
+					      cmd_details, true);
 
 	return status;
 }
@@ -5028,7 +6155,7 @@ i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
  **/
 void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
 {
-	i40e_status status = 0;
+	i40e_status status = I40E_SUCCESS;
 	bool use_register;
 	int retry = 5;
 
@@ -5052,7 +6179,358 @@ do_retry:
 }
 
 /**
- * i40e_aq_write_ppp - Write pipeline personalization profile (ppp)
+ * i40e_mdio_if_number_selection - MDIO I/F number selection
+ * @hw: pointer to the hw struct
+ * @set_mdio: use MDIO I/F number specified by mdio_num
+ * @mdio_num: MDIO I/F number
+ * @cmd: pointer to PHY Register command structure
+ **/
+static void
+i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio, u8 mdio_num,
+			      struct i40e_aqc_phy_register_access *cmd)
+{
+	if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) {
+		if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED)
+			cmd->cmd_flags |=
+				I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER |
+				((mdio_num <<
+				I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) &
+				I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK);
+		else
+			i40e_debug(hw, I40E_DEBUG_PHY,
+				   "MDIO I/F number selection not supported by current FW version.\n");
+	}
+}
+
+/**
+ * i40e_aq_set_phy_register_ext
+ * @hw: pointer to the hw struct
+ * @phy_select: select which phy should be accessed
+ * @dev_addr: PHY device address
+ * @page_change: enable auto page change
+ * @set_mdio: use MDIO I/F number specified by mdio_num
+ * @mdio_num: MDIO I/F number
+ * @reg_addr: PHY register address
+ * @reg_val: new register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Write the external PHY register.
+ * NOTE: in common cases the MDIO I/F number should not be changed, that is
+ * why you may use the simple wrapper i40e_aq_set_phy_register.
+ **/
+enum i40e_status_code
+i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
+			     u8 phy_select, u8 dev_addr, bool page_change,
+			     bool set_mdio, u8 mdio_num,
+			     u32 reg_addr, u32 reg_val,
+			     struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_phy_register_access *cmd =
+		(struct i40e_aqc_phy_register_access *)&desc.params.raw;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_set_phy_register);
+
+	cmd->phy_interface = phy_select;
+	cmd->dev_addres = dev_addr;
+	cmd->reg_address = CPU_TO_LE32(reg_addr);
+	cmd->reg_value = CPU_TO_LE32(reg_val);
+
+	if (!page_change)
+		cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
+
+	i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_get_phy_register_ext
+ * @hw: pointer to the hw struct
+ * @phy_select: select which phy should be accessed
+ * @dev_addr: PHY device address
+ * @page_change: enable auto page change
+ * @set_mdio: use MDIO I/F number specified by mdio_num
+ * @mdio_num: MDIO I/F number
+ * @reg_addr: PHY register address
+ * @reg_val: read register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the external PHY register.
+ * NOTE: in common cases the MDIO I/F number should not be changed, that is
+ * why you may use the simple wrapper i40e_aq_get_phy_register.
+ **/
+enum i40e_status_code
+i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
+			     u8 phy_select, u8 dev_addr, bool page_change,
+			     bool set_mdio, u8 mdio_num,
+			     u32 reg_addr, u32 *reg_val,
+			     struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_phy_register_access *cmd =
+		(struct i40e_aqc_phy_register_access *)&desc.params.raw;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_get_phy_register);
+
+	cmd->phy_interface = phy_select;
+	cmd->dev_addres = dev_addr;
+	cmd->reg_address = CPU_TO_LE32(reg_addr);
+
+	if (!page_change)
+		cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
+
+	i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+	if (!status)
+		*reg_val = LE32_TO_CPU(cmd->reg_value);
+
+	return status;
+}
+
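The simple wrappers mentioned in the NOTEs are, presumably, thin macros that pin set_mdio to false; this is how the out-of-tree driver's i40e_prototype.h appears to define them:

#define i40e_aq_set_phy_register(hw, ps, da, pc, ra, rv, cd) \
	i40e_aq_set_phy_register_ext(hw, ps, da, pc, false, 0, ra, rv, cd)
#define i40e_aq_get_phy_register(hw, ps, da, pc, ra, rv, cd) \
	i40e_aq_get_phy_register_ext(hw, ps, da, pc, false, 0, ra, rv, cd)
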
+/**
+ * i40e_aq_run_phy_activity
+ * @hw: pointer to the hw struct
+ * @activity_id: ID of DNL activity to run
+ * @dnl_opcode: opcode passed to DNL script
+ * @cmd_status: pointer to memory to write return value of DNL script
+ * @data0: pointer to memory for first 4 bytes of data returned by DNL script
+ * @data1: pointer to memory for last 4 bytes of data returned by DNL script
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Run DNL admin command.
+ **/
+enum i40e_status_code
+i40e_aq_run_phy_activity(struct i40e_hw *hw, u16 activity_id, u32 dnl_opcode,
+			 u32 *cmd_status, u32 *data0, u32 *data1,
+			 struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aqc_run_phy_activity *cmd;
+	i40e_status retval;
+	struct i40e_aq_desc desc;
+
+	cmd = (struct i40e_aqc_run_phy_activity *)&desc.params.raw;
+
+	if (!cmd_status || !data0 || !data1) {
+		retval = I40E_ERR_PARAM;
+		goto err;
+	}
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_run_phy_activity);
+
+	cmd->activity_id = CPU_TO_LE16(activity_id);
+	cmd->params.cmd.dnl_opcode = CPU_TO_LE32(dnl_opcode);
+
+	retval = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+	if (retval)
+		goto err;
+
+	*cmd_status = LE32_TO_CPU(cmd->params.resp.cmd_status);
+	*data0 = LE32_TO_CPU(cmd->params.resp.data0);
+	*data1 = LE32_TO_CPU(cmd->params.resp.data1);
+err:
+	return retval;
+}
+
+/**
+ * i40e_aq_set_arp_proxy_config
+ * @hw: pointer to the HW structure
+ * @proxy_config: pointer to proxy config command table struct
+ * @cmd_details: pointer to command details
+ *
+ * Set ARP offload parameters from pre-populated
+ * i40e_aqc_arp_proxy_data struct
+ **/
+i40e_status i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
+				struct i40e_aqc_arp_proxy_data *proxy_config,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	i40e_status status;
+
+	if (!proxy_config)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_proxy_config);
+
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+	desc.params.external.addr_high =
+				  CPU_TO_LE32(upper_32_bits((u64)proxy_config));
+	desc.params.external.addr_low =
+				  CPU_TO_LE32(lower_32_bits((u64)proxy_config));
+	desc.datalen = CPU_TO_LE16(sizeof(struct i40e_aqc_arp_proxy_data));
+
+	status = i40e_asq_send_command(hw, &desc, proxy_config,
+				       sizeof(struct i40e_aqc_arp_proxy_data),
+				       cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_opc_set_ns_proxy_table_entry
+ * @hw: pointer to the HW structure
+ * @ns_proxy_table_entry: pointer to NS table entry command struct
+ * @cmd_details: pointer to command details
+ *
+ * Set IPv6 Neighbor Solicitation (NS) protocol offload parameters
+ * from pre-populated i40e_aqc_ns_proxy_data struct
+ **/
+i40e_status i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw,
+			struct i40e_aqc_ns_proxy_data *ns_proxy_table_entry,
+			struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	i40e_status status;
+
+	if (!ns_proxy_table_entry)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+				i40e_aqc_opc_set_ns_proxy_table_entry);
+
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+	desc.params.external.addr_high =
+		CPU_TO_LE32(upper_32_bits((u64)ns_proxy_table_entry));
+	desc.params.external.addr_low =
+		CPU_TO_LE32(lower_32_bits((u64)ns_proxy_table_entry));
+	desc.datalen = CPU_TO_LE16(sizeof(struct i40e_aqc_ns_proxy_data));
+
+	status = i40e_asq_send_command(hw, &desc, ns_proxy_table_entry,
+				       sizeof(struct i40e_aqc_ns_proxy_data),
+				       cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_set_clear_wol_filter
+ * @hw: pointer to the hw struct
+ * @filter_index: index of filter to modify (0-7)
+ * @filter: buffer containing filter to be set
+ * @set_filter: true to set filter, false to clear filter
+ * @no_wol_tco: if true, pass through packets cannot cause wake-up
+ *		if false, pass through packets may cause wake-up
+ * @filter_valid: true if filter action is valid
+ * @no_wol_tco_valid: true if no WoL in TCO traffic action valid
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set or clear WoL filter for port attached to the PF
+ **/
+i40e_status i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
+				u8 filter_index,
+				struct i40e_aqc_set_wol_filter_data *filter,
+				bool set_filter, bool no_wol_tco,
+				bool filter_valid, bool no_wol_tco_valid,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_set_wol_filter *cmd =
+		(struct i40e_aqc_set_wol_filter *)&desc.params.raw;
+	i40e_status status;
+	u16 cmd_flags = 0;
+	u16 valid_flags = 0;
+	u16 buff_len = 0;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_wol_filter);
+
+	if (filter_index >= I40E_AQC_MAX_NUM_WOL_FILTERS)
+		return  I40E_ERR_PARAM;
+	cmd->filter_index = CPU_TO_LE16(filter_index);
+
+	if (set_filter) {
+		if (!filter)
+			return  I40E_ERR_PARAM;
+
+		cmd_flags |= I40E_AQC_SET_WOL_FILTER;
+		cmd_flags |= I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR;
+	}
+
+	if (no_wol_tco)
+		cmd_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL;
+	cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
+
+	if (filter_valid)
+		valid_flags |= I40E_AQC_SET_WOL_FILTER_ACTION_VALID;
+	if (no_wol_tco_valid)
+		valid_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID;
+	cmd->valid_flags = CPU_TO_LE16(valid_flags);
+
+	buff_len = sizeof(*filter);
+	desc.datalen = CPU_TO_LE16(buff_len);
+
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
+	cmd->address_high = CPU_TO_LE32(upper_32_bits((u64)filter));
+	cmd->address_low = CPU_TO_LE32(lower_32_bits((u64)filter));
+
+	status = i40e_asq_send_command(hw, &desc, filter,
+				       buff_len, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_get_wake_event_reason
+ * @hw: pointer to the hw struct
+ * @wake_reason: return value, index of matching filter
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get information for the reason of a Wake Up event
+ **/
+i40e_status i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
+				u16 *wake_reason,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_get_wake_reason_completion *resp =
+		(struct i40e_aqc_get_wake_reason_completion *)&desc.params.raw;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_wake_reason);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	if (status == I40E_SUCCESS)
+		*wake_reason = LE16_TO_CPU(resp->wake_reason);
+
+	return status;
+}
+
+/**
+* i40e_aq_clear_all_wol_filters
+* @hw: pointer to the hw struct
+* @cmd_details: pointer to command details structure or NULL
+*
+* Clear all WoL filters for the port attached to the PF
+**/
+i40e_status i40e_aq_clear_all_wol_filters(struct i40e_hw *hw,
+	struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_clear_all_wol_filters);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
  * @hw: pointer to the hw struct
  * @buff: command buffer (size in bytes = buff_size)
  * @buff_size: buffer size in bytes
@@ -5062,7 +6540,7 @@ do_retry:
  * @cmd_details: pointer to command details structure or NULL
  **/
 enum
-i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
+i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
 				   u16 buff_size, u32 track_id,
 				   u32 *error_offset, u32 *error_info,
 				   struct i40e_asq_cmd_details *cmd_details)
@@ -5071,41 +6549,42 @@ i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
 	struct i40e_aqc_write_personalization_profile *cmd =
 		(struct i40e_aqc_write_personalization_profile *)
 		&desc.params.raw;
-	struct i40e_aqc_write_ppp_resp *resp;
+	struct i40e_aqc_write_ddp_resp *resp;
 	i40e_status status;
 
 	i40e_fill_default_direct_cmd_desc(&desc,
-					  i40e_aqc_opc_write_personalization_profile);
+				  i40e_aqc_opc_write_personalization_profile);
 
-	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
+	desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
 	if (buff_size > I40E_AQ_LARGE_BUF)
-		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
 
-	desc.datalen = cpu_to_le16(buff_size);
+	desc.datalen = CPU_TO_LE16(buff_size);
 
-	cmd->profile_track_id = cpu_to_le32(track_id);
+	cmd->profile_track_id = CPU_TO_LE32(track_id);
 
 	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
 	if (!status) {
-		resp = (struct i40e_aqc_write_ppp_resp *)&desc.params.raw;
+		resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
 		if (error_offset)
-			*error_offset = le32_to_cpu(resp->error_offset);
+			*error_offset = LE32_TO_CPU(resp->error_offset);
 		if (error_info)
-			*error_info = le32_to_cpu(resp->error_info);
+			*error_info = LE32_TO_CPU(resp->error_info);
 	}
 
 	return status;
 }
 
 /**
- * i40e_aq_get_ppp_list - Read pipeline personalization profile (ppp)
+ * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
  * @hw: pointer to the hw struct
  * @buff: command buffer (size in bytes = buff_size)
  * @buff_size: buffer size in bytes
+ * @flags: AdminQ command flags
  * @cmd_details: pointer to command details structure or NULL
  **/
 enum
-i40e_status_code i40e_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
 				      u16 buff_size, u8 flags,
 				      struct i40e_asq_cmd_details *cmd_details)
 {
@@ -5115,12 +6594,12 @@ i40e_status_code i40e_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
 	i40e_status status;
 
 	i40e_fill_default_direct_cmd_desc(&desc,
-					  i40e_aqc_opc_get_personalization_profile_list);
+			  i40e_aqc_opc_get_personalization_profile_list);
 
-	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
 	if (buff_size > I40E_AQ_LARGE_BUF)
-		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
-	desc.datalen = cpu_to_le16(buff_size);
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+	desc.datalen = CPU_TO_LE16(buff_size);
 
 	cmd->flags = flags;
 
@@ -5158,6 +6637,165 @@ i40e_find_segment_in_package(u32 segment_type,
 	return NULL;
 }
 
+/* Get section table in profile */
+#define I40E_SECTION_TABLE(profile, sec_tbl)				\
+	do {								\
+		struct i40e_profile_segment *p = (profile);		\
+		u32 count;						\
+		u32 *nvm;						\
+		count = p->device_table_count;				\
+		nvm = (u32 *)&p->device_table[count];			\
+		sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
+	} while (0)
+
+/* Get section header in profile */
+#define I40E_SECTION_HEADER(profile, offset)				\
+	(struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
+
+/**
+ * i40e_find_section_in_profile
+ * @section_type: the section type to search for (e.g., SECTION_TYPE_NOTE)
+ * @profile: pointer to the i40e segment header to be searched
+ *
+ * This function searches i40e segment for a particular section type. On
+ * success it returns a pointer to the section header, otherwise it will
+ * return NULL.
+ **/
+struct i40e_profile_section_header *
+i40e_find_section_in_profile(u32 section_type,
+			     struct i40e_profile_segment *profile)
+{
+	struct i40e_profile_section_header *sec;
+	struct i40e_section_table *sec_tbl;
+	u32 sec_off;
+	u32 i;
+
+	if (profile->header.type != SEGMENT_TYPE_I40E)
+		return NULL;
+
+	I40E_SECTION_TABLE(profile, sec_tbl);
+
+	for (i = 0; i < sec_tbl->section_count; i++) {
+		sec_off = sec_tbl->section_offset[i];
+		sec = I40E_SECTION_HEADER(profile, sec_off);
+		if (sec->section.type == section_type)
+			return sec;
+	}
+
+	return NULL;
+}
+
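A hypothetical lookup sketch, probing a loaded profile segment for its note section:

	struct i40e_profile_section_header *note;

	note = i40e_find_section_in_profile(SECTION_TYPE_NOTE, profile);
	if (note)
		i40e_debug(hw, I40E_DEBUG_PACKAGE,
			   "profile carries a NOTE section of %u bytes\n",
			   note->section.size);
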
+/**
+ * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
+ * @hw: pointer to the hw struct
+ * @aq: command buffer containing all data to execute AQ
+ **/
+static enum
+i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
+					  struct i40e_profile_aq_section *aq)
+{
+	i40e_status status;
+	struct i40e_aq_desc desc;
+	u8 *msg = NULL;
+	u16 msglen;
+
+	i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
+	desc.flags |= CPU_TO_LE16(aq->flags);
+	i40e_memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw),
+		    I40E_NONDMA_TO_NONDMA);
+
+	msglen = aq->datalen;
+	if (msglen) {
+		desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF |
+						I40E_AQ_FLAG_RD));
+		if (msglen > I40E_AQ_LARGE_BUF)
+			desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+		desc.datalen = CPU_TO_LE16(msglen);
+		msg = &aq->data[0];
+	}
+
+	status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);
+
+	if (status != I40E_SUCCESS) {
+		i40e_debug(hw, I40E_DEBUG_PACKAGE,
+			   "unable to exec DDP AQ opcode %u, error %d\n",
+			   aq->opcode, status);
+		return status;
+	}
+
+	/* copy returned desc to aq_buf */
+	i40e_memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw),
+		    I40E_NONDMA_TO_NONDMA);
+
+	return I40E_SUCCESS;
+}
+
+/**
+ * i40e_validate_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be validated
+ * @track_id: package tracking id
+ * @rollback: flag indicating whether the profile is for rollback.
+ *
+ * Validates supported devices and profile's sections.
+ */
+static enum i40e_status_code
+i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+		      u32 track_id, bool rollback)
+{
+	struct i40e_profile_section_header *sec = NULL;
+	i40e_status status = I40E_SUCCESS;
+	struct i40e_section_table *sec_tbl;
+	u32 vendor_dev_id;
+	u32 dev_cnt;
+	u32 sec_off;
+	u32 i;
+
+	if (track_id == I40E_DDP_TRACKID_INVALID) {
+		i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
+		return I40E_NOT_SUPPORTED;
+	}
+
+	dev_cnt = profile->device_table_count;
+	for (i = 0; i < dev_cnt; i++) {
+		vendor_dev_id = profile->device_table[i].vendor_dev_id;
+		if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL &&
+		    hw->device_id == (vendor_dev_id & 0xFFFF))
+			break;
+	}
+	if (dev_cnt && (i == dev_cnt)) {
+		i40e_debug(hw, I40E_DEBUG_PACKAGE,
+			   "Device doesn't support DDP\n");
+		return I40E_ERR_DEVICE_NOT_SUPPORTED;
+	}
+
+	I40E_SECTION_TABLE(profile, sec_tbl);
+
+	/* Validate sections types */
+	for (i = 0; i < sec_tbl->section_count; i++) {
+		sec_off = sec_tbl->section_offset[i];
+		sec = I40E_SECTION_HEADER(profile, sec_off);
+		if (rollback) {
+			if (sec->section.type == SECTION_TYPE_MMIO ||
+			    sec->section.type == SECTION_TYPE_AQ ||
+			    sec->section.type == SECTION_TYPE_RB_AQ) {
+				i40e_debug(hw, I40E_DEBUG_PACKAGE,
+					   "Not a roll-back package\n");
+				return I40E_NOT_SUPPORTED;
+			}
+		} else {
+			if (sec->section.type == SECTION_TYPE_RB_AQ ||
+			    sec->section.type == SECTION_TYPE_RB_MMIO) {
+				i40e_debug(hw, I40E_DEBUG_PACKAGE,
+					   "Not an original package\n");
+				return I40E_NOT_SUPPORTED;
+			}
+		}
+	}
+
+	return status;
+}
+
 /**
  * i40e_write_profile
  * @hw: pointer to the hardware structure
@@ -5170,55 +6808,102 @@ enum i40e_status_code
 i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
 		   u32 track_id)
 {
-	i40e_status status = 0;
+	i40e_status status = I40E_SUCCESS;
 	struct i40e_section_table *sec_tbl;
 	struct i40e_profile_section_header *sec = NULL;
-	u32 dev_cnt;
-	u32 vendor_dev_id;
-	u32 *nvm;
+	struct i40e_profile_aq_section *ddp_aq;
 	u32 section_size = 0;
 	u32 offset = 0, info = 0;
+	u32 sec_off;
 	u32 i;
 
-	if (!track_id) {
-		i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0.");
-		return I40E_NOT_SUPPORTED;
-	}
+	status = i40e_validate_profile(hw, profile, track_id, false);
+	if (status)
+		return status;
 
-	dev_cnt = profile->device_table_count;
+	I40E_SECTION_TABLE(profile, sec_tbl);
 
-	for (i = 0; i < dev_cnt; i++) {
-		vendor_dev_id = profile->device_table[i].vendor_dev_id;
-		if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL)
-			if (hw->device_id == (vendor_dev_id & 0xFFFF))
+	for (i = 0; i < sec_tbl->section_count; i++) {
+		sec_off = sec_tbl->section_offset[i];
+		sec = I40E_SECTION_HEADER(profile, sec_off);
+		/* Process generic admin command */
+		if (sec->section.type == SECTION_TYPE_AQ) {
+			ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
+			status = i40e_ddp_exec_aq_section(hw, ddp_aq);
+			if (status) {
+				i40e_debug(hw, I40E_DEBUG_PACKAGE,
+					   "Failed to execute aq: section %d, opcode %u\n",
+					   i, ddp_aq->opcode);
 				break;
+			}
+			sec->section.type = SECTION_TYPE_RB_AQ;
+		}
+
+		/* Skip any non-mmio sections */
+		if (sec->section.type != SECTION_TYPE_MMIO)
+			continue;
+
+		section_size = sec->section.size +
+			sizeof(struct i40e_profile_section_header);
+
+		/* Write MMIO section */
+		status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
+					   track_id, &offset, &info, NULL);
+		if (status) {
+			i40e_debug(hw, I40E_DEBUG_PACKAGE,
+				   "Failed to write profile: section %d, offset %d, info %d\n",
+				   i, offset, info);
+			break;
+		}
 	}
-	if (i == dev_cnt) {
-		i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support PPP");
-		return I40E_ERR_DEVICE_NOT_SUPPORTED;
-	}
+	return status;
+}
 
-	nvm = (u32 *)&profile->device_table[dev_cnt];
-	sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1];
+/**
+ * i40e_rollback_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be removed
+ * @track_id: package tracking id
+ *
+ * Rolls back previously loaded package.
+ */
+enum i40e_status_code
+i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+		      u32 track_id)
+{
+	struct i40e_profile_section_header *sec = NULL;
+	i40e_status status = I40E_SUCCESS;
+	struct i40e_section_table *sec_tbl;
+	u32 offset = 0, info = 0;
+	u32 section_size = 0;
+	u32 sec_off;
+	int i;
 
-	for (i = 0; i < sec_tbl->section_count; i++) {
-		sec = (struct i40e_profile_section_header *)((u8 *)profile +
-					     sec_tbl->section_offset[i]);
+	status = i40e_validate_profile(hw, profile, track_id, true);
+	if (status)
+		return status;
 
-		/* Skip 'AQ', 'note' and 'name' sections */
-		if (sec->section.type != SECTION_TYPE_MMIO)
+	I40E_SECTION_TABLE(profile, sec_tbl);
+
+	/* For rollback write sections in reverse */
+	for (i = sec_tbl->section_count - 1; i >= 0; i--) {
+		sec_off = sec_tbl->section_offset[i];
+		sec = I40E_SECTION_HEADER(profile, sec_off);
+
+		/* Skip any non-rollback sections */
+		if (sec->section.type != SECTION_TYPE_RB_MMIO)
 			continue;
 
 		section_size = sec->section.size +
 			sizeof(struct i40e_profile_section_header);
 
-		/* Write profile */
-		status = i40e_aq_write_ppp(hw, (void *)sec, (u16)section_size,
+		/* Write roll-back MMIO section */
+		status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
 					   track_id, &offset, &info, NULL);
 		if (status) {
 			i40e_debug(hw, I40E_DEBUG_PACKAGE,
-				   "Failed to write profile: offset %d, info %d",
-				   offset, info);
+				   "Failed to write profile: section %d, offset %d, info %d\n",
+				   i, offset, info);
 			break;
 		}
 	}
@@ -5239,7 +6924,7 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw,
 		       struct i40e_profile_segment *profile,
 		       u8 *profile_info_sec, u32 track_id)
 {
-	i40e_status status = 0;
+	i40e_status status = I40E_SUCCESS;
 	struct i40e_profile_section_header *sec = NULL;
 	struct i40e_profile_info *pinfo;
 	u32 offset = 0, info = 0;
@@ -5255,10 +6940,11 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw,
 					     sec->section.offset);
 	pinfo->track_id = track_id;
 	pinfo->version = profile->version;
-	pinfo->op = I40E_PPP_ADD_TRACKID;
-	memcpy(pinfo->name, profile->name, I40E_PPP_NAME_SIZE);
+	pinfo->op = I40E_DDP_ADD_TRACKID;
+	i40e_memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE,
+		    I40E_NONDMA_TO_NONDMA);
 
-	status = i40e_aq_write_ppp(hw, (void *)sec, sec->data_end,
+	status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
 				   track_id, &offset, &info, NULL);
 	return status;
 }
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 55079fe3e..0580a51fe 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -1,28 +1,5 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
 #include "i40e_adminq.h"
 #include "i40e_prototype.h"
@@ -46,7 +23,7 @@ i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
 	*status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >>
 			I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT);
 
-	return 0;
+	return I40E_SUCCESS;
 }
 
 /**
@@ -387,7 +364,6 @@ static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv,
 		       I40E_LLDP_TLV_LEN_SHIFT);
 
 	dcbcfg->numapps = length / sizeof(*app);
-
 	if (!dcbcfg->numapps)
 		return;
 	if (dcbcfg->numapps > I40E_DCBX_MAX_APPS)
@@ -523,7 +499,7 @@ static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv,
 i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
 				    struct i40e_dcbx_config *dcbcfg)
 {
-	i40e_status ret = 0;
+	i40e_status ret = I40E_SUCCESS;
 	struct i40e_lldp_org_tlv *tlv;
 	u16 type;
 	u16 length;
@@ -578,7 +554,7 @@ i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
 				   u8 bridgetype,
 				   struct i40e_dcbx_config *dcbcfg)
 {
-	i40e_status ret = 0;
+	i40e_status ret = I40E_SUCCESS;
 	struct i40e_virt_mem mem;
 	u8 *lldpmib;
 
@@ -613,8 +589,8 @@ static void i40e_cee_to_dcb_v1_config(
 			struct i40e_aqc_get_cee_dcb_cfg_v1_resp *cee_cfg,
 			struct i40e_dcbx_config *dcbcfg)
 {
-	u16 status, tlv_status = le16_to_cpu(cee_cfg->tlv_status);
-	u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
+	u16 status, tlv_status = LE16_TO_CPU(cee_cfg->tlv_status);
+	u16 app_prio = LE16_TO_CPU(cee_cfg->oper_app_prio);
 	u8 i, tc, err;
 
 	/* CEE PG data to ETS config */
@@ -627,7 +603,7 @@ static void i40e_cee_to_dcb_v1_config(
 		tc = (u8)((cee_cfg->oper_prio_tc[i] &
 			 I40E_CEE_PGID_PRIO_0_MASK) >>
 			 I40E_CEE_PGID_PRIO_0_SHIFT);
-		dcbcfg->etscfg.prioritytable[i * 2] =  tc;
+		dcbcfg->etscfg.prioritytable[i*2] =  tc;
 		tc = (u8)((cee_cfg->oper_prio_tc[i] &
 			 I40E_CEE_PGID_PRIO_1_MASK) >>
 			 I40E_CEE_PGID_PRIO_1_SHIFT);
@@ -694,8 +670,8 @@ static void i40e_cee_to_dcb_config(
 				struct i40e_aqc_get_cee_dcb_cfg_resp *cee_cfg,
 				struct i40e_dcbx_config *dcbcfg)
 {
-	u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status);
-	u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
+	u32 status, tlv_status = LE32_TO_CPU(cee_cfg->tlv_status);
+	u16 app_prio = LE16_TO_CPU(cee_cfg->oper_app_prio);
 	u8 i, tc, err, sync, oper;
 
 	/* CEE PG data to ETS config */
@@ -708,11 +684,11 @@ static void i40e_cee_to_dcb_config(
 		tc = (u8)((cee_cfg->oper_prio_tc[i] &
 			 I40E_CEE_PGID_PRIO_0_MASK) >>
 			 I40E_CEE_PGID_PRIO_0_SHIFT);
-		dcbcfg->etscfg.prioritytable[i * 2] =  tc;
+		dcbcfg->etscfg.prioritytable[i*2] =  tc;
 		tc = (u8)((cee_cfg->oper_prio_tc[i] &
 			 I40E_CEE_PGID_PRIO_1_MASK) >>
 			 I40E_CEE_PGID_PRIO_1_SHIFT);
-		dcbcfg->etscfg.prioritytable[i * 2 + 1] = tc;
+		dcbcfg->etscfg.prioritytable[i*2 + 1] = tc;
 	}
 
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
@@ -792,7 +768,7 @@ static void i40e_cee_to_dcb_config(
  **/
 static i40e_status i40e_get_ieee_dcb_config(struct i40e_hw *hw)
 {
-	i40e_status ret = 0;
+	i40e_status ret = I40E_SUCCESS;
 
 	/* IEEE mode */
 	hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
@@ -808,7 +784,7 @@ static i40e_status i40e_get_ieee_dcb_config(struct i40e_hw *hw)
 				     &hw->remote_dcbx_config);
 	/* Don't treat ENOENT as an error for Remote MIBs */
 	if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
-		ret = 0;
+		ret = I40E_SUCCESS;
 
 out:
 	return ret;
@@ -822,7 +798,7 @@ out:
  **/
 i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
 {
-	i40e_status ret = 0;
+	i40e_status ret = I40E_SUCCESS;
 	struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg;
 	struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg;
 
@@ -837,22 +813,22 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
 	    ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33))) {
 		ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg,
 						 sizeof(cee_v1_cfg), NULL);
-		if (!ret) {
+		if (ret == I40E_SUCCESS) {
 			/* CEE mode */
 			hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
 			hw->local_dcbx_config.tlv_status =
-					le16_to_cpu(cee_v1_cfg.tlv_status);
+					LE16_TO_CPU(cee_v1_cfg.tlv_status);
 			i40e_cee_to_dcb_v1_config(&cee_v1_cfg,
 						  &hw->local_dcbx_config);
 		}
 	} else {
 		ret = i40e_aq_get_cee_dcb_config(hw, &cee_cfg,
 						 sizeof(cee_cfg), NULL);
-		if (!ret) {
+		if (ret == I40E_SUCCESS) {
 			/* CEE mode */
 			hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
 			hw->local_dcbx_config.tlv_status =
-					le32_to_cpu(cee_cfg.tlv_status);
+					LE32_TO_CPU(cee_cfg.tlv_status);
 			i40e_cee_to_dcb_config(&cee_cfg,
 					       &hw->local_dcbx_config);
 		}
@@ -862,7 +838,7 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
 	if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
 		return i40e_get_ieee_dcb_config(hw);
 
-	if (ret)
+	if (ret != I40E_SUCCESS)
 		goto out;
 
 	/* Get CEE DCB Desired Config */
@@ -873,11 +849,11 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
 
 	/* Get Remote DCB Config */
 	ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
-				     I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
-				     &hw->remote_dcbx_config);
+			     I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+			     &hw->remote_dcbx_config);
 	/* Don't treat ENOENT as an error for Remote MIBs */
 	if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
-		ret = 0;
+		ret = I40E_SUCCESS;
 
 out:
 	return ret;
@@ -886,22 +862,41 @@ out:
 /**
  * i40e_init_dcb
  * @hw: pointer to the hw struct
+ * @enable_mib_change: enable mib change event
  *
  * Update DCB configuration from the Firmware
  **/
-i40e_status i40e_init_dcb(struct i40e_hw *hw)
+i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
 {
-	i40e_status ret = 0;
+	i40e_status ret = I40E_SUCCESS;
 	struct i40e_lldp_variables lldp_cfg;
 	u8 adminstatus = 0;
 
 	if (!hw->func_caps.dcb)
-		return ret;
+		return I40E_NOT_SUPPORTED;
 
 	/* Read LLDP NVM area */
-	ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
+	if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) {
+		u8 offset = 0;
+
+		if (hw->mac.type == I40E_MAC_XL710)
+			offset = I40E_LLDP_CURRENT_STATUS_XL710_OFFSET;
+		else if (hw->mac.type == I40E_MAC_X722)
+			offset = I40E_LLDP_CURRENT_STATUS_X722_OFFSET;
+		else
+			return I40E_NOT_SUPPORTED;
+
+		ret = i40e_read_nvm_module_data(hw,
+						I40E_SR_EMP_SR_SETTINGS_PTR,
+						offset,
+						I40E_LLDP_CURRENT_STATUS_OFFSET,
+						I40E_LLDP_CURRENT_STATUS_SIZE,
+						&lldp_cfg.adminstatus);
+	} else {
+		ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
+	}
 	if (ret)
-		return ret;
+		return I40E_ERR_NOT_READY;
 
 	/* Get the LLDP AdminStatus for the current port */
 	adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
@@ -910,7 +905,7 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw)
 	/* LLDP agent disabled */
 	if (!adminstatus) {
 		hw->dcbx_status = I40E_DCBX_STATUS_DISABLED;
-		return ret;
+		return I40E_ERR_NOT_READY;
 	}
 
 	/* Get DCBX status */
@@ -919,27 +914,454 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw)
 		return ret;
 
 	/* Check the DCBX Status */
-	switch (hw->dcbx_status) {
-	case I40E_DCBX_STATUS_DONE:
-	case I40E_DCBX_STATUS_IN_PROGRESS:
+	if (hw->dcbx_status == I40E_DCBX_STATUS_DONE ||
+	    hw->dcbx_status == I40E_DCBX_STATUS_IN_PROGRESS) {
 		/* Get current DCBX configuration */
 		ret = i40e_get_dcb_config(hw);
 		if (ret)
 			return ret;
-		break;
-	case I40E_DCBX_STATUS_DISABLED:
+	} else if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
+		return I40E_ERR_NOT_READY;
+	}
+
+	/* Configure the LLDP MIB change event */
+	if (enable_mib_change)
+		ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL);
+
+	return ret;
+}
+
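With the reworked return codes, callers can now tell a disabled LLDP agent apart from a hard failure. A hypothetical probe during PF bring-up:

	i40e_status ret;

	ret = i40e_init_dcb(hw, true);
	if (ret == I40E_ERR_NOT_READY)
		pr_info("i40e: DCB not ready, FW LLDP agent may be disabled\n");
	else if (ret)
		pr_warn("i40e: DCB init failed, err %d\n", ret);
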
+/**
+ * i40e_get_fw_lldp_status
+ * @hw: pointer to the hw struct
+ * @lldp_status: pointer to the status enum
+ *
+ * Get status of FW Link Layer Discovery Protocol (LLDP) Agent.
+ * Status of agent is reported via @lldp_status parameter.
+ **/
+enum i40e_status_code
+i40e_get_fw_lldp_status(struct i40e_hw *hw,
+			enum i40e_get_fw_lldp_status_resp *lldp_status)
+{
+	i40e_status ret;
+	struct i40e_virt_mem mem;
+	u8 *lldpmib;
+
+	if (!lldp_status)
+		return I40E_ERR_PARAM;
+
+	/* Allocate buffer for the LLDPDU */
+	ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
+	if (ret)
 		return ret;
-	case I40E_DCBX_STATUS_NOT_STARTED:
-	case I40E_DCBX_STATUS_MULTIPLE_PEERS:
+
+	lldpmib = (u8 *)mem.va;
+	ret = i40e_aq_get_lldp_mib(hw, 0, 0, (void *)lldpmib,
+				   I40E_LLDPDU_SIZE, NULL, NULL, NULL);
+
+	if (ret == I40E_SUCCESS) {
+		*lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED;
+	} else if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) {
+		/* MIB is not available yet but the agent is running */
+		*lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED;
+		ret = I40E_SUCCESS;
+	} else if (hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
+		*lldp_status = I40E_GET_FW_LLDP_STATUS_DISABLED;
+		ret = I40E_SUCCESS;
+	}
+
+	i40e_free_virt_mem(hw, &mem);
+	return ret;
+}
+
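Note how the two AQ error codes are folded into the enum: ENOENT (agent running, MIB not built yet) still reports ENABLED, while EPERM reports DISABLED. A hypothetical probe:

	enum i40e_get_fw_lldp_status_resp lldp_status;

	if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS &&
	    lldp_status == I40E_GET_FW_LLDP_STATUS_ENABLED)
		pr_info("i40e: FW LLDP agent is running\n");
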
+/**
+ * i40e_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format
+ * @tlv: Fill the ETS config data in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS CFG TLV
+ **/
+static void i40e_add_ieee_ets_tlv(struct i40e_lldp_org_tlv *tlv,
+				  struct i40e_dcbx_config *dcbcfg)
+{
+	u8 priority0, priority1, maxtcwilling = 0;
+	struct i40e_dcb_ets_config *etscfg;
+	u16 offset = 0, typelength, i;
+	u8 *buf = tlv->tlvinfo;
+	u32 ouisubtype;
+
+	typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+			I40E_IEEE_ETS_TLV_LENGTH);
+	tlv->typelength = htons(typelength);
+
+	ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+			I40E_IEEE_SUBTYPE_ETS_CFG);
+	tlv->ouisubtype = htonl(ouisubtype);
+
+	/* First Octet post subtype
+	 * --------------------------
+	 * |will-|CBS  | Re-  | Max |
+	 * |ing  |     |served| TCs |
+	 * --------------------------
+	 * |1bit | 1bit|3 bits|3bits|
+	 */
+	etscfg = &dcbcfg->etscfg;
+	if (etscfg->willing)
+		maxtcwilling = BIT(I40E_IEEE_ETS_WILLING_SHIFT);
+	maxtcwilling |= etscfg->maxtcs & I40E_IEEE_ETS_MAXTC_MASK;
+	buf[offset] = maxtcwilling;
+
+	/* Move offset to Priority Assignment Table */
+	offset++;
+
+	/* Priority Assignment Table (4 octets)
+	 * Octets:|    1    |    2    |    3    |    4    |
+	 *        -----------------------------------------
+	 *        |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+	 *        -----------------------------------------
+	 *   Bits:|7  4|3  0|7  4|3  0|7  4|3  0|7  4|3  0|
+	 *        -----------------------------------------
+	 */
+	for (i = 0; i < 4; i++) {
+		priority0 = etscfg->prioritytable[i * 2] & 0xF;
+		priority1 = etscfg->prioritytable[i * 2 + 1] & 0xF;
+		buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) |
+				priority1;
+		offset++;
+	}
+
+	/* TC Bandwidth Table (8 octets)
+	 * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+	 *        ---------------------------------
+	 *        |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+	 *        ---------------------------------
+	 */
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+		buf[offset++] = etscfg->tcbwtable[i];
+
+	/* TSA Assignment Table (8 octets)
+	 * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+	 *        ---------------------------------
+	 *        |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+	 *        ---------------------------------
+	 */
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+		buf[offset++] = etscfg->tsatable[i];
+}
+
+/**
+ * i40e_add_ieee_etsrec_tlv - Prepare ETS Recommended TLV in IEEE format
+ * @tlv: Fill ETS Recommended TLV in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS REC TLV
+ **/
+static void i40e_add_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv,
+				     struct i40e_dcbx_config *dcbcfg)
+{
+	struct i40e_dcb_ets_config *etsrec;
+	u16 offset = 0, typelength, i;
+	u8 priority0, priority1;
+	u8 *buf = tlv->tlvinfo;
+	u32 ouisubtype;
+
+	typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+			I40E_IEEE_ETS_TLV_LENGTH);
+	tlv->typelength = htons(typelength);
+
+	ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+			I40E_IEEE_SUBTYPE_ETS_REC);
+	tlv->ouisubtype = htonl(ouisubtype);
+
+	etsrec = &dcbcfg->etsrec;
+	/* First Octet is reserved */
+	/* Move offset to Priority Assignment Table */
+	offset++;
+
+	/* Priority Assignment Table (4 octets)
+	 * Octets:|    1    |    2    |    3    |    4    |
+	 *        -----------------------------------------
+	 *        |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+	 *        -----------------------------------------
+	 *   Bits:|7  4|3  0|7  4|3  0|7  4|3  0|7  4|3  0|
+	 *        -----------------------------------------
+	 */
+	for (i = 0; i < 4; i++) {
+		priority0 = etsrec->prioritytable[i * 2] & 0xF;
+		priority1 = etsrec->prioritytable[i * 2 + 1] & 0xF;
+		buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) |
+				priority1;
+		offset++;
+	}
+
+	/* TC Bandwidth Table (8 octets)
+	 * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+	 *        ---------------------------------
+	 *        |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+	 *        ---------------------------------
+	 */
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+		buf[offset++] = etsrec->tcbwtable[i];
+
+	/* TSA Assignment Table (8 octets)
+	 * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+	 *        ---------------------------------
+	 *        |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+	 *        ---------------------------------
+	 */
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+		buf[offset++] = etsrec->tsatable[i];
+}
+
+/**
+ * i40e_add_ieee_pfc_tlv - Prepare PFC TLV in IEEE format
+ * @tlv: Fill PFC TLV in IEEE format
+ * @dcbcfg: Local store to get PFC CFG data
+ *
+ * Prepare IEEE 802.1Qaz PFC CFG TLV
+ **/
+static void i40e_add_ieee_pfc_tlv(struct i40e_lldp_org_tlv *tlv,
+				  struct i40e_dcbx_config *dcbcfg)
+{
+	u8 *buf = tlv->tlvinfo;
+	u32 ouisubtype;
+	u16 typelength;
+
+	typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+			I40E_IEEE_PFC_TLV_LENGTH);
+	tlv->typelength = htons(typelength);
+
+	ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+			I40E_IEEE_SUBTYPE_PFC_CFG);
+	tlv->ouisubtype = htonl(ouisubtype);
+
+	/* ----------------------------------------
+	 * |will-|MBC  | Re-  | PFC |  PFC Enable  |
+	 * |ing  |     |served| cap |              |
+	 * -----------------------------------------
+	 * |1bit | 1bit|2 bits|4bits| 1 octet      |
+	 */
+	if (dcbcfg->pfc.willing)
+		buf[0] = BIT(I40E_IEEE_PFC_WILLING_SHIFT);
+
+	if (dcbcfg->pfc.mbc)
+		buf[0] |= BIT(I40E_IEEE_PFC_MBC_SHIFT);
+
+	buf[0] |= dcbcfg->pfc.pfccap & 0xF;
+	buf[1] = dcbcfg->pfc.pfcenable;
+}
+
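For concreteness, with willing = 1, mbc = 0, pfccap = 8 and PFC enabled only on priority 3, the two payload octets work out as follows (assuming I40E_IEEE_PFC_WILLING_SHIFT is 7, as in i40e_dcb.h):

	/* buf[0] = BIT(7) | (8 & 0xF) -> 0x88 (willing set, 8 TCs capable) */
	/* buf[1] = BIT(3)             -> 0x08 (PFC on priority 3 only)     */
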
+/**
+ * i40e_add_ieee_app_pri_tlv -  Prepare APP TLV in IEEE format
+ * @tlv: Fill APP TLV in IEEE format
+ * @dcbcfg: Local store to get APP CFG data
+ *
+ * Prepare IEEE 802.1Qaz APP CFG TLV
+ **/
+static void i40e_add_ieee_app_pri_tlv(struct i40e_lldp_org_tlv *tlv,
+				      struct i40e_dcbx_config *dcbcfg)
+{
+	u16 typelength, length, offset = 0;
+	u8 priority, selector, i = 0;
+	u8 *buf = tlv->tlvinfo;
+	u32 ouisubtype;
+
+	/* No APP TLVs then just return */
+	if (dcbcfg->numapps == 0)
+		return;
+	ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+			I40E_IEEE_SUBTYPE_APP_PRI);
+	tlv->ouisubtype = htonl(ouisubtype);
+
+	/* Move offset to App Priority Table */
+	offset++;
+	/* Application Priority Table (3 octets)
+	 * Octets:|         1          |    2    |    3    |
+	 *        -----------------------------------------
+	 *        |Priority|Rsrvd| Sel |    Protocol ID    |
+	 *        -----------------------------------------
+	 *   Bits:|23    21|20 19|18 16|15                0|
+	 *        -----------------------------------------
+	 */
+	while (i < dcbcfg->numapps) {
+		priority = dcbcfg->app[i].priority & 0x7;
+		selector = dcbcfg->app[i].selector & 0x7;
+		buf[offset] = (priority << I40E_IEEE_APP_PRIO_SHIFT) | selector;
+		buf[offset + 1] = (dcbcfg->app[i].protocolid >> 0x8) & 0xFF;
+		buf[offset + 2] = dcbcfg->app[i].protocolid & 0xFF;
+		/* Move to next app */
+		offset += 3;
+		i++;
+		if (i >= I40E_DCBX_MAX_APPS)
+			break;
+	}
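+	/* e.g. priority 3, selector 1 (Ethertype) and protocol 0x8906
+	 * (FCoE) encode as the three octets 0x61 0x89 0x06
+	 * (illustrative entry).
+	 */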
+	/* length includes size of ouisubtype + 1 reserved + 3*numapps */
+	length = sizeof(tlv->ouisubtype) + 1 + (i * 3);
+	typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+		(length & 0x1FF));
+	tlv->typelength = htons(typelength);
+}
+
+/**
+ * i40e_add_dcb_tlv - Add all IEEE TLVs
+ * @tlv: pointer to org tlv
+ * @dcbcfg: DCBX configuration to pull the TLV data from
+ * @tlvid: type of IEEE TLV to add
+ *
+ * add tlv information
+ **/
+static void i40e_add_dcb_tlv(struct i40e_lldp_org_tlv *tlv,
+			     struct i40e_dcbx_config *dcbcfg,
+			     u16 tlvid)
+{
+	switch (tlvid) {
+	case I40E_IEEE_TLV_ID_ETS_CFG:
+		i40e_add_ieee_ets_tlv(tlv, dcbcfg);
+		break;
+	case I40E_IEEE_TLV_ID_ETS_REC:
+		i40e_add_ieee_etsrec_tlv(tlv, dcbcfg);
+		break;
+	case I40E_IEEE_TLV_ID_PFC_CFG:
+		i40e_add_ieee_pfc_tlv(tlv, dcbcfg);
+		break;
+	case I40E_IEEE_TLV_ID_APP_PRI:
+		i40e_add_ieee_app_pri_tlv(tlv, dcbcfg);
+		break;
 	default:
 		break;
 	}
+}
 
-	/* Configure the LLDP MIB change event */
-	ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL);
+/**
+ * i40e_set_dcb_config - Set the local LLDP MIB to FW
+ * @hw: pointer to the hw struct
+ *
+ * Set DCB configuration to the Firmware
+ **/
+i40e_status i40e_set_dcb_config(struct i40e_hw *hw)
+{
+	i40e_status ret = I40E_SUCCESS;
+	struct i40e_dcbx_config *dcbcfg;
+	struct i40e_virt_mem mem;
+	u8 mib_type, *lldpmib;
+	u16 miblen;
+
+	/* update the hw local config */
+	dcbcfg = &hw->local_dcbx_config;
+	/* Allocate the LLDPDU */
+	ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
 	if (ret)
 		return ret;
 
+	mib_type = SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB;
+	if (dcbcfg->app_mode == I40E_DCBX_APPS_NON_WILLING) {
+		mib_type |= SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS <<
+			    SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT;
+	}
+	lldpmib = (u8 *)mem.va;
+	ret = i40e_dcb_config_to_lldp(lldpmib, &miblen, dcbcfg);
+	if (!ret)
+		ret = i40e_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib,
+					   miblen, NULL);
+
+	i40e_free_virt_mem(hw, &mem);
+	return ret;
+}
+
+/**
+ * i40e_dcb_config_to_lldp - Convert DCB configuration to MIB format
+ * @lldpmib: pointer to LLDPDU buffer to be filled in
+ * @miblen: pointer to u16 for length of lldpmib
+ * @dcbcfg: DCBX configuration to be converted
+ *
+ * Convert the DCB configuration into LLDP MIB format, ready to be
+ * sent to firmware with i40e_aq_set_lldp_mib()
+ **/
+i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+					      struct i40e_dcbx_config *dcbcfg)
+{
+	u16 length, offset = 0, tlvid = I40E_TLV_ID_START;
+	i40e_status ret = I40E_SUCCESS;
+	struct i40e_lldp_org_tlv *tlv;
+	u16 typelength;
+
+	tlv = (struct i40e_lldp_org_tlv *)lldpmib;
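+	/* Walk the TLV IDs from ETS CFG through APP priority; each
+	 * i40e_add_dcb_tlv() call fills in one TLV, and the "+ 2" below
+	 * accounts for the 2-byte type/length header preceding each TLV.
+	 */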
+	while (1) {
+		i40e_add_dcb_tlv(tlv, dcbcfg, tlvid++);
+		typelength = ntohs(tlv->typelength);
+		length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+				I40E_LLDP_TLV_LEN_SHIFT);
+		if (length)
+			offset += length + 2;
+		/* END TLV or beyond LLDPDU size */
+		if ((tlvid >= I40E_TLV_ID_END_OF_LLDPPDU) ||
+		    (offset > I40E_LLDPDU_SIZE))
+			break;
+		/* Move to next TLV */
+		if (length)
+			tlv = (struct i40e_lldp_org_tlv *)((char *)tlv +
+			      sizeof(tlv->typelength) + length);
+	}
+	*miblen = offset;
+	return ret;
+}
+
+/**
+ * _i40e_read_lldp_cfg - generic read of LLDP Configuration data from NVM
+ * @hw: pointer to the HW structure
+ * @lldp_cfg: pointer to hold lldp configuration variables
+ * @module: address of the module pointer
+ * @word_offset: offset of LLDP configuration
+ *
+ * Reads the LLDP configuration data from NVM using passed addresses
+ **/
+static i40e_status _i40e_read_lldp_cfg(struct i40e_hw *hw,
+					  struct i40e_lldp_variables *lldp_cfg,
+					  u8 module, u32 word_offset)
+{
+	u32 address, offset = (2 * word_offset);
+	i40e_status ret;
+	__le16 raw_mem;
+	u16 mem;
+
+	ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+	if (ret != I40E_SUCCESS)
+		return ret;
+
+	ret = i40e_aq_read_nvm(hw, 0x0, module * 2, sizeof(raw_mem), &raw_mem,
+			       true, NULL);
+	i40e_release_nvm(hw);
+	if (ret != I40E_SUCCESS)
+		return ret;
+
+	mem = LE16_TO_CPU(raw_mem);
+	/* Check if this pointer needs to be read in word size or 4K sector
+	 * units.
+	 */
+	if (mem & I40E_PTR_TYPE)
+		address = (0x7FFF & mem) * 4096;
+	else
+		address = (0x7FFF & mem) * 2;
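+	/* e.g. mem = 0x8002 is a 4K-sector pointer (address = 2 * 4096),
+	 * while mem = 0x0040 is a word pointer (address = 0x40 * 2)
+	 * (illustrative values).
+	 */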
+
+	ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+	if (ret != I40E_SUCCESS)
+		goto err_lldp_cfg;
+
+	ret = i40e_aq_read_nvm(hw, module, offset, sizeof(raw_mem), &raw_mem,
+			       true, NULL);
+	i40e_release_nvm(hw);
+	if (ret != I40E_SUCCESS)
+		return ret;
+
+	mem = LE16_TO_CPU(raw_mem);
+	offset = mem + word_offset;
+	offset *= 2;
+
+	ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+	if (ret != I40E_SUCCESS)
+		goto err_lldp_cfg;
+
+	ret = i40e_aq_read_nvm(hw, 0, address + offset,
+			       sizeof(struct i40e_lldp_variables), lldp_cfg,
+			       true, NULL);
+	i40e_release_nvm(hw);
+
+err_lldp_cfg:
 	return ret;
 }
 
@@ -951,24 +1373,37 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw)
  * Reads the LLDP configuration data from NVM
  **/
 i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
-			       struct i40e_lldp_variables *lldp_cfg)
+					 struct i40e_lldp_variables *lldp_cfg)
 {
-	i40e_status ret = 0;
-	u32 offset = (2 * I40E_NVM_LLDP_CFG_PTR);
+	i40e_status ret = I40E_SUCCESS;
+	u32 mem;
 
 	if (!lldp_cfg)
 		return I40E_ERR_PARAM;
 
 	ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
-	if (ret)
-		goto err_lldp_cfg;
+	if (ret != I40E_SUCCESS)
+		return ret;
 
-	ret = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, offset,
-			       sizeof(struct i40e_lldp_variables),
-			       (u8 *)lldp_cfg,
-			       true, NULL);
+	ret = i40e_aq_read_nvm(hw, I40E_SR_NVM_CONTROL_WORD, 0, sizeof(mem),
+			       &mem, true, NULL);
 	i40e_release_nvm(hw);
+	if (ret != I40E_SUCCESS)
+		return ret;
+
+	/* Read a bit that holds information whether we are running flat or
+	 * structured NVM image. Flat image has LLDP configuration in shadow
+	 * ram, so there is a need to pass different addresses for both cases.
+	 */
+	if (mem & I40E_SR_NVM_MAP_STRUCTURE_TYPE) {
+		/* Flat NVM case */
+		ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_SR_EMP_MODULE_PTR,
+					  I40E_SR_LLDP_CFG_PTR);
+	} else {
+		/* Good old structured NVM image */
+		ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_EMP_MODULE_PTR,
+					  I40E_NVM_LLDP_CFG_PTR);
+	}
 
-err_lldp_cfg:
 	return ret;
 }
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index 92d01042c..3cdaeeb15 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -1,28 +1,5 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
 #ifndef _I40E_DCB_H_
 #define _I40E_DCB_H_
@@ -53,6 +30,11 @@
 #define I40E_CEE_SUBTYPE_APP_PRI	4
 
 #define I40E_CEE_MAX_FEAT_TYPE		3
+#define I40E_LLDP_CURRENT_STATUS_XL710_OFFSET	0x2B
+#define I40E_LLDP_CURRENT_STATUS_X722_OFFSET	0x31
+#define I40E_LLDP_CURRENT_STATUS_OFFSET		1
+#define I40E_LLDP_CURRENT_STATUS_SIZE		1
+
 /* Defines for LLDP TLV header */
 #define I40E_LLDP_TLV_LEN_SHIFT		0
 #define I40E_LLDP_TLV_LEN_MASK		(0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
@@ -98,6 +80,20 @@
 #define I40E_IEEE_APP_PRIO_SHIFT	5
 #define I40E_IEEE_APP_PRIO_MASK		(0x7 << I40E_IEEE_APP_PRIO_SHIFT)
 
+/* TLV definitions for preparing MIB */
+#define I40E_TLV_ID_CHASSIS_ID		0
+#define I40E_TLV_ID_PORT_ID		1
+#define I40E_TLV_ID_TIME_TO_LIVE	2
+#define I40E_IEEE_TLV_ID_ETS_CFG	3
+#define I40E_IEEE_TLV_ID_ETS_REC	4
+#define I40E_IEEE_TLV_ID_PFC_CFG	5
+#define I40E_IEEE_TLV_ID_APP_PRI	6
+#define I40E_TLV_ID_END_OF_LLDPPDU	7
+#define I40E_TLV_ID_START		I40E_IEEE_TLV_ID_ETS_CFG
+
+#define I40E_IEEE_ETS_TLV_LENGTH	25
+#define I40E_IEEE_PFC_TLV_LENGTH	6
+#define I40E_IEEE_APP_TLV_LENGTH	11
 
 #pragma pack(1)
 
@@ -139,6 +135,11 @@ struct i40e_cee_app_prio {
 };
 #pragma pack()
 
+enum i40e_get_fw_lldp_status_resp {
+	I40E_GET_FW_LLDP_STATUS_DISABLED = 0,
+	I40E_GET_FW_LLDP_STATUS_ENABLED = 1
+};
+
 i40e_status i40e_get_dcbx_status(struct i40e_hw *hw,
 					   u16 *status);
 i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
@@ -147,5 +148,12 @@ i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
 					     u8 bridgetype,
 					     struct i40e_dcbx_config *dcbcfg);
 i40e_status i40e_get_dcb_config(struct i40e_hw *hw);
-i40e_status i40e_init_dcb(struct i40e_hw *hw);
+i40e_status i40e_init_dcb(struct i40e_hw *hw,
+				    bool enable_mib_change);
+enum i40e_status_code
+i40e_get_fw_lldp_status(struct i40e_hw *hw,
+			enum i40e_get_fw_lldp_status_resp *lldp_status);
+i40e_status i40e_set_dcb_config(struct i40e_hw *hw);
+i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+					      struct i40e_dcbx_config *dcbcfg);
 #endif /* _I40E_DCB_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 886e667f2..1610197b4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -1,33 +1,11 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
-#ifdef CONFIG_I40E_DCB
+#ifdef CONFIG_DCB
 #include "i40e.h"
 #include <net/dcbnl.h>
 
+#ifdef HAVE_DCBNL_IEEE
 /**
  * i40e_get_pfc_delay - retrieve PFC Link Delay
  * @hw: pointer to hardware struct
@@ -46,7 +24,7 @@ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
 
 /**
  * i40e_dcbnl_ieee_getets - retrieve local IEEE ETS configuration
- * @netdev: the corresponding netdev
+ * @dev: the corresponding netdev
  * @ets: structure to hold the ETS information
  *
  * Returns local IEEE ETS configuration
@@ -85,8 +63,8 @@ static int i40e_dcbnl_ieee_getets(struct net_device *dev,
 
 /**
  * i40e_dcbnl_ieee_getpfc - retrieve local IEEE PFC configuration
- * @netdev: the corresponding netdev
- * @ets: structure to hold the PFC information
+ * @dev: the corresponding netdev
+ * @pfc: structure to hold the PFC information
  *
  * Returns local IEEE PFC configuration
  **/
@@ -118,7 +96,7 @@ static int i40e_dcbnl_ieee_getpfc(struct net_device *dev,
 
 /**
  * i40e_dcbnl_getdcbx - retrieve current DCBx capability
- * @netdev: the corresponding netdev
+ * @dev: the corresponding netdev
  *
  * Returns DCBx capability features
  **/
@@ -131,7 +109,8 @@ static u8 i40e_dcbnl_getdcbx(struct net_device *dev)
 
 /**
  * i40e_dcbnl_get_perm_hw_addr - MAC address used by DCBx
- * @netdev: the corresponding netdev
+ * @dev: the corresponding netdev
+ * @perm_addr: buffer to store the MAC address
  *
  * Returns the SAN MAC address used for LLDP exchange
  **/
@@ -198,8 +177,10 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
 		}
 	}
 
+#ifdef HAVE_DCBNL_IEEE_DELAPP
 	/* Notify user-space of the changes */
 	dcbnl_ieee_notify(dev, RTM_SETDCB, DCB_CMD_IEEE_SET, 0, 0);
+#endif
 }
 
 /**
@@ -317,4 +298,5 @@ void i40e_dcbnl_setup(struct i40e_vsi *vsi)
 	/* Set initial IEEE DCB settings */
 	i40e_dcbnl_set_all(vsi);
 }
-#endif /* CONFIG_I40E_DCB */
+#endif /* HAVE_DCBNL_IEEE */
+#endif /* CONFIG_DCB */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
new file mode 100644
index 000000000..bda29f1a8
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
@@ -0,0 +1,478 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
+
+#include "i40e.h"
+
+#include <linux/firmware.h>
+
+/**
+ * i40e_ddp_profiles_eq - checks if two DDP profiles are equivalent
+ * @a: new profile info
+ * @b: old profile info
+ *
+ * Checks if two DDP profiles are equivalent.
+ * Returns true if the profiles are the same.
+ **/
+static bool i40e_ddp_profiles_eq(struct i40e_profile_info *a,
+				 struct i40e_profile_info *b)
+{
+	return a->track_id == b->track_id &&
+		!memcmp(&a->version, &b->version, sizeof(a->version)) &&
+		!memcmp(&a->name, &b->name, I40E_DDP_NAME_SIZE);
+}
+
+/**
+ * i40e_ddp_does_profile_exist - checks if DDP profile loaded already
+ * @hw: HW data structure
+ * @pinfo: DDP profile information structure
+ *
+ * checks if DDP profile loaded already.
+ * Returns >0 if the profile exists.
+ * Returns  0 if the profile is absent.
+ * Returns <0 if error.
+ **/
+static int i40e_ddp_does_profile_exist(struct i40e_hw *hw,
+				       struct i40e_profile_info *pinfo)
+{
+	struct i40e_ddp_profile_list *profile_list;
+	u8 buff[I40E_PROFILE_LIST_SIZE];
+	i40e_status status;
+	int i;
+
+	status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0,
+				      NULL);
+	if (status != I40E_SUCCESS)
+		return -1;
+
+	profile_list = (struct i40e_ddp_profile_list *)buff;
+	for (i = 0; i < profile_list->p_count; i++) {
+		if (i40e_ddp_profiles_eq(pinfo, &profile_list->p_info[i]))
+			return 1;
+	}
+	return 0;
+}
+
+/**
+ * i40e_ddp_profiles_overlap - checks if DDP profiles overlap.
+ * @new: new profile info
+ * @old: old profile info
+ *
+ * Checks if DDP profiles overlap.
+ * Returns true if the profiles overlap.
+ **/
+static bool i40e_ddp_profiles_overlap(struct i40e_profile_info *new,
+				      struct i40e_profile_info *old)
+{
+	unsigned int group_id_old = (u8)((old->track_id & 0x00FF0000) >> 16);
+	unsigned int group_id_new = (u8)((new->track_id & 0x00FF0000) >> 16);
+
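+	/* The group id lives in bits 23:16 of track_id; e.g. a track_id
+	 * of 0x80AB0001 belongs to group 0xAB (illustrative value).
+	 */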
+	/* 0x00 group must be only the first */
+	if (group_id_new == 0)
+		return true;
+	/* 0xFF group is compatible with anything else */
+	if (group_id_new == 0xFF || group_id_old == 0xFF)
+		return false;
+	/* otherwise only profiles from the same group are compatible */
+	return group_id_old != group_id_new;
+}
+
+/**
+ * i40e_ddp_does_profile_overlap - checks if DDP profile overlaps with existing one.
+ * @hw: HW data structure
+ * @pinfo: DDP profile information structure
+ *
+ * checks if DDP profile overlaps with existing one.
+ * Returns >0 if the profile overlaps.
+ * Returns  0 if the profile is ok.
+ * Returns <0 if error.
+ **/
+static int i40e_ddp_does_profile_overlap(struct i40e_hw *hw,
+				         struct i40e_profile_info *pinfo)
+{
+	struct i40e_ddp_profile_list *profile_list;
+	u8 buff[I40E_PROFILE_LIST_SIZE];
+	i40e_status status;
+	int i;
+
+	status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0,
+				      NULL);
+	if (status != I40E_SUCCESS)
+		return -EIO;
+
+	profile_list = (struct i40e_ddp_profile_list *)buff;
+	for (i = 0; i < profile_list->p_count; i++) {
+		if (i40e_ddp_profiles_overlap(pinfo,
+					      &profile_list->p_info[i]))
+			return 1;
+	}
+	return 0;
+}
+
+/**
+ * i40e_add_pinfo - register a profile on the list of loaded profiles
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package
+ * @profile_info_sec: buffer for information section
+ * @track_id: package tracking id
+ *
+ * Register a profile to the list of loaded profiles.
+ **/
+static enum i40e_status_code
+i40e_add_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+	       u8 *profile_info_sec, u32 track_id)
+{
+	struct i40e_profile_section_header *sec;
+	struct i40e_profile_info *pinfo;
+	i40e_status status;
+	u32 offset = 0, info = 0;
+
+	sec = (struct i40e_profile_section_header *)profile_info_sec;
+	sec->tbl_size = 1;
+	sec->data_end = sizeof(struct i40e_profile_section_header) +
+			sizeof(struct i40e_profile_info);
+	sec->section.type = SECTION_TYPE_INFO;
+	sec->section.offset = sizeof(struct i40e_profile_section_header);
+	sec->section.size = sizeof(struct i40e_profile_info);
+	pinfo = (struct i40e_profile_info *)(profile_info_sec +
+					     sec->section.offset);
+	pinfo->track_id = track_id;
+	pinfo->version = profile->version;
+	pinfo->op = I40E_DDP_ADD_TRACKID;
+
+	/* Clear reserved field */
+	memset(pinfo->reserved, 0, sizeof(pinfo->reserved));
+	i40e_memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE,
+		    I40E_NONDMA_TO_NONDMA);
+
+	status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
+				   track_id, &offset, &info, NULL);
+	return status;
+}
+
+/**
+ * i40e_del_pinfo - delete DDP profile info from NIC
+ * @hw: HW data structure
+ * @profile: DDP profile segment to be deleted
+ * @profile_info_sec: DDP profile section header
+ * @track_id: track ID of the profile for deletion
+ *
+ * Removes DDP profile from the NIC.
+ **/
+static enum i40e_status_code
+i40e_del_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+	       u8 *profile_info_sec, u32 track_id)
+{
+	struct i40e_profile_section_header *sec;
+	struct i40e_profile_info *pinfo;
+	i40e_status status;
+	u32 offset = 0, info = 0;
+
+	sec = (struct i40e_profile_section_header *)profile_info_sec;
+	sec->tbl_size = 1;
+	sec->data_end = sizeof(struct i40e_profile_section_header) +
+			sizeof(struct i40e_profile_info);
+	sec->section.type = SECTION_TYPE_INFO;
+	sec->section.offset = sizeof(struct i40e_profile_section_header);
+	sec->section.size = sizeof(struct i40e_profile_info);
+	pinfo = (struct i40e_profile_info *)(profile_info_sec +
+					     sec->section.offset);
+	pinfo->track_id = track_id;
+	pinfo->version = profile->version;
+	pinfo->op = I40E_DDP_REMOVE_TRACKID;
+
+	/* Clear reserved field */
+	memset(pinfo->reserved, 0, sizeof(pinfo->reserved));
+	memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
+
+	status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
+				   track_id, &offset, &info, NULL);
+	return status;
+}
+
+/**
+ * i40e_ddp_is_pkg_hdr_valid - performs basic pkg header integrity checks
+ * @netdev: net device structure (for logging purposes)
+ * @pkg_hdr: pointer to package header
+ * @size_huge: size of the whole DDP profile package as a size_t
+ *
+ * Checks the correctness of the package header: version, size too
+ * big/small, and the alignment and boundaries of all segment offsets.
+ * This lets us reject files that are not DDP profiles loaded by
+ * administrator mistake.
+ **/
+static bool i40e_ddp_is_pkg_hdr_valid(struct net_device *netdev,
+				      struct i40e_package_header *pkg_hdr,
+				      size_t size_huge)
+{
+	u32 size = 0xFFFFFFFFU & size_huge;
+	u32 pkg_hdr_size;
+	u32 segment;
+
+	if (!pkg_hdr)
+		return false;
+
+	if (pkg_hdr->version.major > 0) {
+		struct i40e_ddp_version ver = pkg_hdr->version;
+
+		netdev_err(netdev, "Unsupported DDP profile version %u.%u.%u.%u",
+				   ver.major, ver.minor, ver.update, ver.draft);
+		return false;
+	}
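+	/* "size" was truncated to 32 bits above, so any mismatch with
+	 * size_huge means the file exceeds 4 GiB.
+	 */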
+	if (size_huge > size) {
+		netdev_err(netdev, "Invalid DDP profile - size is bigger than 4G");
+		return false;
+	}
+	if (size < (sizeof(struct i40e_package_header) +
+		sizeof(struct i40e_metadata_segment) + sizeof(u32) * 2)) {
+		netdev_err(netdev, "Invalid DDP profile - size is too small.");
+		return false;
+	}
+
+	pkg_hdr_size = sizeof(u32) * (pkg_hdr->segment_count + 2U);
+	if (size < pkg_hdr_size) {
+		netdev_err(netdev, "Invalid DDP profile - too many segments");
+		return false;
+	}
+	for (segment = 0; segment < pkg_hdr->segment_count; ++segment) {
+		u32 offset = pkg_hdr->segment_offset[segment];
+
+		if (0xFU & offset) {
+			netdev_err(netdev, "Invalid DDP profile %u segment alignment", segment);
+			return false;
+		}
+		if (pkg_hdr_size > offset || offset >= size) {
+			netdev_err(netdev, "Invalid DDP profile %u segment offset", segment);
+			return false;
+		}
+	}
+
+	return true;
+}
+
+/**
+ * i40e_ddp_load - performs DDP loading
+ * @netdev: net device structure
+ * @data: buffer containing recipe file
+ * @size: size of the buffer
+ * @is_add: true when loading profile, false when rolling back the previous one
+ *
+ * Checks correctness and loads DDP profile to the NIC. The function is
+ * also used for rolling back previously loaded profile.
+ **/
+int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,
+		  bool is_add)
+{
+	u8 profile_info_sec[sizeof(struct i40e_profile_section_header) +
+			    sizeof(struct i40e_profile_info)];
+	struct i40e_metadata_segment *metadata_hdr;
+	struct i40e_profile_segment *profile_hdr;
+	struct i40e_profile_info pinfo;
+	struct i40e_package_header *pkg_hdr;
+	i40e_status status;
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	u32 track_id;
+	int istatus;
+
+	pkg_hdr = (struct i40e_package_header *)data;
+	if (!i40e_ddp_is_pkg_hdr_valid(netdev, pkg_hdr, size))
+		return -EINVAL;
+
+	if (size < (sizeof(struct i40e_package_header) +
+		    sizeof(struct i40e_metadata_segment) + sizeof(u32) * 2)) {
+		netdev_err(netdev, "Invalid DDP recipe size.");
+		return -EINVAL;
+	}
+
+	/* Find beginning of segment data in buffer */
+	metadata_hdr = (struct i40e_metadata_segment *)
+		i40e_find_segment_in_package(SEGMENT_TYPE_METADATA, pkg_hdr);
+	if (!metadata_hdr) {
+		netdev_err(netdev, "Failed to find metadata segment in DDP recipe.");
+		return -EINVAL;
+	}
+
+	track_id = metadata_hdr->track_id;
+	profile_hdr = (struct i40e_profile_segment *)
+		i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
+	if (!profile_hdr) {
+		netdev_err(netdev, "Failed to find profile segment in DDP recipe.");
+		return -EINVAL;
+	}
+
+	pinfo.track_id = track_id;
+	pinfo.version = profile_hdr->version;
+	if (is_add)
+		pinfo.op = I40E_DDP_ADD_TRACKID;
+	else
+		pinfo.op = I40E_DDP_REMOVE_TRACKID;
+
+	memcpy(pinfo.name, profile_hdr->name, I40E_DDP_NAME_SIZE);
+
+	/* Check if profile data already exists */
+	istatus = i40e_ddp_does_profile_exist(&pf->hw, &pinfo);
+	if (istatus < 0) {
+		netdev_err(netdev, "Failed to fetch loaded profiles.");
+		return istatus;
+	}
+	if (is_add) {
+		if (istatus > 0) {
+			netdev_err(netdev, "DDP profile already loaded.");
+			return -EINVAL;
+		}
+		istatus = i40e_ddp_does_profile_overlap(&pf->hw, &pinfo);
+		if (istatus < 0) {
+			netdev_err(netdev, "Failed to fetch loaded profiles.");
+			return istatus;
+		}
+		if (istatus > 0) {
+			netdev_err(netdev, "DDP profile overlaps with existing one.");
+			return -EINVAL;
+		}
+	} else {
+		if (istatus == 0) {
+			netdev_err(netdev,
+				   "DDP profile for deletion does not exist.");
+			return -EINVAL;
+		}
+	}
+
+	/* Load profile data */
+	if (is_add) {
+		status = i40e_write_profile(&pf->hw, profile_hdr, track_id);
+		if (status) {
+			if (status == I40E_ERR_DEVICE_NOT_SUPPORTED) {
+				netdev_err(netdev, "Profile is not supported by the device.");
+				return -EPERM;
+			}
+			netdev_err(netdev, "Failed to write DDP profile.");
+			return -EIO;
+		}
+	} else {
+		status = i40e_rollback_profile(&pf->hw, profile_hdr, track_id);
+		if (status) {
+			netdev_err(netdev, "Failed to remove DDP profile.");
+			return -EIO;
+		}
+	}
+
+	/* Add/remove profile to/from profile list in FW */
+	if (is_add) {
+		status = i40e_add_pinfo(&pf->hw, profile_hdr, profile_info_sec,
+					track_id);
+		if (status) {
+			netdev_err(netdev, "Failed to add DDP profile info.");
+			return -EIO;
+		}
+	} else {
+		status = i40e_del_pinfo(&pf->hw, profile_hdr, profile_info_sec,
+					track_id);
+		if (status) {
+			netdev_err(netdev, "Failed to restore DDP profile info.");
+			return -EIO;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * i40e_ddp_restore - restore previously loaded profile and remove from list
+ * @pf: PF data struct
+ *
+ * Restores previously loaded profile stored on the list in driver memory.
+ * After rolling back removes entry from the list.
+ **/
+static int i40e_ddp_restore(struct i40e_pf *pf)
+{
+	struct i40e_ddp_old_profile_list *entry;
+	struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev;
+	int status = 0;
+
+	if (!list_empty(&pf->ddp_old_prof)) {
+		entry = list_first_entry(&pf->ddp_old_prof,
+					 struct i40e_ddp_old_profile_list,
+					 list);
+		status = i40e_ddp_load(netdev, entry->old_ddp_buf,
+				       entry->old_ddp_size, false);
+		list_del(&entry->list);
+		kfree(entry);
+	}
+	return status;
+}
+
+#define I40E_DDP_PROFILE_NAME_MAX \
+	(sizeof(I40E_DDP_PROFILE_PATH) + ETHTOOL_FLASH_MAX_FILENAME)
+
+/**
+ * i40e_ddp_flash - callback function for ethtool flash feature
+ * @netdev: net device structure
+ * @flash: kernel flash structure
+ *
+ * Ethtool callback function used for loading and unloading DDP profiles.
+ **/
+int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash)
+{
+	const struct firmware *ddp_config;
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	int status = 0;
+
+	/* Check for valid region first */
+	if (flash->region != I40_DDP_FLASH_REGION) {
+		netdev_err(netdev, "Requested firmware region is not recognized by this driver.");
+		return -EINVAL;
+	}
+	if (pf->hw.bus.func != 0) {
+		netdev_err(netdev, "Any DDP operation is allowed only on Phy0 NIC interface");
+		return -EINVAL;
+	}
+
+	/* If the user supplied "-" instead of a file name, roll back the
+	 * previously stored profile; otherwise load the new one.
+	 */
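+	/* e.g. "ethtool -f eth0 profile.pkg 100" loads a profile and
+	 * "ethtool -f eth0 - 100" rolls it back; region 100 is assumed to
+	 * match I40_DDP_FLASH_REGION and the file is looked up under
+	 * I40E_DDP_PROFILE_PATH (illustrative command lines).
+	 */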
+	if (strncmp(flash->data, "-", 2) != 0) {
+		struct i40e_ddp_old_profile_list *list_entry;
+		char profile_name[I40E_DDP_PROFILE_NAME_MAX];
+
+		profile_name[sizeof(profile_name) - 1] = 0;
+		strncpy(profile_name, I40E_DDP_PROFILE_PATH, sizeof(profile_name) - 1);
+		strncat(profile_name, flash->data, ETHTOOL_FLASH_MAX_FILENAME);
+		/* Load DDP recipe. */
+		status = request_firmware(&ddp_config, profile_name,
+					  &netdev->dev);
+		if (status) {
+			netdev_err(netdev, "DDP recipe file request failed.");
+			return status;
+		}
+
+		status = i40e_ddp_load(netdev, ddp_config->data,
+				       ddp_config->size, true);
+
+		if (!status) {
+			list_entry = kzalloc(
+				sizeof(struct i40e_ddp_old_profile_list) +
+				ddp_config->size, GFP_KERNEL);
+			if (!list_entry) {
+				netdev_info(netdev, "Failed to allocate memory for previous DDP profile data.");
+				netdev_info(netdev, "New profile loaded but roll-back will be impossible.");
+			} else {
+				memcpy(list_entry->old_ddp_buf,
+				       ddp_config->data, ddp_config->size);
+				list_entry->old_ddp_size = ddp_config->size;
+				list_add(&list_entry->list, &pf->ddp_old_prof);
+			}
+		}
+
+		release_firmware(ddp_config);
+	} else {
+		if (!list_empty(&pf->ddp_old_prof)) {
+			status = i40e_ddp_restore(pf);
+		} else {
+			netdev_warn(netdev, "There is no DDP profile to restore.");
+			status = -ENOENT;
+		}
+	}
+	return status;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 8f326f87a..928ef5b27 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1,28 +1,5 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
 #ifdef CONFIG_DEBUG_FS
 
@@ -35,8 +12,8 @@ static struct dentry *i40e_dbg_root;
 
 /**
  * i40e_dbg_find_vsi - searches for the vsi with the given seid
- * @pf - the PF structure to search for the vsi
- * @seid - seid of the vsi it is searching for
+ * @pf: the PF structure to search for the vsi
+ * @seid: seid of the vsi it is searching for
  **/
 static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
 {
@@ -54,8 +31,8 @@ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
 
 /**
  * i40e_dbg_find_veb - searches for the veb with the given seid
- * @pf - the PF structure to search for the veb
- * @seid - seid of the veb it is searching for
+ * @pf: the PF structure to search for the veb
+ * @seid: seid of the veb it is searching for
  **/
 static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid)
 {
@@ -88,7 +65,7 @@ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
 {
 	struct i40e_pf *pf = filp->private_data;
 	int bytes_not_copied;
-	int buf_size = 256;
+	size_t buf_size = 256;
 	char *buf;
 	int len;
 
@@ -124,6 +101,44 @@ static char *i40e_filter_state_string[] = {
 	"REMOVE",
 };
 
+/**
+ * i40e_dbg_dump_vsi_filters - handles dump of mac/vlan filters for a VSI
+ * @pf: the i40e_pf created in command write
+ * @vsi: the vsi to dump
+ */
+static void i40e_dbg_dump_vsi_filters(struct i40e_pf *pf, struct i40e_vsi *vsi)
+{
+	struct i40e_mac_filter *f;
+	int bkt;
+
+	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
+		dev_info(&pf->pdev->dev,
+			 "    mac_filter_hash: %pM vid=%d, state %s\n",
+			 f->macaddr, f->vlan,
+			 i40e_filter_state_string[f->state]);
+	}
+	dev_info(&pf->pdev->dev, "    active_filters %u, promisc_threshold %u, overflow promisc %s\n",
+		 vsi->active_filters, vsi->promisc_threshold,
+		 (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) ?
+		  "ON" : "OFF"));
+}
+
+/**
+ * i40e_dbg_dump_all_vsi_filters - dump mac/vlan filters for all VSI on a PF
+ * @pf: the i40e_pf created in command write
+ */
+static void i40e_dbg_dump_all_vsi_filters(struct i40e_pf *pf)
+{
+	int i;
+
+	for (i = 0; i < pf->num_alloc_vsi; i++)
+		if (pf->vsi[i]) {
+			dev_info(&pf->pdev->dev, "vsi seid %d\n",
+				 pf->vsi[i]->seid);
+			i40e_dbg_dump_vsi_filters(pf, pf->vsi[i]);
+		}
+}
+
 /**
  * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum
  * @pf: the i40e_pf created in command write
@@ -131,10 +146,13 @@ static char *i40e_filter_state_string[] = {
  **/
 static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
 {
+#ifdef HAVE_NDO_GET_STATS64
 	struct rtnl_link_stats64 *nstat;
-	struct i40e_mac_filter *f;
+#else
+	struct net_device_stats *nstat;
+#endif
 	struct i40e_vsi *vsi;
-	int i, bkt;
+	int i;
 
 	vsi = i40e_dbg_find_vsi(pf, seid);
 	if (!vsi) {
@@ -145,20 +163,38 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
 	dev_info(&pf->pdev->dev, "vsi seid %d\n", seid);
 	if (vsi->netdev) {
 		struct net_device *nd = vsi->netdev;
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+		u32 hw_features;
+#endif
 
 		dev_info(&pf->pdev->dev, "    netdev: name = %s, state = %lu, flags = 0x%08x\n",
 			 nd->name, nd->state, nd->flags);
 		dev_info(&pf->pdev->dev, "        features      = 0x%08lx\n",
 			 (unsigned long int)nd->features);
+#ifdef HAVE_NDO_SET_FEATURES
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+		hw_features = get_netdev_hw_features(vsi->netdev);
+		dev_info(&pf->pdev->dev, "        hw_features   = 0x%08x\n",
+			 hw_features);
+#else
 		dev_info(&pf->pdev->dev, "        hw_features   = 0x%08lx\n",
 			 (unsigned long int)nd->hw_features);
+#endif
+#endif
+#ifdef HAVE_NETDEV_VLAN_FEATURES
 		dev_info(&pf->pdev->dev, "        vlan_features = 0x%08lx\n",
 			 (unsigned long int)nd->vlan_features);
+#endif
 	}
+#ifdef HAVE_VLAN_RX_REGISTER
+	dev_info(&pf->pdev->dev, "    vlgrp is %s\n",
+		 vsi->vlgrp ? "<valid>" : "<null>");
+#else
+	dev_info(&pf->pdev->dev, "    active_vlans is %s\n",
+		 vsi->active_vlans ? "<valid>" : "<null>");
+#endif /* HAVE_VLAN_RX_REGISTER */
 	dev_info(&pf->pdev->dev,
-		 "    vlgrp: & = %p\n", vsi->active_vlans);
-	dev_info(&pf->pdev->dev,
-		 "    flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
+		 "    flags = 0x%016llx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
 		 vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags);
 	for (i = 0; i < BITS_TO_LONGS(__I40E_VSI_STATE_SIZE__); i++)
 		dev_info(&pf->pdev->dev,
@@ -169,16 +205,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
 			 pf->hw.mac.addr,
 			 pf->hw.mac.san_addr,
 			 pf->hw.mac.port_addr);
-	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
-		dev_info(&pf->pdev->dev,
-			 "    mac_filter_hash: %pM vid=%d, state %s\n",
-			 f->macaddr, f->vlan,
-			 i40e_filter_state_string[f->state]);
-	}
-	dev_info(&pf->pdev->dev, "    active_filters %u, promisc_threshold %u, overflow promisc %s\n",
-		 vsi->active_filters, vsi->promisc_threshold,
-		 (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) ?
-		  "ON" : "OFF"));
+	i40e_dbg_dump_vsi_filters(pf, vsi);
 	nstat = i40e_get_vsi_stats_struct(vsi);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
@@ -264,22 +291,14 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
 		 vsi->rx_buf_failed, vsi->rx_page_failed);
 	rcu_read_lock();
 	for (i = 0; i < vsi->num_queue_pairs; i++) {
-		struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]);
+		struct i40e_ring *rx_ring = READ_ONCE(vsi->rx_rings[i]);
 
 		if (!rx_ring)
 			continue;
 
 		dev_info(&pf->pdev->dev,
-			 "    rx_rings[%i]: desc = %p\n",
-			 i, rx_ring->desc);
-		dev_info(&pf->pdev->dev,
-			 "    rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
-			 i, rx_ring->dev,
-			 rx_ring->netdev,
-			 rx_ring->rx_bi);
-		dev_info(&pf->pdev->dev,
-			 "    rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
-			 i, rx_ring->state,
+			 "    rx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
+			 i, *rx_ring->state,
 			 rx_ring->queue_index,
 			 rx_ring->reg_idx);
 		dev_info(&pf->pdev->dev,
@@ -307,35 +326,23 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
 			 rx_ring->rx_stats.realloc_count,
 			 rx_ring->rx_stats.page_reuse_count);
 		dev_info(&pf->pdev->dev,
-			 "    rx_rings[%i]: size = %i, dma = 0x%08lx\n",
-			 i, rx_ring->size,
-			 (unsigned long int)rx_ring->dma);
-		dev_info(&pf->pdev->dev,
-			 "    rx_rings[%i]: vsi = %p, q_vector = %p\n",
-			 i, rx_ring->vsi,
-			 rx_ring->q_vector);
+			 "    rx_rings[%i]: size = %i\n",
+			 i, rx_ring->size);
 		dev_info(&pf->pdev->dev,
 			 "    rx_rings[%i]: rx_itr_setting = %d (%s)\n",
-			 i, rx_ring->rx_itr_setting,
-			 ITR_IS_DYNAMIC(rx_ring->rx_itr_setting) ? "dynamic" : "fixed");
+			 i, rx_ring->itr_setting,
+			 ITR_IS_DYNAMIC(rx_ring->itr_setting) ?
+				"dynamic" : "fixed");
 	}
 	for (i = 0; i < vsi->num_queue_pairs; i++) {
-		struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+		struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]);
 
 		if (!tx_ring)
 			continue;
 
 		dev_info(&pf->pdev->dev,
-			 "    tx_rings[%i]: desc = %p\n",
-			 i, tx_ring->desc);
-		dev_info(&pf->pdev->dev,
-			 "    tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
-			 i, tx_ring->dev,
-			 tx_ring->netdev,
-			 tx_ring->tx_bi);
-		dev_info(&pf->pdev->dev,
-			 "    tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
-			 i, tx_ring->state,
+			 "    tx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
+			 i, *tx_ring->state,
 			 tx_ring->queue_index,
 			 tx_ring->reg_idx);
 		dev_info(&pf->pdev->dev,
@@ -355,20 +362,16 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
 			 tx_ring->tx_stats.tx_busy,
 			 tx_ring->tx_stats.tx_done_old);
 		dev_info(&pf->pdev->dev,
-			 "    tx_rings[%i]: size = %i, dma = 0x%08lx\n",
-			 i, tx_ring->size,
-			 (unsigned long int)tx_ring->dma);
-		dev_info(&pf->pdev->dev,
-			 "    tx_rings[%i]: vsi = %p, q_vector = %p\n",
-			 i, tx_ring->vsi,
-			 tx_ring->q_vector);
+			 "    tx_rings[%i]: size = %i\n",
+			 i, tx_ring->size);
 		dev_info(&pf->pdev->dev,
 			 "    tx_rings[%i]: DCB tc = %d\n",
 			 i, tx_ring->dcb_tc);
 		dev_info(&pf->pdev->dev,
 			 "    tx_rings[%i]: tx_itr_setting = %d (%s)\n",
-			 i, tx_ring->tx_itr_setting,
-			 ITR_IS_DYNAMIC(tx_ring->tx_itr_setting) ? "dynamic" : "fixed");
+			 i, tx_ring->itr_setting,
+			 ITR_IS_DYNAMIC(tx_ring->itr_setting) ?
+				"dynamic" : "fixed");
 	}
 	rcu_read_unlock();
 	dev_info(&pf->pdev->dev,
@@ -384,8 +387,9 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
 		 "    seid = %d, id = %d, uplink_seid = %d\n",
 		 vsi->seid, vsi->id, vsi->uplink_seid);
 	dev_info(&pf->pdev->dev,
-		 "    base_queue = %d, num_queue_pairs = %d, num_desc = %d\n",
-		 vsi->base_queue, vsi->num_queue_pairs, vsi->num_desc);
+		 "    base_queue = %d, num_queue_pairs = %d, num_tx_desc = %d, num_rx_desc = %d\n",
+		 vsi->base_queue, vsi->num_queue_pairs, vsi->num_tx_desc,
+		 vsi->num_rx_desc);
 	dev_info(&pf->pdev->dev, "    type = %i\n", vsi->type);
 	if (vsi->type == I40E_VSI_SRIOV)
 		dev_info(&pf->pdev->dev, "    VF ID = %i\n", vsi->vf_id);
@@ -466,8 +470,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
 		 vsi->info.resp_reserved[6], vsi->info.resp_reserved[7],
 		 vsi->info.resp_reserved[8], vsi->info.resp_reserved[9],
 		 vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]);
-	if (vsi->back)
-		dev_info(&pf->pdev->dev, "    PF = %p\n", vsi->back);
 	dev_info(&pf->pdev->dev, "    idx = %d\n", vsi->idx);
 	dev_info(&pf->pdev->dev,
 		 "    tc_config: numtc = %d, enabled_tc = 0x%x\n",
@@ -534,6 +536,15 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
 	}
 }
 
+/* Helper macros for printing upper half of the 32byte descriptor. */
+#ifdef I40E_32BYTE_RX
+#define RXD_RSVD1(_rxd) ((_rxd)->read.rsvd1)
+#define RXD_RSVD2(_rxd) ((_rxd)->read.rsvd2)
+#else
+#define RXD_RSVD1(_rxd) 0ULL
+#define RXD_RSVD2(_rxd) 0ULL
+#endif
+
 /**
  * i40e_dbg_dump_desc - handles dump desc write into command datum
  * @cnt: number of arguments that the user supplied
@@ -548,7 +559,7 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
 {
 	struct i40e_tx_desc *txd;
 	union i40e_rx_desc *rxd;
-	struct i40e_ring *ring;
+	struct i40e_ring ring;
 	struct i40e_vsi *vsi;
 	int i;
 
@@ -567,58 +578,59 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
 			 vsi_seid);
 		return;
 	}
-
-	ring = kmemdup(is_rx_ring
-		       ? vsi->rx_rings[ring_id] : vsi->tx_rings[ring_id],
-		       sizeof(*ring), GFP_KERNEL);
-	if (!ring)
-		return;
-
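+	/* Work on a stack copy of the ring so the dump stays
+	 * self-consistent even if the ring is torn down concurrently
+	 * (assumed rationale for dropping the kmemdup()).
+	 */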
+	if (is_rx_ring)
+		ring = *vsi->rx_rings[ring_id];
+	else
+		ring = *vsi->tx_rings[ring_id];
 	if (cnt == 2) {
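+		/* In head write-back mode (assumed here), the HW stores the
+		 * TX head index in the slot just past the last descriptor;
+		 * that is the word read below.
+		 */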
+		void *head = (struct i40e_tx_desc *)ring.desc + ring.count;
+		u32 tx_head = le32_to_cpu(*(volatile __le32 *)head);
+
 		dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
 			 vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
-		for (i = 0; i < ring->count; i++) {
+		dev_info(&pf->pdev->dev, "head = %04x tail = %04x\n",
+			 is_rx_ring ? 0 : tx_head, readl(ring.tail));
+		dev_info(&pf->pdev->dev, "ntc = %04x ntu = %04x\n",
+			 ring.next_to_clean, ring.next_to_use);
+		for (i = 0; i < ring.count; i++) {
 			if (!is_rx_ring) {
-				txd = I40E_TX_DESC(ring, i);
+				txd = I40E_TX_DESC(&ring, i);
 				dev_info(&pf->pdev->dev,
 					 "   d[%03x] = 0x%016llx 0x%016llx\n",
 					 i, txd->buffer_addr,
 					 txd->cmd_type_offset_bsz);
 			} else {
-				rxd = I40E_RX_DESC(ring, i);
+				rxd = I40E_RX_DESC(&ring, i);
 				dev_info(&pf->pdev->dev,
 					 "   d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
 					 i, rxd->read.pkt_addr,
 					 rxd->read.hdr_addr,
-					 rxd->read.rsvd1, rxd->read.rsvd2);
+					 RXD_RSVD1(rxd), RXD_RSVD2(rxd));
 			}
 		}
 	} else if (cnt == 3) {
-		if (desc_n >= ring->count || desc_n < 0) {
+		if (desc_n >= ring.count || desc_n < 0) {
 			dev_info(&pf->pdev->dev,
 				 "descriptor %d not found\n", desc_n);
-			goto out;
+			return;
 		}
 		if (!is_rx_ring) {
-			txd = I40E_TX_DESC(ring, desc_n);
+			txd = I40E_TX_DESC(&ring, desc_n);
 			dev_info(&pf->pdev->dev,
 				 "vsi = %02i tx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
 				 vsi_seid, ring_id, desc_n,
 				 txd->buffer_addr, txd->cmd_type_offset_bsz);
 		} else {
-			rxd = I40E_RX_DESC(ring, desc_n);
+			rxd = I40E_RX_DESC(&ring, desc_n);
 			dev_info(&pf->pdev->dev,
 				 "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
 				 vsi_seid, ring_id, desc_n,
 				 rxd->read.pkt_addr, rxd->read.hdr_addr,
-				 rxd->read.rsvd1, rxd->read.rsvd2);
+				 RXD_RSVD1(rxd), RXD_RSVD2(rxd));
 		}
 	} else {
 		dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n");
 	}
-
-out:
-	kfree(ring);
 }
 
 /**
@@ -636,7 +648,161 @@ static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
 }
 
 /**
- * i40e_dbg_dump_stats - handles dump stats write into command datum
+ * i40e_dbg_dump_resources - handles dump resources request
+ * @pf: the i40e_pf created in command write
+ **/
+static void i40e_dbg_dump_resources(struct i40e_pf *pf)
+{
+	struct i40e_aqc_switch_resource_alloc_element_resp *buf;
+	int buf_len;
+	u16 count = 32;
+	u8 num_entries;
+	int ret, i;
+
+	buf_len = count * sizeof(*buf);
+	buf = kzalloc(buf_len, GFP_KERNEL);
+	if (!buf) {
+		dev_err(&pf->pdev->dev, "Can't get memory\n");
+		return;
+	}
+
+	ret = i40e_aq_get_switch_resource_alloc(&pf->hw, &num_entries,
+						buf, count, NULL);
+	if (ret) {
+		dev_err(&pf->pdev->dev,
+			"fail to get resources, err %s aq_err %s\n",
+			i40e_stat_str(&pf->hw, ret),
+			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+		kfree(buf);
+		return;
+	}
+
+	dev_info(&pf->pdev->dev, "  resources:\n");
+	dev_info(&pf->pdev->dev, "  guar  total  used unalloc   name\n");
+	for (i = 0; i < num_entries; i++) {
+		char *p;
+
+		switch (buf[i].resource_type) {
+		case I40E_AQ_RESOURCE_TYPE_VEB:
+			p = "vebs";
+			break;
+		case I40E_AQ_RESOURCE_TYPE_VSI:
+			p = "vsis";
+			break;
+		case I40E_AQ_RESOURCE_TYPE_MACADDR:
+			p = "macaddrs";
+			break;
+		case I40E_AQ_RESOURCE_TYPE_STAG:
+			p = "stags";
+			break;
+		case I40E_AQ_RESOURCE_TYPE_ETAG:
+			p = "etags";
+			break;
+		case I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH:
+			p = "multicast hash";
+			break;
+		case I40E_AQ_RESOURCE_TYPE_UNICAST_HASH:
+			p = "unicast hash";
+			break;
+		case I40E_AQ_RESOURCE_TYPE_VLAN:
+			p = "vlans";
+			break;
+		case I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY:
+			p = "vsi list entries";
+			break;
+		case I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY:
+			p = "etag list entries";
+			break;
+		case I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL:
+			p = "vlan stat pools";
+			break;
+		case I40E_AQ_RESOURCE_TYPE_MIRROR_RULE:
+			p = "mirror rules";
+			break;
+		case I40E_AQ_RESOURCE_TYPE_QUEUE_SETS:
+			p = "queue sets";
+			break;
+		case I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS:
+			p = "vlan filters";
+			break;
+		case I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS:
+			p = "inner mac filters";
+			break;
+		case I40E_AQ_RESOURCE_TYPE_IP_FILTERS:
+			p = "ip filters";
+			break;
+		case I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS:
+			p = "gre vn keys";
+			break;
+		case I40E_AQ_RESOURCE_TYPE_VN2_KEYS:
+			p = "vn2 keys";
+			break;
+		case I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS:
+			p = "tunnel ports";
+			break;
+		default:
+			p = "unknown";
+			break;
+		}
+
+		dev_info(&pf->pdev->dev, "  %4d   %4d  %4d  %4d   %s\n",
+			 buf[i].guaranteed, buf[i].total, buf[i].used,
+			 buf[i].total_unalloced, p);
+	}
+
+	kfree(buf);
+}
+
+/**
+ * i40e_dbg_dump_capabilities - handles dump capabilities request
+ * @pf: the i40e_pf created in command write
+ **/
+static void i40e_dbg_dump_capabilities(struct i40e_pf *pf)
+{
+	struct i40e_hw_capabilities *p;
+
+	p = (struct i40e_hw_capabilities *)&pf->hw.func_caps;
+	dev_info(&pf->pdev->dev, "  capabilities:\n");
+	dev_info(&pf->pdev->dev,
+		 "    switch_mode = %d\tmgmt_mode = %d\tnpar = %d\tos2bmc = %d\n",
+		 p->switch_mode, p->management_mode, p->npar_enable, p->os2bmc);
+	dev_info(&pf->pdev->dev,
+		 "    valid_functions = 0x%04x\tsr_iov_1_1 = %d\tnum_vfs = %d\tvf_base_id = %d\n",
+		 p->valid_functions, p->sr_iov_1_1, p->num_vfs, p->vf_base_id);
+	dev_info(&pf->pdev->dev, "    nvm_image_type = %d\n", p->nvm_image_type);
+	dev_info(&pf->pdev->dev,
+		 "    num_vsis = %d\tvmdq = %d\tflex10_enable = %d\tflex10_capable = %d\n",
+		 p->num_vsis, p->vmdq, p->flex10_enable, p->flex10_capable);
+	dev_info(&pf->pdev->dev,
+		 "    evb_802_1_qbg = %d\tevb_802_1_qbh = %d\tmgmt_cem = %d\tieee_1588 = %d\n",
+		 p->evb_802_1_qbg, p->evb_802_1_qbh, p->mgmt_cem, p->ieee_1588);
+	dev_info(&pf->pdev->dev,
+		 "    fcoe = %d\tiwarp = %d\tmdio_port_num = %d\tmdio_port_mode = %d\n",
+		 p->fcoe, p->iwarp, p->mdio_port_num, p->mdio_port_mode);
+	dev_info(&pf->pdev->dev,
+		 "    dcb = %d\tenabled_tcmap = %d\tmaxtc = %d\tiscsi = %d\n",
+		 p->dcb, p->enabled_tcmap, p->maxtc, p->iscsi);
+	dev_info(&pf->pdev->dev,
+		 "    fd = %d\tfd_filters_guaranteed = %d\tfd_filters_best_effort = %d\tnum_flow_director_filters = %d\n",
+		 p->fd, p->fd_filters_guaranteed, p->fd_filters_best_effort,
+		 p->num_flow_director_filters);
+	dev_info(&pf->pdev->dev,
+		 "    rss = %d\trss_table_size = %d\trss_table_entry_width = %d\n",
+		 p->rss, p->rss_table_size, p->rss_table_entry_width);
+	dev_info(&pf->pdev->dev,
+		 "    led[0] = %d\tsdp[0] = %d\tled_pin_num = %d\tsdp_pin_num = %d\n",
+		 p->led[0], p->sdp[0], p->led_pin_num, p->sdp_pin_num);
+	dev_info(&pf->pdev->dev,
+		 "    num_rx_qp = %d\tnum_tx_qp = %d\tbase_queue = %d\n",
+		 p->num_rx_qp, p->num_tx_qp, p->base_queue);
+	dev_info(&pf->pdev->dev,
+		 "    num_msix_vectors = %d\tnum_msix_vectors_vf = %d\trx_buf_chain_len = %d\n",
+		 p->num_msix_vectors, p->num_msix_vectors_vf,
+		 p->rx_buf_chain_len);
+}
+
+/**
+ * i40e_dbg_dump_eth_stats - handles dump stats write into command datum
  * @pf: the i40e_pf created in command write
  * @estats: the eth stats structure to be dumped
  **/
@@ -669,17 +835,35 @@ static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf,
 static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
 {
 	struct i40e_veb *veb;
+	int i;
 
 	veb = i40e_dbg_find_veb(pf, seid);
 	if (!veb) {
 		dev_info(&pf->pdev->dev, "can't find veb %d\n", seid);
 		return;
 	}
+#ifdef HAVE_BRIDGE_ATTRIBS
 	dev_info(&pf->pdev->dev,
 		 "veb idx=%d,%d stats_ic=%d  seid=%d uplink=%d mode=%s\n",
 		 veb->idx, veb->veb_idx, veb->stats_idx, veb->seid,
 		 veb->uplink_seid,
 		 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+#else
+	dev_info(&pf->pdev->dev,
+		 "veb idx=%d,%d stats_ic=%d  seid=%d uplink=%d mode=%s\n",
+		 veb->idx, veb->veb_idx, veb->stats_idx, veb->seid,
+		 veb->uplink_seid,
+		"VEB");
+#endif
+	dev_info(&pf->pdev->dev,
+		 "veb bw: enabled_tc=0x%x bw_limit=%d bw_max_quanta=%d is_abs_credits=%d\n",
+		 veb->enabled_tc, veb->bw_limit, veb->bw_max_quanta,
+		 veb->is_abs_credits);
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		dev_info(&pf->pdev->dev, "veb bw: tc=%d bw_share=%d bw_limit=%d max_quanta=%d\n",
+			 i, veb->bw_tc_share_credits[i],
+			 veb->bw_tc_limit_credits[i], veb->bw_tc_max_quanta[i]);
+	}
 	i40e_dbg_dump_eth_stats(pf, &veb->stats);
 }
 
@@ -740,6 +924,100 @@ static void i40e_dbg_dump_vf_all(struct i40e_pf *pf)
 			i40e_dbg_dump_vf(pf, i);
 }
 
+/**
+ * i40e_dbg_dump_dcb_cfg - Dump DCB config data struct
+ * @pf: the corresponding PF
+ * @cfg: DCB Config data structure
+ * @prefix: Prefix string
+ **/
+static void i40e_dbg_dump_dcb_cfg(struct i40e_pf *pf,
+				  struct i40e_dcbx_config *cfg,
+				  char *prefix)
+{
+	int i;
+
+	dev_info(&pf->pdev->dev,
+		 "%s ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
+		 prefix, cfg->etscfg.willing, cfg->etscfg.cbs,
+		 cfg->etscfg.maxtcs);
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		dev_info(&pf->pdev->dev, "%s ets_cfg: up=%d tc=%d\n",
+			 prefix, i, cfg->etscfg.prioritytable[i]);
+	}
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		dev_info(&pf->pdev->dev, "%s ets_cfg: tc=%d tcbw=%d tctsa=%d\n",
+			 prefix, i, cfg->etscfg.tcbwtable[i],
+			 cfg->etscfg.tsatable[i]);
+	}
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		dev_info(&pf->pdev->dev, "%s ets_rec: up=%d tc=%d\n",
+			 prefix, i, cfg->etsrec.prioritytable[i]);
+	}
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		dev_info(&pf->pdev->dev, "%s ets_rec: tc=%d tcbw=%d tctsa=%d\n",
+			 prefix, i, cfg->etsrec.tcbwtable[i],
+			 cfg->etsrec.tsatable[i]);
+	}
+	dev_info(&pf->pdev->dev,
+		 "%s pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
+		 prefix, cfg->pfc.willing, cfg->pfc.mbc,
+		 cfg->pfc.pfccap, cfg->pfc.pfcenable);
+
+	dev_info(&pf->pdev->dev,
+		 "%s app_table: num_apps=%d\n", prefix, (int)cfg->numapps);
+	for (i = 0; i < (int)cfg->numapps; i++) {
+		dev_info(&pf->pdev->dev, "%s app_table: %d prio=%d selector=%d protocol=0x%x\n",
+			 prefix, i, cfg->app[i].priority,
+			 cfg->app[i].selector,
+			 cfg->app[i].protocolid);
+	}
+}
+
+/**
+ * i40e_dbg_dump_fdir_filter - Dump out flow director filter contents
+ * @pf: the corresponding PF
+ * @f: the flow director filter
+ **/
+static inline void i40e_dbg_dump_fdir_filter(struct i40e_pf *pf,
+					     struct i40e_fdir_filter *f)
+{
+	dev_info(&pf->pdev->dev, "fdir filter %d:\n", f->fd_id);
+	dev_info(&pf->pdev->dev, "    flow_type=%d ip4_proto=%d\n",
+		 f->flow_type, f->ip4_proto);
+	dev_info(&pf->pdev->dev, "    dst_ip= %pi4  dst_port=%d\n",
+		 &f->dst_ip, f->dst_port);
+	dev_info(&pf->pdev->dev, "    src_ip= %pi4  src_port=%d\n",
+		 &f->src_ip, f->src_port);
+	dev_info(&pf->pdev->dev, "    sctp_v_tag=%d q_index=%d\n",
+		 f->sctp_v_tag, f->q_index);
+	if (f->flex_filter)
+		dev_info(&pf->pdev->dev, "    flex_word=%04x flex_offset=%d\n",
+			 f->flex_word, f->flex_offset);
+	dev_info(&pf->pdev->dev, "    pctype=%d dest_vsi=%d dest_ctl=%d\n",
+		 f->pctype, f->dest_vsi, f->dest_ctl);
+	dev_info(&pf->pdev->dev, "    fd_status=%d cnt_index=%d\n",
+		 f->fd_status, f->cnt_index);
+}
+
+/**
+ * i40e_dbg_dump_cloud_filter - Dump out cloud filter contents
+ * @pf: the corresponding PF
+ * @f: the flow director filter
+ **/
+static inline void i40e_dbg_dump_cloud_filter(struct i40e_pf *pf,
+					      struct i40e_cloud_filter *f)
+{
+	dev_info(&pf->pdev->dev, "cloud filter %d:\n", f->id);
+	dev_info(&pf->pdev->dev, "    outer_mac[]=%pM  inner_mac=%pM\n",
+		 f->outer_mac, f->inner_mac);
+	dev_info(&pf->pdev->dev, "    inner_vlan %d, inner_ip[0] %pi4\n",
+		 be16_to_cpu(f->inner_vlan), f->inner_ip);
+	dev_info(&pf->pdev->dev, "    tenant_id=%d flags=0x%02x, tunnel_type=0x%02x\n",
+		 f->tenant_id, f->flags, f->tunnel_type);
+	dev_info(&pf->pdev->dev, "    seid=%d queue_id=%d\n",
+		 f->seid, f->queue_id);
+}
+
 #define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
 /**
  * i40e_dbg_command_write - write into command datum
@@ -781,7 +1059,48 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 		count = cmd_buf_tmp - cmd_buf + 1;
 	}
 
-	if (strncmp(cmd_buf, "add vsi", 7) == 0) {
+	if (strncmp(cmd_buf, "read", 4) == 0) {
+		u32 address;
+		u32 value;
+
+		cnt = sscanf(&cmd_buf[4], "%i", &address);
+		if (cnt != 1) {
+			dev_info(&pf->pdev->dev, "read <reg>\n");
+			goto command_write_done;
+		}
+
+		/* check the range on address */
+		if (address > (pf->ioremap_len - sizeof(u32))) {
+			dev_info(&pf->pdev->dev, "read reg address 0x%08x too large, max=0x%08lx\n",
+				 address, (pf->ioremap_len - sizeof(u32)));
+			goto command_write_done;
+		}
+
+		value = rd32(&pf->hw, address);
+		dev_info(&pf->pdev->dev, "read: 0x%08x = 0x%08x\n",
+			 address, value);
+
+	} else if (strncmp(cmd_buf, "write", 5) == 0) {
+		u32 address, value;
+
+		cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value);
+		if (cnt != 2) {
+			dev_info(&pf->pdev->dev, "write <reg> <value>\n");
+			goto command_write_done;
+		}
+
+		/* check the range on address */
+		if (address > (pf->ioremap_len - sizeof(u32))) {
+			dev_info(&pf->pdev->dev, "write reg address 0x%08x too large, max=0x%08lx\n",
+				 address, (pf->ioremap_len - sizeof(u32)));
+			goto command_write_done;
+		}
+		wr32(&pf->hw, address, value);
+		value = rd32(&pf->hw, address);
+		dev_info(&pf->pdev->dev, "write: 0x%08x = 0x%08x\n",
+			 address, value);
+
+	} else if (strncmp(cmd_buf, "add vsi", 7) == 0) {
 		vsi_seid = -1;
 		cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
 		if (cnt == 0) {
@@ -798,8 +1117,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 		 */
 		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
 			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
-			i40e_do_reset_safe(pf,
-					   BIT_ULL(__I40E_PF_RESET_REQUESTED));
+			i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
 		}
 
 		vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0);
@@ -871,6 +1189,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 
 	} else if (strncmp(cmd_buf, "del relay", 9) == 0) {
 		int i;
+
 		cnt = sscanf(&cmd_buf[9], "%i", &veb_seid);
 		if (cnt != 1) {
 			dev_info(&pf->pdev->dev,
@@ -898,9 +1217,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 	} else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
 		i40e_status ret;
 		u16 vid;
-		unsigned int v;
+		int v;
 
-		cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
+		cnt = sscanf(&cmd_buf[8], "%i %d", &vsi_seid, &v);
 		if (cnt != 2) {
 			dev_info(&pf->pdev->dev,
 				 "add pvid: bad command string, cnt=%d\n", cnt);
@@ -914,7 +1233,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 			goto command_write_done;
 		}
 
-		vid = v;
+		vid = (unsigned)v;
 		ret = i40e_vsi_add_pvid(vsi, vid);
 		if (!ret)
 			dev_info(&pf->pdev->dev,
@@ -949,6 +1268,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 	} else if (strncmp(cmd_buf, "dump", 4) == 0) {
 		if (strncmp(&cmd_buf[5], "switch", 6) == 0) {
 			i40e_fetch_switch_configuration(pf, true);
+		} else if (strncmp(&cmd_buf[5], "resources", 9) == 0) {
+			i40e_dbg_dump_resources(pf);
+		} else if (strncmp(&cmd_buf[5], "capabilities", 7) == 0) {
+			i40e_dbg_dump_capabilities(pf);
 		} else if (strncmp(&cmd_buf[5], "vsi", 3) == 0) {
 			cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
 			if (cnt > 0)
@@ -969,6 +1292,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 				i40e_dbg_dump_vf_all(pf);
 		} else if (strncmp(&cmd_buf[5], "desc", 4) == 0) {
 			int ring_id, desc_n;
+
 			if (strncmp(&cmd_buf[10], "rx", 2) == 0) {
 				cnt = sscanf(&cmd_buf[12], "%i %i %i",
 					     &vsi_seid, &ring_id, &desc_n);
@@ -1007,6 +1331,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 						&pf->hw.local_dcbx_config;
 			struct i40e_dcbx_config *r_cfg =
 						&pf->hw.remote_dcbx_config;
+			struct i40e_dcbx_config *d_cfg =
+						&pf->hw.desired_dcbx_config;
 			int i, ret;
 			u16 switch_id;
 
@@ -1049,68 +1375,18 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 			kfree(bw_data);
 			bw_data = NULL;
 
-			dev_info(&pf->pdev->dev,
-				 "port dcbx_mode=%d\n", cfg->dcbx_mode);
-			dev_info(&pf->pdev->dev,
-				 "port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
-				 cfg->etscfg.willing, cfg->etscfg.cbs,
-				 cfg->etscfg.maxtcs);
-			for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-				dev_info(&pf->pdev->dev, "port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
-					 i, cfg->etscfg.prioritytable[i],
-					 cfg->etscfg.tcbwtable[i],
-					 cfg->etscfg.tsatable[i]);
-			}
-			for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-				dev_info(&pf->pdev->dev, "port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
-					 i, cfg->etsrec.prioritytable[i],
-					 cfg->etsrec.tcbwtable[i],
-					 cfg->etsrec.tsatable[i]);
-			}
-			dev_info(&pf->pdev->dev,
-				 "port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
-				 cfg->pfc.willing, cfg->pfc.mbc,
-				 cfg->pfc.pfccap, cfg->pfc.pfcenable);
-			dev_info(&pf->pdev->dev,
-				 "port app_table: num_apps=%d\n", cfg->numapps);
-			for (i = 0; i < cfg->numapps; i++) {
-				dev_info(&pf->pdev->dev, "port app_table: %d prio=%d selector=%d protocol=0x%x\n",
-					 i, cfg->app[i].priority,
-					 cfg->app[i].selector,
-					 cfg->app[i].protocolid);
-			}
-			/* Peer TLV DCBX data */
-			dev_info(&pf->pdev->dev,
-				 "remote port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
-				 r_cfg->etscfg.willing,
-				 r_cfg->etscfg.cbs, r_cfg->etscfg.maxtcs);
-			for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-				dev_info(&pf->pdev->dev, "remote port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
-					 i, r_cfg->etscfg.prioritytable[i],
-					 r_cfg->etscfg.tcbwtable[i],
-					 r_cfg->etscfg.tsatable[i]);
-			}
-			for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-				dev_info(&pf->pdev->dev, "remote port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
-					 i, r_cfg->etsrec.prioritytable[i],
-					 r_cfg->etsrec.tcbwtable[i],
-					 r_cfg->etsrec.tsatable[i]);
-			}
-			dev_info(&pf->pdev->dev,
-				 "remote port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
-				 r_cfg->pfc.willing,
-				 r_cfg->pfc.mbc,
-				 r_cfg->pfc.pfccap,
-				 r_cfg->pfc.pfcenable);
-			dev_info(&pf->pdev->dev,
-				 "remote port app_table: num_apps=%d\n",
-				 r_cfg->numapps);
-			for (i = 0; i < r_cfg->numapps; i++) {
-				dev_info(&pf->pdev->dev, "remote port app_table: %d prio=%d selector=%d protocol=0x%x\n",
-					 i, r_cfg->app[i].priority,
-					 r_cfg->app[i].selector,
-					 r_cfg->app[i].protocolid);
+			if (cfg->dcbx_mode == I40E_DCBX_MODE_CEE) {
+				dev_info(&pf->pdev->dev,
+					 "CEE DCBX mode with Oper TLV Status = 0x%x\n",
+					 cfg->tlv_status);
+				i40e_dbg_dump_dcb_cfg(pf, d_cfg, "DesiredCfg");
+			} else {
+				dev_info(&pf->pdev->dev, "IEEE DCBX mode\n");
 			}
+
+			i40e_dbg_dump_dcb_cfg(pf, cfg, "OperCfg");
+			i40e_dbg_dump_dcb_cfg(pf, r_cfg, "PeerCfg");
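+			/* In CEE mode the firmware negotiates from a desired
+			 * configuration toward the operational one, so all
+			 * three views (desired, operational, peer) are dumped;
+			 * IEEE mode only has the operational and peer views.
+			 */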
+
 		} else if (strncmp(&cmd_buf[5], "debug fwdata", 12) == 0) {
 			int cluster_id, table_id;
 			int index, ret;
@@ -1155,73 +1431,65 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 				       buff, rlen, true);
 			kfree(buff);
 			buff = NULL;
+		} else if (strncmp(&cmd_buf[5], "filters", 7) == 0) {
+			struct i40e_fdir_filter *f_rule;
+			struct i40e_cloud_filter *c_rule;
+			struct hlist_node *node2;
+
+			hlist_for_each_entry_safe(f_rule, node2,
+						  &pf->fdir_filter_list,
+						  fdir_node) {
+				i40e_dbg_dump_fdir_filter(pf, f_rule);
+			}
+
+			/* find the cloud filter rule ids */
+			hlist_for_each_entry_safe(c_rule, node2,
+						  &pf->cloud_filter_list,
+						  cloud_node) {
+				i40e_dbg_dump_cloud_filter(pf, c_rule);
+			}
+			i40e_dbg_dump_all_vsi_filters(pf);
 		} else {
 			dev_info(&pf->pdev->dev,
 				 "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>],\n");
 			dev_info(&pf->pdev->dev, "dump switch\n");
 			dev_info(&pf->pdev->dev, "dump vsi [seid]\n");
+			dev_info(&pf->pdev->dev, "dump capabilities\n");
+			dev_info(&pf->pdev->dev, "dump resources\n");
 			dev_info(&pf->pdev->dev, "dump reset stats\n");
 			dev_info(&pf->pdev->dev, "dump port\n");
-			dev_info(&pf->pdev->dev, "dump vf [vf_id]\n");
+			dev_info(&pf->pdev->dev, "dump VF [vf_id]\n");
 			dev_info(&pf->pdev->dev,
 				 "dump debug fwdata <cluster_id> <table_id> <index>\n");
+			dev_info(&pf->pdev->dev, "dump filters\n");
 		}
-	} else if (strncmp(cmd_buf, "pfr", 3) == 0) {
-		dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
-		i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
-
-	} else if (strncmp(cmd_buf, "corer", 5) == 0) {
-		dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
-		i40e_do_reset_safe(pf, BIT(__I40E_CORE_RESET_REQUESTED));
 
-	} else if (strncmp(cmd_buf, "globr", 5) == 0) {
-		dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
-		i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED));
+	} else if (strncmp(cmd_buf, "msg_enable", 10) == 0) {
+		u32 level;
 
-	} else if (strncmp(cmd_buf, "empr", 4) == 0) {
-		dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n");
-		i40e_do_reset_safe(pf, BIT(__I40E_EMP_RESET_REQUESTED));
-
-	} else if (strncmp(cmd_buf, "read", 4) == 0) {
-		u32 address;
-		u32 value;
-
-		cnt = sscanf(&cmd_buf[4], "%i", &address);
-		if (cnt != 1) {
-			dev_info(&pf->pdev->dev, "read <reg>\n");
-			goto command_write_done;
-		}
-
-		/* check the range on address */
-		if (address > (pf->ioremap_len - sizeof(u32))) {
-			dev_info(&pf->pdev->dev, "read reg address 0x%08x too large, max=0x%08lx\n",
-				 address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
-			goto command_write_done;
-		}
-
-		value = rd32(&pf->hw, address);
-		dev_info(&pf->pdev->dev, "read: 0x%08x = 0x%08x\n",
-			 address, value);
-
-	} else if (strncmp(cmd_buf, "write", 5) == 0) {
-		u32 address, value;
-
-		cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value);
-		if (cnt != 2) {
-			dev_info(&pf->pdev->dev, "write <reg> <value>\n");
-			goto command_write_done;
-		}
-
-		/* check the range on address */
-		if (address > (pf->ioremap_len - sizeof(u32))) {
-			dev_info(&pf->pdev->dev, "write reg address 0x%08x too large, max=0x%08lx\n",
-				 address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
-			goto command_write_done;
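+		/* "msg_enable <level>" sets the netif message level for this
+		 * PF; if the value also carries the I40E_DEBUG_USER bit it is
+		 * mirrored into hw.debug_mask as well.  With no parsable
+		 * argument the current mask is simply printed.
+		 */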
+		cnt = sscanf(&cmd_buf[10], "%i", &level);
+		if (cnt) {
+			if (I40E_DEBUG_USER & level) {
+				pf->hw.debug_mask = level;
+				dev_info(&pf->pdev->dev,
+					 "set hw.debug_mask = 0x%08x\n",
+					 pf->hw.debug_mask);
+			}
+			pf->msg_enable = level;
+			dev_info(&pf->pdev->dev, "set msg_enable = 0x%08x\n",
+				 pf->msg_enable);
+		} else {
+			dev_info(&pf->pdev->dev, "msg_enable = 0x%08x\n",
+				 pf->msg_enable);
 		}
-		wr32(&pf->hw, address, value);
-		value = rd32(&pf->hw, address);
-		dev_info(&pf->pdev->dev, "write: 0x%08x = 0x%08x\n",
-			 address, value);
+	} else if (strncmp(cmd_buf, "defport on", 10) == 0) {
+		dev_info(&pf->pdev->dev, "debugfs: forcing PFR with defport enabled\n");
+		pf->cur_promisc = true;
+		i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
+	} else if (strncmp(cmd_buf, "defport off", 11) == 0) {
+		dev_info(&pf->pdev->dev, "debugfs: forcing PFR with defport disabled\n");
+		pf->cur_promisc = false;
+		i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
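+		/* Both defport variants just flip cur_promisc and force a PF
+		 * reset; the new default-port setting is presumably applied
+		 * as part of the rebuild that follows the reset.
+		 */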
 	} else if (strncmp(cmd_buf, "clear_stats", 11) == 0) {
 		if (strncmp(&cmd_buf[12], "vsi", 3) == 0) {
 			cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
@@ -1260,7 +1528,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 		struct i40e_aq_desc *desc;
 		i40e_status ret;
 
-		desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
+		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
 		if (!desc)
 			goto command_write_done;
 		cnt = sscanf(&cmd_buf[11],
@@ -1308,7 +1576,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 		u16 buffer_len;
 		u8 *buff;
 
-		desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
+		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
 		if (!desc)
 			goto command_write_done;
 		cnt = sscanf(&cmd_buf[20],
@@ -1339,7 +1607,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 			desc = NULL;
 			goto command_write_done;
 		}
-		desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+		desc->flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
 		ret = i40e_asq_send_command(&pf->hw, desc, buff,
 					    buffer_len, NULL);
 		if (!ret) {
@@ -1371,11 +1639,78 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 	} else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) {
 		dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n",
 			 i40e_get_current_fd_count(pf));
+	/* The VF base mode on/off hooks are meant for validation only, to
+	 * make sure the VF base mode driver is not broken.
+	 */
+	} else if (strncmp(cmd_buf, "vf base mode on", 15) == 0) {
+		if (!pf->num_alloc_vfs) {
+			pf->vf_base_mode_only = true;
+			dev_info(&pf->pdev->dev, "VF Base mode is enabled\n");
+		} else {
+			dev_info(&pf->pdev->dev,
+				 "cannot configure VF Base mode when VFs are allocated\n");
+		}
+	} else if (strncmp(cmd_buf, "vf base mode off", 16) == 0) {
+		if (!pf->num_alloc_vfs) {
+			pf->vf_base_mode_only = false;
+			dev_info(&pf->pdev->dev, "VF Base mode is disabled\n");
+		} else {
+			dev_info(&pf->pdev->dev,
+				 "cannot configure VF Base mode when VFs are allocated\n");
+		}
+	} else if ((strncmp(cmd_buf, "add ethtype filter", 18) == 0) ||
+		   (strncmp(cmd_buf, "rem ethtype filter", 18) == 0)) {
+		u16 ethtype;
+		u16 queue;
+		bool add = false;
+		int ret;
+
+		if (strncmp(cmd_buf, "add", 3) == 0)
+			add = true;
+
+		cnt = sscanf(&cmd_buf[18],
+			     "%hi %hi",
+			     &ethtype, &queue);
+		if (cnt != 2) {
+			dev_info(&pf->pdev->dev,
+				 "%s ethtype filter: bad command string, cnt=%d\n",
+				 add ? "add" : "rem",
+				 cnt);
+			goto command_write_done;
+		}
+		ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
+					pf->hw.mac.addr,
+					ethtype, 0,
+					pf->vsi[pf->lan_vsi]->seid,
+					queue, add, NULL, NULL);
+		if (ret) {
+			dev_info(&pf->pdev->dev,
+				 "%s: add/rem Control Packet Filter AQ command failed =0x%x\n",
+				 add ? "add" : "rem",
+				 pf->hw.aq.asq_last_status);
+			goto command_write_done;
+		}
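+
+		/* Example (values illustrative): steer LLDP frames, ethertype
+		 * 0x88cc, to queue 0 with
+		 *   echo "add ethtype filter 0x88cc 0" > .../command
+		 * The filter is anchored to this PF's LAN VSI.
+		 */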
+
+	} else if (strncmp(cmd_buf, "dcb off", 7) == 0) {
+		u8 tc = i40e_pf_get_num_tc(pf);
+
+		/* Allow disabling only when in single TC mode */
+		if (tc > 1) {
+			dev_info(&pf->pdev->dev, "Failed to disable DCB as TC count(%d) is greater than 1.\n",
+				 tc);
+			goto command_write_done;
+		}
+		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+	} else if (strncmp(cmd_buf, "dcb on", 6) == 0) {
+		pf->flags |= I40E_FLAG_DCB_ENABLED;
 	} else if (strncmp(cmd_buf, "lldp", 4) == 0) {
 		if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
 			int ret;
 
-			ret = i40e_aq_stop_lldp(&pf->hw, false, NULL);
+			if (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) {
+				dev_info(&pf->pdev->dev,
+					 "Use ethtool to disable LLDP firmware agent: "
+					 "\"ethtool --set-priv-flags <interface> disable-fw-lldp on\".\n");
+				goto command_write_done;
+			}
+			ret = i40e_aq_stop_lldp(&pf->hw, false, false, NULL);
 			if (ret) {
 				dev_info(&pf->pdev->dev,
 					 "Stop LLDP AQ command failed =0x%x\n",
@@ -1389,17 +1724,25 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 						0, true, NULL, NULL);
 			if (ret) {
 				dev_info(&pf->pdev->dev,
-					"%s: Add Control Packet Filter AQ command failed =0x%x\n",
-					__func__, pf->hw.aq.asq_last_status);
+					 "%s: Add Control Packet Filter AQ command failed =0x%x\n",
+					 __func__, pf->hw.aq.asq_last_status);
 				goto command_write_done;
 			}
-#ifdef CONFIG_I40E_DCB
+#ifdef CONFIG_DCB
+#ifdef HAVE_DCBNL_IEEE
 			pf->dcbx_cap = DCB_CAP_DCBX_HOST |
 				       DCB_CAP_DCBX_VER_IEEE;
-#endif /* CONFIG_I40E_DCB */
+#endif /* HAVE_DCBNL_IEEE */
+#endif /* CONFIG_DCB */
 		} else if (strncmp(&cmd_buf[5], "start", 5) == 0) {
 			int ret;
 
+			if (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) {
+				dev_info(&pf->pdev->dev,
+					 "Use ethtool to enable LLDP firmware agent: "
+					 "\"ethtool --set-priv-flags <interface> disable-fw-lldp off\".\n");
+				goto command_write_done;
+			}
 			ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
 						pf->hw.mac.addr,
 						I40E_ETH_P_LLDP, 0,
@@ -1407,22 +1750,23 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 						0, false, NULL, NULL);
 			if (ret) {
 				dev_info(&pf->pdev->dev,
-					"%s: Remove Control Packet Filter AQ command failed =0x%x\n",
-					__func__, pf->hw.aq.asq_last_status);
+					 "%s: Remove Control Packet Filter AQ command failed =0x%x\n",
+					 __func__, pf->hw.aq.asq_last_status);
 				/* Continue and start FW LLDP anyways */
 			}
-
-			ret = i40e_aq_start_lldp(&pf->hw, NULL);
+			ret = i40e_aq_start_lldp(&pf->hw, false, NULL);
 			if (ret) {
 				dev_info(&pf->pdev->dev,
 					 "Start LLDP AQ command failed =0x%x\n",
 					 pf->hw.aq.asq_last_status);
 				goto command_write_done;
 			}
-#ifdef CONFIG_I40E_DCB
+#ifdef CONFIG_DCB
+#ifdef HAVE_DCBNL_IEEE
 			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
 				       DCB_CAP_DCBX_VER_IEEE;
-#endif /* CONFIG_I40E_DCB */
+#endif /* HAVE_DCBNL_IEEE */
+#endif /* CONFIG_DCB */
 		} else if (strncmp(&cmd_buf[5],
 			   "get local", 9) == 0) {
 			u16 llen, rlen;
@@ -1564,6 +1908,251 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 		}
 		kfree(buff);
 		buff = NULL;
+	} else if (strncmp(cmd_buf, "set rss_size", 12) == 0) {
+		int q_count;
+
+		cnt = sscanf(&cmd_buf[12], "%i", &q_count);
+		if (cnt != 1) {
+			dev_info(&pf->pdev->dev,
+				 "set rss_size: bad command string, cnt=%d\n", cnt);
+			goto command_write_done;
+		}
+		if (q_count <= 0) {
+			dev_info(&pf->pdev->dev,
+				 "set rss_size: %d is too small\n",
+				 q_count);
+			goto command_write_done;
+		}
+		dev_info(&pf->pdev->dev,
+			 "set rss_size requesting %d queues\n", q_count);
+		rtnl_lock();
+		i40e_reconfig_rss_queues(pf, q_count);
+		rtnl_unlock();
+		dev_info(&pf->pdev->dev, "new rss_size %d\n",
+			 pf->alloc_rss_size);
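+		/* The queue reconfiguration runs under rtnl; the driver may
+		 * grant fewer queues than requested, which is why the
+		 * resulting alloc_rss_size is printed back afterwards.
+		 */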
+	} else if (strncmp(cmd_buf, "get bw", 6) == 0) {
+		i40e_status status;
+		u32 max_bw, min_bw;
+		bool min_valid, max_valid;
+
+		status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
+						   &min_valid, &max_valid);
+
+		if (status) {
+			dev_info(&pf->pdev->dev, "get bw failed with status %d\n",
+				status);
+			goto command_write_done;
+		}
+		if (!min_valid) {
+			dev_info(&pf->pdev->dev, "min bw invalid\n");
+		} else if (min_bw & I40E_ALT_BW_RELATIVE_MASK) {
+			dev_info(&pf->pdev->dev, "relative min bw = %d%%\n",
+				min_bw & I40E_ALT_BW_VALUE_MASK);
+		} else {
+			dev_info(&pf->pdev->dev, "absolute min bw = %dMb/s\n",
+				 (min_bw & I40E_ALT_BW_VALUE_MASK) * 128);
+		}
+		if (!max_valid) {
+			dev_info(&pf->pdev->dev, "max bw invalid\n");
+		} else if (max_bw & I40E_ALT_BW_RELATIVE_MASK) {
+			dev_info(&pf->pdev->dev, "relative max bw = %d%%\n",
+				max_bw & I40E_ALT_BW_VALUE_MASK);
+		} else {
+			dev_info(&pf->pdev->dev, "absolute max bw = %dMb/s\n",
+				 (max_bw & I40E_ALT_BW_VALUE_MASK) * 128);
+		}
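+		/* The alt-RAM word packs a relative/absolute flag next to the
+		 * value: relative bandwidth is a percentage, absolute
+		 * bandwidth is in units of 128 Mb/s (hence the " * 128").
+		 */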
+	} else if (strncmp(cmd_buf, "set bw", 6) == 0) {
+		struct i40e_aqc_configure_partition_bw_data bw_data;
+		i40e_status status;
+		u32 max_bw, min_bw;
+
+		/* Set the valid bit for this PF */
+		bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
+
+		/* Get the bw's */
+		cnt = sscanf(&cmd_buf[7], "%u %u", &max_bw, &min_bw);
+		if (cnt != 2) {
+			dev_info(&pf->pdev->dev, "set bw <MAX> <MIN>\n");
+			goto command_write_done;
+		}
+		bw_data.max_bw[pf->hw.pf_id] = max_bw;
+		bw_data.min_bw[pf->hw.pf_id] = min_bw;
+
+		/* Set the new bandwidths */
+		status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
+		if (status) {
+			dev_info(&pf->pdev->dev, "configure partition bw failed with status %d\n",
+				 status);
+			goto command_write_done;
+		}
+	} else if (strncmp(cmd_buf, "commit bw", 9) == 0) {
+		/* Commit temporary BW setting to permanent NVM image */
+		enum i40e_admin_queue_err last_aq_status;
+		i40e_status aq_status;
+		u16 nvm_word;
+
+		if (pf->hw.partition_id != 1) {
+			dev_info(&pf->pdev->dev,
+				 "Commit BW only works on first partition!\n");
+			goto command_write_done;
+		}
+
+		/* Acquire NVM for read access */
+		aq_status = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
+		if (aq_status) {
+			dev_info(&pf->pdev->dev,
+				 "Error %d: Cannot acquire NVM for Read Access\n",
+				 aq_status);
+			goto command_write_done;
+		}
+
+		/* Read word 0x10 of NVM - SW compatibility word 1 */
+		aq_status = i40e_aq_read_nvm(&pf->hw,
+					     I40E_SR_NVM_CONTROL_WORD,
+					     0x10, sizeof(nvm_word), &nvm_word,
+					     false, NULL);
+		/* Save off last admin queue command status before releasing
+		 * the NVM
+		 */
+		last_aq_status = pf->hw.aq.asq_last_status;
+		i40e_release_nvm(&pf->hw);
+		if (aq_status) {
+			dev_info(&pf->pdev->dev, "NVM read error %d:%d\n",
+				 aq_status, last_aq_status);
+			goto command_write_done;
+		}
+
+		/* Wait a bit for NVM release to complete */
+		msleep(100);
+
+		/* Acquire NVM for write access */
+		aq_status = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
+		if (aq_status) {
+			dev_info(&pf->pdev->dev,
+				 "Error %d: Cannot acquire NVM for Write Access\n",
+				 aq_status);
+			goto command_write_done;
+		}
+		/* Write it back out unchanged to initiate update NVM,
+		 * which will force a write of the shadow (alt) RAM to
+		 * the NVM - thus storing the bandwidth values permanently.
+		 */
+		aq_status = i40e_aq_update_nvm(&pf->hw,
+					       I40E_SR_NVM_CONTROL_WORD,
+					       0x10, sizeof(nvm_word),
+					       &nvm_word, true, 0, NULL);
+		/* Save off last admin queue command status before releasing
+		 * the NVM
+		 */
+		last_aq_status = pf->hw.aq.asq_last_status;
+		i40e_release_nvm(&pf->hw);
+		if (aq_status)
+			dev_info(&pf->pdev->dev,
+				 "BW settings NOT SAVED - error %d:%d updating NVM\n",
+				 aq_status, last_aq_status);
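+		/* Net effect: "set bw" above only touches the shadow (alt)
+		 * RAM, and rewriting this control word unchanged triggers an
+		 * NVM update that flushes shadow RAM to flash, making the
+		 * bandwidth limits persistent across reloads.
+		 */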
+	} else if (strncmp(cmd_buf, "add switch ingress mirror", 25) == 0) {
+		u16 rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
+		u16 switch_seid, dst_vsi_seid, rule_id;
+		i40e_status aq_status;
+
+		cnt = sscanf(&cmd_buf[25], "%hu %hu",
+			     &switch_seid, &dst_vsi_seid);
+		if (cnt != 2) {
+			dev_info(&pf->pdev->dev,
+				 "add mirror: bad command string, cnt=%d\n",
+				 cnt);
+			goto command_write_done;
+		}
+
+		aq_status =
+			i40e_aq_add_mirrorrule(&pf->hw,
+					       switch_seid, rule_type,
+					       dst_vsi_seid, 0, NULL, NULL,
+					       &rule_id, NULL, NULL);
+		if (aq_status)
+			dev_info(&pf->pdev->dev,
+				 "add ingress mirror failed with status %d\n",
+				 aq_status);
+		else
+			dev_info(&pf->pdev->dev,
+				 "Ingress mirror rule %d added\n", rule_id);
+	} else if (strncmp(cmd_buf, "add switch egress mirror", 24) == 0) {
+		u16 rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
+		u16 switch_seid, dst_vsi_seid, rule_id;
+		i40e_status aq_status;
+
+		cnt = sscanf(&cmd_buf[24], "%hu %hu",
+			     &switch_seid, &dst_vsi_seid);
+		if (cnt != 2) {
+			dev_info(&pf->pdev->dev,
+				 "add mirror: bad command string, cnt=%d\n",
+				 cnt);
+			goto command_write_done;
+		}
+
+		aq_status =
+			i40e_aq_add_mirrorrule(&pf->hw,
+					       switch_seid, rule_type,
+					       dst_vsi_seid, 0, NULL, NULL,
+					       &rule_id, NULL, NULL);
+		if (aq_status)
+			dev_info(&pf->pdev->dev,
+				 "add egress mirror failed with status %d\n",
+				 aq_status);
+		else
+			dev_info(&pf->pdev->dev,
+				 "Egress mirror rule %d added\n", rule_id);
+	} else if (strncmp(cmd_buf, "del switch ingress mirror", 25) == 0) {
+		u16 rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
+		i40e_status aq_status;
+		u16 switch_seid, rule_id;
+
+		cnt = sscanf(&cmd_buf[25], "%hu %hu",
+			     &switch_seid, &rule_id);
+		if (cnt != 2) {
+			dev_info(&pf->pdev->dev,
+				 "del mirror: bad command string, cnt=%d\n",
+				 cnt);
+			goto command_write_done;
+		}
+
+		aq_status =
+			i40e_aq_delete_mirrorrule(&pf->hw, switch_seid,
+						  rule_type, rule_id, 0, NULL,
+						  NULL, NULL, NULL);
+		if (aq_status)
+			dev_info(&pf->pdev->dev,
+				 "mirror rule remove failed with status %d\n",
+				 aq_status);
+		else
+			dev_info(&pf->pdev->dev,
+				 "Mirror rule %d removed\n", rule_id);
+	} else if (strncmp(cmd_buf, "del switch egress mirror", 24) == 0) {
+		u16 rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
+		i40e_status aq_status;
+		u16 switch_seid, rule_id;
+
+		cnt = sscanf(&cmd_buf[24], "%hu %hu",
+			     &switch_seid, &rule_id);
+		if (cnt != 2) {
+			dev_info(&pf->pdev->dev,
+				 "del mirror: bad command string, cnt=%d\n",
+				 cnt);
+			goto command_write_done;
+		}
+
+		aq_status =
+			i40e_aq_delete_mirrorrule(&pf->hw, switch_seid,
+						  rule_type, rule_id, 0, NULL,
+						  NULL, NULL, NULL);
+		if (aq_status)
+			dev_info(&pf->pdev->dev,
+				 "mirror rule remove failed with status %d\n",
+				 aq_status);
+		else
+			dev_info(&pf->pdev->dev,
+				 "Mirror rule %d removed\n", rule_id);
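+
+		/* All four mirror commands share one pattern: "add" returns a
+		 * rule_id that must be passed back to the matching "del", and
+		 * ingress/egress differ only in the rule_type constant.
+		 */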
+
 	} else {
 		dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf);
 		dev_info(&pf->pdev->dev, "available commands\n");
@@ -1575,21 +2164,27 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 		dev_info(&pf->pdev->dev, "  del pvid <vsi_seid>\n");
 		dev_info(&pf->pdev->dev, "  dump switch\n");
 		dev_info(&pf->pdev->dev, "  dump vsi [seid]\n");
+		dev_info(&pf->pdev->dev, "  dump capabilities\n");
+		dev_info(&pf->pdev->dev, "  dump resources\n");
 		dev_info(&pf->pdev->dev, "  dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
 		dev_info(&pf->pdev->dev, "  dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
 		dev_info(&pf->pdev->dev, "  dump desc aq\n");
 		dev_info(&pf->pdev->dev, "  dump reset stats\n");
 		dev_info(&pf->pdev->dev, "  dump debug fwdata <cluster_id> <table_id> <index>\n");
+		dev_info(&pf->pdev->dev, "  msg_enable [level]\n");
 		dev_info(&pf->pdev->dev, "  read <reg>\n");
 		dev_info(&pf->pdev->dev, "  write <reg> <value>\n");
 		dev_info(&pf->pdev->dev, "  clear_stats vsi [seid]\n");
 		dev_info(&pf->pdev->dev, "  clear_stats port\n");
-		dev_info(&pf->pdev->dev, "  pfr\n");
-		dev_info(&pf->pdev->dev, "  corer\n");
-		dev_info(&pf->pdev->dev, "  globr\n");
+		dev_info(&pf->pdev->dev, "  defport on\n");
+		dev_info(&pf->pdev->dev, "  defport off\n");
 		dev_info(&pf->pdev->dev, "  send aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3>\n");
 		dev_info(&pf->pdev->dev, "  send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");
 		dev_info(&pf->pdev->dev, "  fd current cnt");
+		dev_info(&pf->pdev->dev, "  vf base mode on\n");
+		dev_info(&pf->pdev->dev, "  vf base mode off\n");
+		dev_info(&pf->pdev->dev, "  add ethtype filter <ethtype> <to_queue>\n");
+		dev_info(&pf->pdev->dev, "  rem ethtype filter <ethtype> <to_queue>\n");
 		dev_info(&pf->pdev->dev, "  lldp start\n");
 		dev_info(&pf->pdev->dev, "  lldp stop\n");
 		dev_info(&pf->pdev->dev, "  lldp get local\n");
@@ -1597,6 +2192,16 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 		dev_info(&pf->pdev->dev, "  lldp event on\n");
 		dev_info(&pf->pdev->dev, "  lldp event off\n");
 		dev_info(&pf->pdev->dev, "  nvm read [module] [word_offset] [word_count]\n");
+		dev_info(&pf->pdev->dev, "  set rss_size <count>\n");
+		dev_info(&pf->pdev->dev, "  dcb off\n");
+		dev_info(&pf->pdev->dev, "  dcb on\n");
+		dev_info(&pf->pdev->dev, "  get bw\n");
+		dev_info(&pf->pdev->dev, "  set bw <MAX> <MIN>\n");
+		dev_info(&pf->pdev->dev, "  commit bw\n");
+		dev_info(&pf->pdev->dev, "  add switch ingress mirror <sw_seid> <dst_seid>\n");
+		dev_info(&pf->pdev->dev, "  add switch egress mirror <sw_seid> <dst_seid>\n");
+		dev_info(&pf->pdev->dev, "  del switch ingress mirror <sw_seid> <rule_id>\n");
+		dev_info(&pf->pdev->dev, "  del switch egress mirror <sw_seid> <rule_id>\n");
 	}
 
 command_write_done:
@@ -1631,7 +2236,7 @@ static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
 {
 	struct i40e_pf *pf = filp->private_data;
 	int bytes_not_copied;
-	int buf_size = 256;
+	size_t buf_size = 256;
 	char *buf;
 	int len;
 
@@ -1696,30 +2301,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
 		count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
 	}
 
-	if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
-		cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
-		if (cnt != 1) {
-			dev_info(&pf->pdev->dev, "tx_timeout <vsi_seid>\n");
-			goto netdev_ops_write_done;
-		}
-		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
-		if (!vsi) {
-			dev_info(&pf->pdev->dev,
-				 "tx_timeout: VSI %d not found\n", vsi_seid);
-		} else if (!vsi->netdev) {
-			dev_info(&pf->pdev->dev, "tx_timeout: no netdev for VSI %d\n",
-				 vsi_seid);
-		} else if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
-			dev_info(&pf->pdev->dev, "tx_timeout: VSI %d not UP\n",
-				 vsi_seid);
-		} else if (rtnl_trylock()) {
-			vsi->netdev->netdev_ops->ndo_tx_timeout(vsi->netdev);
-			rtnl_unlock();
-			dev_info(&pf->pdev->dev, "tx_timeout called\n");
-		} else {
-			dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
-		}
-	} else if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
+	if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
 		int mtu;
 
 		cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
@@ -1736,8 +2318,13 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
 			dev_info(&pf->pdev->dev, "change_mtu: no netdev for VSI %d\n",
 				 vsi_seid);
 		} else if (rtnl_trylock()) {
+#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU
+			vsi->netdev->netdev_ops->extended.ndo_change_mtu(
+						 vsi->netdev, mtu);
+#else
 			vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev,
 								mtu);
+#endif
 			rtnl_unlock();
 			dev_info(&pf->pdev->dev, "change_mtu called\n");
 		} else {
@@ -1783,14 +2370,33 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
 				napi_schedule(&vsi->q_vectors[i]->napi);
 			dev_info(&pf->pdev->dev, "napi called\n");
 		}
+	} else if (strncmp(i40e_dbg_netdev_ops_buf,
+			   "toggle_tx_timeout", 17) == 0) {
+		cnt = sscanf(&i40e_dbg_netdev_ops_buf[17], "%i", &vsi_seid);
+		if (cnt != 1) {
+			dev_info(&pf->pdev->dev, "toggle_tx_timeout <vsi_seid>\n");
+			goto netdev_ops_write_done;
+		}
+		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+		if (!vsi) {
+			dev_info(&pf->pdev->dev, "toggle_tx_timeout: VSI %d not found\n",
+				 vsi_seid);
+		} else {
+			vsi->block_tx_timeout = !vsi->block_tx_timeout;
+			dev_info(&pf->pdev->dev, "toggle_tx_timeout: block_tx_timeout = %d\n",
+				 vsi->block_tx_timeout);
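+			/* block_tx_timeout presumably makes the driver ignore
+			 * Tx timeout events on this VSI, so validation can
+			 * exercise stuck-queue scenarios without triggering a
+			 * reset.
+			 */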
+		}
 	} else {
 		dev_info(&pf->pdev->dev, "unknown command '%s'\n",
 			 i40e_dbg_netdev_ops_buf);
 		dev_info(&pf->pdev->dev, "available commands\n");
-		dev_info(&pf->pdev->dev, "  tx_timeout <vsi_seid>\n");
 		dev_info(&pf->pdev->dev, "  change_mtu <vsi_seid> <mtu>\n");
 		dev_info(&pf->pdev->dev, "  set_rx_mode <vsi_seid>\n");
 		dev_info(&pf->pdev->dev, "  napi <vsi_seid>\n");
+		dev_info(&pf->pdev->dev, "  toggle_tx_timeout <vsi_seid>\n");
 	}
 netdev_ops_write_done:
 	return count;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
index 8e46098ba..55a7c1079 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -1,33 +1,12 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
 #ifndef _I40E_DEVIDS_H_
 #define _I40E_DEVIDS_H_
 
 /* Device IDs */
+#define I40E_DEV_ID_X710_N3000		0x0CF8
+#define I40E_DEV_ID_XXV710_N3000	0x0D58
 #define I40E_DEV_ID_SFP_XL710		0x1572
 #define I40E_DEV_ID_QEMU		0x1574
 #define I40E_DEV_ID_KX_B		0x1580
@@ -41,6 +20,16 @@
 #define I40E_DEV_ID_10G_BASE_T4		0x1589
 #define I40E_DEV_ID_25G_B		0x158A
 #define I40E_DEV_ID_25G_SFP28		0x158B
+#define I40E_DEV_ID_10G_BASE_T_BC	0x15FF
+#define I40E_DEV_ID_10G_B		0x104F
+#define I40E_DEV_ID_10G_SFP		0x104E
+#define I40E_DEV_ID_5G_BASE_T_BC	0x101F
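+/* Convenience macro grouping the 10GBASE-T and 5GBASE-T "BC" device IDs,
+ * which presumably get special-cased elsewhere as the X710-TL family.
+ */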
+#define I40E_IS_X710TL_DEVICE(d) \
+	(((d) == I40E_DEV_ID_10G_BASE_T_BC) || \
+	((d) == I40E_DEV_ID_5G_BASE_T_BC))
 #define I40E_DEV_ID_KX_X722		0x37CE
 #define I40E_DEV_ID_QSFP_X722		0x37CF
 #define I40E_DEV_ID_SFP_X722		0x37D0
@@ -52,4 +41,7 @@
 					 (d) == I40E_DEV_ID_QSFP_B  || \
 					 (d) == I40E_DEV_ID_QSFP_C)
 
+#define i40e_is_25G_device(d)		((d) == I40E_DEV_ID_25G_B  || \
+					 (d) == I40E_DEV_ID_25G_SFP28)
+
 #endif /* _I40E_DEVIDS_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index f141e78d4..dc2e9d6b4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -1,28 +1,5 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
 #include "i40e_diag.h"
 #include "i40e_prototype.h"
@@ -46,9 +23,11 @@ static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
 		wr32(hw, reg, (pat & mask));
 		val = rd32(hw, reg);
 		if ((val & mask) != (pat & mask)) {
+#ifdef ETHTOOL_TEST
 			i40e_debug(hw, I40E_DEBUG_DIAG,
 				   "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n",
 				   __func__, reg, pat, val);
+#endif
 			return I40E_ERR_DIAG_TEST_FAILED;
 		}
 	}
@@ -56,35 +35,29 @@ static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
 	wr32(hw, reg, orig_val);
 	val = rd32(hw, reg);
 	if (val != orig_val) {
+#ifdef ETHTOOL_TEST
 		i40e_debug(hw, I40E_DEBUG_DIAG,
 			   "%s: reg restore test failed - reg 0x%08x orig_val 0x%08x val 0x%08x\n",
 			   __func__, reg, orig_val, val);
+#endif
 		return I40E_ERR_DIAG_TEST_FAILED;
 	}
 
-	return 0;
+	return I40E_SUCCESS;
 }
 
 struct i40e_diag_reg_test_info i40e_reg_list[] = {
 	/* offset               mask         elements   stride */
-	{I40E_QTX_CTL(0),       0x0000FFBF, 1,
-		I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
-	{I40E_PFINT_ITR0(0),    0x00000FFF, 3,
-		I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
-	{I40E_PFINT_ITRN(0, 0), 0x00000FFF, 1,
-		I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
-	{I40E_PFINT_ITRN(1, 0), 0x00000FFF, 1,
-		I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
-	{I40E_PFINT_ITRN(2, 0), 0x00000FFF, 1,
-		I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
+	{I40E_QTX_CTL(0),       0x0000FFBF, 1, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
+	{I40E_PFINT_ITR0(0),    0x00000FFF, 3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
+	{I40E_PFINT_ITRN(0, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
+	{I40E_PFINT_ITRN(1, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
+	{I40E_PFINT_ITRN(2, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
 	{I40E_PFINT_STAT_CTL0,  0x0000000C, 1, 0},
 	{I40E_PFINT_LNKLST0,    0x00001FFF, 1, 0},
-	{I40E_PFINT_LNKLSTN(0), 0x000007FF, 1,
-		I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
-	{I40E_QINT_TQCTL(0),    0x000000FF, 1,
-		I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
-	{I40E_QINT_RQCTL(0),    0x000000FF, 1,
-		I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
+	{I40E_PFINT_LNKLSTN(0), 0x000007FF, 1, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
+	{I40E_QINT_TQCTL(0),    0x000000FF, 1, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
+	{I40E_QINT_RQCTL(0),    0x000000FF, 1, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
 	{I40E_PFINT_ICR0_ENA,   0xF7F20000, 1, 0},
 	{ 0 }
 };
@@ -97,12 +70,12 @@ struct i40e_diag_reg_test_info i40e_reg_list[] = {
  **/
 i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
 {
-	i40e_status ret_code = 0;
+	i40e_status ret_code = I40E_SUCCESS;
 	u32 reg, mask;
 	u32 i, j;
 
 	for (i = 0; i40e_reg_list[i].offset != 0 &&
-					     !ret_code; i++) {
+					     ret_code == I40E_SUCCESS; i++) {
 
 		/* set actual reg range for dynamically allocated resources */
 		if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) &&
@@ -119,9 +92,10 @@ i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
 
 		/* test register access */
 		mask = i40e_reg_list[i].mask;
-		for (j = 0; j < i40e_reg_list[i].elements && !ret_code; j++) {
-			reg = i40e_reg_list[i].offset +
-			      (j * i40e_reg_list[i].stride);
+		for (j = 0; j < i40e_reg_list[i].elements &&
+			    ret_code == I40E_SUCCESS; j++) {
+			reg = i40e_reg_list[i].offset
+				+ (j * i40e_reg_list[i].stride);
 			ret_code = i40e_diag_reg_pattern_test(hw, reg, mask);
 		}
 	}
@@ -142,7 +116,7 @@ i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw)
 
 	/* read NVM control word and if NVM valid, validate EEPROM checksum*/
 	ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
-	if (!ret_code &&
+	if ((ret_code == I40E_SUCCESS) &&
 	    ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
 	     BIT(I40E_SR_CONTROL_WORD_1_SHIFT)))
 		return i40e_validate_nvm_checksum(hw, NULL);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
index 0b5911652..697015b4f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
@@ -1,28 +1,5 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
 #ifndef _I40E_DIAG_H_
 #define _I40E_DIAG_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 05e89864f..7f93f74d4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1,54 +1,30 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
 /* ethtool support for i40e */
 
 #include "i40e.h"
 #include "i40e_diag.h"
 
-struct i40e_stats {
-	char stat_string[ETH_GSTRING_LEN];
-	int sizeof_stat;
-	int stat_offset;
-};
+#ifdef SIOCETHTOOL
+#ifndef ETH_GSTRING_LEN
+#define ETH_GSTRING_LEN 32
 
-#define I40E_STAT(_type, _name, _stat) { \
-	.stat_string = _name, \
-	.sizeof_stat = FIELD_SIZEOF(_type, _stat), \
-	.stat_offset = offsetof(_type, _stat) \
-}
+#endif
+#ifdef ETHTOOL_GSTATS
+
+#include "i40e_ethtool_stats.h"
 
-#define I40E_NETDEV_STAT(_net_stat) \
-		I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat)
 #define I40E_PF_STAT(_name, _stat) \
-		I40E_STAT(struct i40e_pf, _name, _stat)
+	I40E_STAT(struct i40e_pf, _name, _stat)
 #define I40E_VSI_STAT(_name, _stat) \
-		I40E_STAT(struct i40e_vsi, _name, _stat)
+	I40E_STAT(struct i40e_vsi, _name, _stat)
 #define I40E_VEB_STAT(_name, _stat) \
-		I40E_STAT(struct i40e_veb, _name, _stat)
+	I40E_STAT(struct i40e_veb, _name, _stat)
+#define I40E_PFC_STAT(_name, _stat) \
+	I40E_STAT(struct i40e_pfc_stats, _name, _stat)
+#define I40E_QUEUE_STAT(_name, _stat) \
+	I40E_STAT(struct i40e_ring, _name, _stat)
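+
+/* Each of these wrappers expands to an i40e_stats entry: an ethtool name
+ * string plus size/offset metadata (via I40E_STAT() in i40e_ethtool_stats.h),
+ * so the stats code can copy values out of any of these structures
+ * generically by offset rather than naming each field.
+ */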
 
 static const struct i40e_stats i40e_gstrings_net_stats[] = {
 	I40E_NETDEV_STAT(rx_packets),
@@ -65,18 +41,25 @@ static const struct i40e_stats i40e_gstrings_net_stats[] = {
 };
 
 static const struct i40e_stats i40e_gstrings_veb_stats[] = {
-	I40E_VEB_STAT("rx_bytes", stats.rx_bytes),
-	I40E_VEB_STAT("tx_bytes", stats.tx_bytes),
-	I40E_VEB_STAT("rx_unicast", stats.rx_unicast),
-	I40E_VEB_STAT("tx_unicast", stats.tx_unicast),
-	I40E_VEB_STAT("rx_multicast", stats.rx_multicast),
-	I40E_VEB_STAT("tx_multicast", stats.tx_multicast),
-	I40E_VEB_STAT("rx_broadcast", stats.rx_broadcast),
-	I40E_VEB_STAT("tx_broadcast", stats.tx_broadcast),
-	I40E_VEB_STAT("rx_discards", stats.rx_discards),
-	I40E_VEB_STAT("tx_discards", stats.tx_discards),
-	I40E_VEB_STAT("tx_errors", stats.tx_errors),
-	I40E_VEB_STAT("rx_unknown_protocol", stats.rx_unknown_protocol),
+	I40E_VEB_STAT("veb.rx_bytes", stats.rx_bytes),
+	I40E_VEB_STAT("veb.tx_bytes", stats.tx_bytes),
+	I40E_VEB_STAT("veb.rx_unicast", stats.rx_unicast),
+	I40E_VEB_STAT("veb.tx_unicast", stats.tx_unicast),
+	I40E_VEB_STAT("veb.rx_multicast", stats.rx_multicast),
+	I40E_VEB_STAT("veb.tx_multicast", stats.tx_multicast),
+	I40E_VEB_STAT("veb.rx_broadcast", stats.rx_broadcast),
+	I40E_VEB_STAT("veb.tx_broadcast", stats.tx_broadcast),
+	I40E_VEB_STAT("veb.rx_discards", stats.rx_discards),
+	I40E_VEB_STAT("veb.tx_discards", stats.tx_discards),
+	I40E_VEB_STAT("veb.tx_errors", stats.tx_errors),
+	I40E_VEB_STAT("veb.rx_unknown_protocol", stats.rx_unknown_protocol),
+};
+
+static const struct i40e_stats i40e_gstrings_veb_tc_stats[] = {
+	I40E_VEB_STAT("veb.tc_%u_tx_packets", tc_stats.tc_tx_packets),
+	I40E_VEB_STAT("veb.tc_%u_tx_bytes", tc_stats.tc_tx_bytes),
+	I40E_VEB_STAT("veb.tc_%u_rx_packets", tc_stats.tc_rx_packets),
+	I40E_VEB_STAT("veb.tc_%u_rx_bytes", tc_stats.tc_rx_bytes),
 };
 
 static const struct i40e_stats i40e_gstrings_misc_stats[] = {
@@ -89,6 +72,7 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
 	I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
 	I40E_VSI_STAT("tx_linearize", tx_linearize),
 	I40E_VSI_STAT("tx_force_wb", tx_force_wb),
+	I40E_VSI_STAT("tx_busy", tx_busy),
 	I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed),
 	I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
 };
@@ -104,93 +88,171 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
  * is queried on the base PF netdev, not on the VMDq or FCoE netdev.
  */
 static const struct i40e_stats i40e_gstrings_stats[] = {
-	I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),
-	I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes),
-	I40E_PF_STAT("rx_unicast", stats.eth.rx_unicast),
-	I40E_PF_STAT("tx_unicast", stats.eth.tx_unicast),
-	I40E_PF_STAT("rx_multicast", stats.eth.rx_multicast),
-	I40E_PF_STAT("tx_multicast", stats.eth.tx_multicast),
-	I40E_PF_STAT("rx_broadcast", stats.eth.rx_broadcast),
-	I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
-	I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
-	I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
-	I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
-	I40E_PF_STAT("rx_crc_errors", stats.crc_errors),
-	I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
-	I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
-	I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
-	I40E_PF_STAT("tx_timeout", tx_timeout_count),
-	I40E_PF_STAT("rx_csum_bad", hw_csum_rx_error),
-	I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
-	I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
-	I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
-	I40E_PF_STAT("link_xon_tx", stats.link_xon_tx),
-	I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
-	I40E_PF_STAT("rx_size_64", stats.rx_size_64),
-	I40E_PF_STAT("rx_size_127", stats.rx_size_127),
-	I40E_PF_STAT("rx_size_255", stats.rx_size_255),
-	I40E_PF_STAT("rx_size_511", stats.rx_size_511),
-	I40E_PF_STAT("rx_size_1023", stats.rx_size_1023),
-	I40E_PF_STAT("rx_size_1522", stats.rx_size_1522),
-	I40E_PF_STAT("rx_size_big", stats.rx_size_big),
-	I40E_PF_STAT("tx_size_64", stats.tx_size_64),
-	I40E_PF_STAT("tx_size_127", stats.tx_size_127),
-	I40E_PF_STAT("tx_size_255", stats.tx_size_255),
-	I40E_PF_STAT("tx_size_511", stats.tx_size_511),
-	I40E_PF_STAT("tx_size_1023", stats.tx_size_1023),
-	I40E_PF_STAT("tx_size_1522", stats.tx_size_1522),
-	I40E_PF_STAT("tx_size_big", stats.tx_size_big),
-	I40E_PF_STAT("rx_undersize", stats.rx_undersize),
-	I40E_PF_STAT("rx_fragments", stats.rx_fragments),
-	I40E_PF_STAT("rx_oversize", stats.rx_oversize),
-	I40E_PF_STAT("rx_jabber", stats.rx_jabber),
-	I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
-	I40E_PF_STAT("arq_overflows", arq_overflows),
-	I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
-	I40E_PF_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
-	I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
-	I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
-	I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
-	I40E_PF_STAT("fdir_atr_status", stats.fd_atr_status),
-	I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
-	I40E_PF_STAT("fdir_sb_status", stats.fd_sb_status),
+	I40E_PF_STAT("port.rx_bytes", stats.eth.rx_bytes),
+	I40E_PF_STAT("port.tx_bytes", stats.eth.tx_bytes),
+	I40E_PF_STAT("port.rx_unicast", stats.eth.rx_unicast),
+	I40E_PF_STAT("port.tx_unicast", stats.eth.tx_unicast),
+	I40E_PF_STAT("port.rx_multicast", stats.eth.rx_multicast),
+	I40E_PF_STAT("port.tx_multicast", stats.eth.tx_multicast),
+	I40E_PF_STAT("port.rx_broadcast", stats.eth.rx_broadcast),
+	I40E_PF_STAT("port.tx_broadcast", stats.eth.tx_broadcast),
+	I40E_PF_STAT("port.tx_errors", stats.eth.tx_errors),
+	I40E_PF_STAT("port.rx_dropped", stats.eth.rx_discards),
+	I40E_PF_STAT("port.tx_dropped_link_down", stats.tx_dropped_link_down),
+	I40E_PF_STAT("port.rx_crc_errors", stats.crc_errors),
+	I40E_PF_STAT("port.illegal_bytes", stats.illegal_bytes),
+	I40E_PF_STAT("port.mac_local_faults", stats.mac_local_faults),
+	I40E_PF_STAT("port.mac_remote_faults", stats.mac_remote_faults),
+	I40E_PF_STAT("port.tx_timeout", tx_timeout_count),
+	I40E_PF_STAT("port.rx_csum_bad", hw_csum_rx_error),
+	I40E_PF_STAT("port.rx_length_errors", stats.rx_length_errors),
+	I40E_PF_STAT("port.link_xon_rx", stats.link_xon_rx),
+	I40E_PF_STAT("port.link_xoff_rx", stats.link_xoff_rx),
+	I40E_PF_STAT("port.link_xon_tx", stats.link_xon_tx),
+	I40E_PF_STAT("port.link_xoff_tx", stats.link_xoff_tx),
+	I40E_PF_STAT("port.rx_size_64", stats.rx_size_64),
+	I40E_PF_STAT("port.rx_size_127", stats.rx_size_127),
+	I40E_PF_STAT("port.rx_size_255", stats.rx_size_255),
+	I40E_PF_STAT("port.rx_size_511", stats.rx_size_511),
+	I40E_PF_STAT("port.rx_size_1023", stats.rx_size_1023),
+	I40E_PF_STAT("port.rx_size_1522", stats.rx_size_1522),
+	I40E_PF_STAT("port.rx_size_big", stats.rx_size_big),
+	I40E_PF_STAT("port.tx_size_64", stats.tx_size_64),
+	I40E_PF_STAT("port.tx_size_127", stats.tx_size_127),
+	I40E_PF_STAT("port.tx_size_255", stats.tx_size_255),
+	I40E_PF_STAT("port.tx_size_511", stats.tx_size_511),
+	I40E_PF_STAT("port.tx_size_1023", stats.tx_size_1023),
+	I40E_PF_STAT("port.tx_size_1522", stats.tx_size_1522),
+	I40E_PF_STAT("port.tx_size_big", stats.tx_size_big),
+	I40E_PF_STAT("port.rx_undersize", stats.rx_undersize),
+	I40E_PF_STAT("port.rx_fragments", stats.rx_fragments),
+	I40E_PF_STAT("port.rx_oversize", stats.rx_oversize),
+	I40E_PF_STAT("port.rx_jabber", stats.rx_jabber),
+	I40E_PF_STAT("port.VF_admin_queue_requests", vf_aq_requests),
+	I40E_PF_STAT("port.arq_overflows", arq_overflows),
+#ifdef HAVE_PTP_1588_CLOCK
+	I40E_PF_STAT("port.tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+	I40E_PF_STAT("port.rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+	I40E_PF_STAT("port.tx_hwtstamp_skipped", tx_hwtstamp_skipped),
+#endif /* HAVE_PTP_1588_CLOCK */
+	I40E_PF_STAT("port.fdir_flush_cnt", fd_flush_cnt),
+	I40E_PF_STAT("port.fdir_atr_match", stats.fd_atr_match),
+	I40E_PF_STAT("port.fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
+	I40E_PF_STAT("port.fdir_atr_status", stats.fd_atr_status),
+	I40E_PF_STAT("port.fdir_sb_match", stats.fd_sb_match),
+	I40E_PF_STAT("port.fdir_sb_status", stats.fd_sb_status),
+#ifdef I40E_ADD_PROBES
+	I40E_PF_STAT("port.tx_tcp_segments", tcp_segs),
+	I40E_PF_STAT("port.tx_tcp_cso", tx_tcp_cso),
+	I40E_PF_STAT("port.tx_udp_cso", tx_udp_cso),
+	I40E_PF_STAT("port.tx_sctp_cso", tx_sctp_cso),
+	I40E_PF_STAT("port.tx_ip4_cso", tx_ip4_cso),
+	I40E_PF_STAT("port.rx_tcp_cso", rx_tcp_cso),
+	I40E_PF_STAT("port.rx_udp_cso", rx_udp_cso),
+	I40E_PF_STAT("port.rx_sctp_cso", rx_sctp_cso),
+	I40E_PF_STAT("port.rx_ip4_cso", rx_ip4_cso),
+	I40E_PF_STAT("port.rx_csum_offload_outer", hw_csum_rx_outer),
+	I40E_PF_STAT("port.rx_tcp_cso_error", rx_tcp_cso_err),
+	I40E_PF_STAT("port.rx_udp_cso_error", rx_udp_cso_err),
+	I40E_PF_STAT("port.rx_sctp_cso_error", rx_sctp_cso_err),
+	I40E_PF_STAT("port.rx_ip4_cso_error", rx_ip4_cso_err),
+#endif
 
 	/* LPI stats */
-	I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
-	I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status),
-	I40E_PF_STAT("tx_lpi_count", stats.tx_lpi_count),
-	I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count),
+	I40E_PF_STAT("port.tx_lpi_status", stats.tx_lpi_status),
+	I40E_PF_STAT("port.rx_lpi_status", stats.rx_lpi_status),
+	I40E_PF_STAT("port.tx_lpi_count", stats.tx_lpi_count),
+	I40E_PF_STAT("port.rx_lpi_count", stats.rx_lpi_count),
+	I40E_PF_STAT("port.tx_lpi_duration", stats.tx_lpi_duration),
+	I40E_PF_STAT("port.rx_lpi_duration", stats.rx_lpi_duration),
 };
 
-#define I40E_QUEUE_STATS_LEN(n) \
-	(((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
-	    * 2 /* Tx and Rx together */                                     \
-	    * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
-#define I40E_GLOBAL_STATS_LEN	ARRAY_SIZE(i40e_gstrings_stats)
-#define I40E_NETDEV_STATS_LEN   ARRAY_SIZE(i40e_gstrings_net_stats)
+struct i40e_pfc_stats {
+	u64 priority_xon_rx;
+	u64 priority_xoff_rx;
+	u64 priority_xon_tx;
+	u64 priority_xoff_tx;
+	u64 priority_xon_2_xoff;
+};
+
+static const struct i40e_stats i40e_gstrings_pfc_stats[] = {
+	I40E_PFC_STAT("port.tx_priority_%u_xon_tx", priority_xon_tx),
+	I40E_PFC_STAT("port.tx_priority_%u_xoff_tx", priority_xoff_tx),
+	I40E_PFC_STAT("port.rx_priority_%u_xon_rx", priority_xon_rx),
+	I40E_PFC_STAT("port.rx_priority_%u_xoff_rx", priority_xoff_rx),
+	I40E_PFC_STAT("port.rx_priority_%u_xon_2_xoff", priority_xon_2_xoff),
+};
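+
+/* The "%u" in the PFC stat names above is a template placeholder: one copy
+ * of each string is generated per user priority, which is why
+ * I40E_PFC_STATS_LEN below multiplies by I40E_MAX_USER_PRIORITY.
+ */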
+
+#define I40E_NETDEV_STATS_LEN	ARRAY_SIZE(i40e_gstrings_net_stats)
+
 #define I40E_MISC_STATS_LEN	ARRAY_SIZE(i40e_gstrings_misc_stats)
-#define I40E_VSI_STATS_LEN(n)   (I40E_NETDEV_STATS_LEN + \
-				 I40E_MISC_STATS_LEN + \
-				 I40E_QUEUE_STATS_LEN((n)))
-#define I40E_PFC_STATS_LEN ( \
-		(FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
-		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \
-		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_tx) + \
-		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
-		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
-		 / sizeof(u64))
-#define I40E_VEB_TC_STATS_LEN ( \
-		(FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_packets) + \
-		 FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_bytes) + \
-		 FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_packets) + \
-		 FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_bytes)) \
-		 / sizeof(u64))
-#define I40E_VEB_STATS_LEN	ARRAY_SIZE(i40e_gstrings_veb_stats)
-#define I40E_VEB_STATS_TOTAL	(I40E_VEB_STATS_LEN + I40E_VEB_TC_STATS_LEN)
-#define I40E_PF_STATS_LEN(n)	(I40E_GLOBAL_STATS_LEN + \
+
+#define I40E_VSI_STATS_LEN	(I40E_NETDEV_STATS_LEN + I40E_MISC_STATS_LEN)
+
+#define I40E_PFC_STATS_LEN	(ARRAY_SIZE(i40e_gstrings_pfc_stats) * \
+				 I40E_MAX_USER_PRIORITY)
+
+#define I40E_VEB_STATS_LEN	(ARRAY_SIZE(i40e_gstrings_veb_stats) + \
+				 (ARRAY_SIZE(i40e_gstrings_veb_tc_stats) * \
+				  I40E_MAX_TRAFFIC_CLASS))
+
+#define I40E_GLOBAL_STATS_LEN	ARRAY_SIZE(i40e_gstrings_stats)
+
+/* Length (number) of PF core stats only (i.e. without queues / extra stats): */
+#define I40E_PF_STATS_LEN	(I40E_GLOBAL_STATS_LEN + \
 				 I40E_PFC_STATS_LEN + \
-				 I40E_VSI_STATS_LEN((n)))
+				 I40E_VEB_STATS_LEN + \
+				 I40E_VSI_STATS_LEN)
+
+/* Length of stats for a single queue */
+#define I40E_QUEUE_STATS_LEN	ARRAY_SIZE(i40e_gstrings_queue_stats)
+#ifdef HAVE_XDP_SUPPORT
+#define I40E_QUEUE_STATS_XDP_LEN ARRAY_SIZE(i40e_gstrings_rx_queue_xdp_stats)
+#endif
 
+#ifndef I40E_PF_EXTRA_STATS_OFF
+
+#define I40E_STATS_NAME_VFID_EXTRA "vf___."
+#define I40E_STATS_NAME_VFID_EXTRA_LEN (sizeof(I40E_STATS_NAME_VFID_EXTRA) - 1)
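+
+/* The "vf___." prefix is a fixed-width template: the underscores are
+ * presumably overwritten with the VF number when the per-VF stat strings
+ * are built, one block of these entries per possible VF.
+ */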
+
+static struct i40e_stats i40e_gstrings_eth_stats_extra[] = {
+	I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA
+		      "rx_bytes", eth_stats.rx_bytes),
+	I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA
+		      "rx_unicast", eth_stats.rx_unicast),
+	I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA
+		      "rx_multicast", eth_stats.rx_multicast),
+	I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA
+		      "rx_broadcast", eth_stats.rx_broadcast),
+	I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA
+		      "rx_discards", eth_stats.rx_discards),
+	I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA
+		      "rx_unknown_protocol", eth_stats.rx_unknown_protocol),
+	I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA
+		      "tx_bytes", eth_stats.tx_bytes),
+	I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA
+		      "tx_unicast", eth_stats.tx_unicast),
+	I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA
+		      "tx_multicast", eth_stats.tx_multicast),
+	I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA
+		      "tx_broadcast", eth_stats.tx_broadcast),
+	I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA
+		      "tx_discards", eth_stats.tx_discards),
+	I40E_VSI_STAT(I40E_STATS_NAME_VFID_EXTRA
+		      "tx_errors", eth_stats.tx_errors),
+};
+
+#define I40E_STATS_EXTRA_COUNT	128  /* for now this is just I40E_MAX_VF_COUNT */
+/* The following length does not include the per-queue stats: */
+#define I40E_STATS_EXTRA_LEN	ARRAY_SIZE(i40e_gstrings_eth_stats_extra)
+/* Length (number) of PF extra stats only (i.e. without core stats / queues): */
+#define I40E_PF_STATS_EXTRA_LEN (I40E_STATS_EXTRA_COUNT * I40E_STATS_EXTRA_LEN)
+/* Length (number) of enhanced/all PF stats (i.e. core with extra stats): */
+#define I40E_PF_STATS_ENHANCE_LEN (I40E_PF_STATS_LEN + I40E_PF_STATS_EXTRA_LEN)
+
+#endif /* !I40E_PF_EXTRA_STATS_OFF */
+#endif /* ETHTOOL_GSTATS */
+#ifdef ETHTOOL_TEST
 enum i40e_ethtool_test_id {
 	I40E_ETH_TEST_REG = 0,
 	I40E_ETH_TEST_EEPROM,
@@ -207,6 +269,9 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
 
 #define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
 
+#endif /* ETHTOOL_TEST */
+
+#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
 struct i40e_priv_flags {
 	char flag_string[ETH_GSTRING_LEN];
 	u64 flag;
@@ -222,11 +287,21 @@ struct i40e_priv_flags {
 static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
 	/* NOTE: MFP setting cannot be changed */
 	I40E_PRIV_FLAG("MFP", I40E_FLAG_MFP_ENABLED, 1),
+	I40E_PRIV_FLAG("total-port-shutdown", I40E_FLAG_TOTAL_PORT_SHUTDOWN, 1),
 	I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0),
 	I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0),
 	I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0),
 	I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENABLED, 0),
+	I40E_PRIV_FLAG("link-down-on-close",
+		       I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED, 0),
+#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC
 	I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0),
+#endif
+	I40E_PRIV_FLAG("disable-source-pruning",
+		       I40E_FLAG_SOURCE_PRUNING_DISABLED, 0),
+	I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_DISABLE_FW_LLDP, 0),
+	I40E_PRIV_FLAG("rs-fec", I40E_FLAG_RS_FEC, 0),
+	I40E_PRIV_FLAG("base-r-fec", I40E_FLAG_BASE_R_FEC, 0),
 };
 
 #define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
@@ -239,6 +314,7 @@ static const struct i40e_priv_flags i40e_gl_gstrings_priv_flags[] = {
 
 #define I40E_GL_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gl_gstrings_priv_flags)
 
+#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
 /**
  * i40e_partition_setting_complaint - generic complaint for MFP restriction
  * @pf: the PF struct
@@ -251,431 +327,1001 @@ static void i40e_partition_setting_complaint(struct i40e_pf *pf)
 
 /**
  * i40e_phy_type_to_ethtool - convert the phy_types to ethtool link modes
- * @phy_types: PHY types to convert
- * @supported: pointer to the ethtool supported variable to fill in
- * @advertising: pointer to the ethtool advertising variable to fill in
+ * @pf: PF struct with phy_types
+ * @ks: ethtool link ksettings struct to fill out
  *
  **/
-static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,
-				     u32 *advertising)
+static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
+				     struct ethtool_link_ksettings *ks)
 {
 	struct i40e_link_status *hw_link_info = &pf->hw.phy.link_info;
 	u64 phy_types = pf->hw.phy.phy_types;
 
-	*supported = 0x0;
-	*advertising = 0x0;
+	ethtool_link_ksettings_zero_link_mode(ks, supported);
+	ethtool_link_ksettings_zero_link_mode(ks, advertising);
 
 	if (phy_types & I40E_CAP_PHY_TYPE_SGMII) {
-		*supported |= SUPPORTED_Autoneg |
-			      SUPPORTED_1000baseT_Full;
-		*advertising |= ADVERTISED_Autoneg;
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     1000baseT_Full);
 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
-			*advertising |= ADVERTISED_1000baseT_Full;
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     1000baseT_Full);
 		if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) {
-			*supported |= SUPPORTED_100baseT_Full;
-			*advertising |= ADVERTISED_100baseT_Full;
+			ethtool_link_ksettings_add_link_mode(ks, supported,
+							     100baseT_Full);
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     100baseT_Full);
 		}
 	}
 	if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
 	    phy_types & I40E_CAP_PHY_TYPE_XFI ||
 	    phy_types & I40E_CAP_PHY_TYPE_SFI ||
 	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU ||
-	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC)
-		*supported |= SUPPORTED_10000baseT_Full;
-	if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
-	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
-	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
-	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
-	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
-		*supported |= SUPPORTED_Autoneg |
-			      SUPPORTED_10000baseT_Full;
-		*advertising |= ADVERTISED_Autoneg;
+	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC) {
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     10000baseT_Full);
+		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     10000baseT_Full);
+	}
+	if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_T) {
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     10000baseT_Full);
 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
-			*advertising |= ADVERTISED_10000baseT_Full;
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     10000baseT_Full);
+	}
+#ifdef HAVE_ETHTOOL_NEW_2500MB_BITS
+	if (phy_types & I40E_CAP_PHY_TYPE_2_5GBASE_T) {
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     2500baseT_Full);
+		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_2_5GB)
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     2500baseT_Full);
 	}
+#endif /* HAVE_ETHTOOL_NEW_2500MB_BITS */
+#ifdef HAVE_ETHTOOL_5G_BITS
+	if (phy_types & I40E_CAP_PHY_TYPE_5GBASE_T) {
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     5000baseT_Full);
+		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_5GB)
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     5000baseT_Full);
+	}
+#endif /* HAVE_ETHTOOL_5G_BITS */
 	if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||
 	    phy_types & I40E_CAP_PHY_TYPE_XLPPI ||
 	    phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC)
-		*supported |= SUPPORTED_40000baseCR4_Full;
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     40000baseCR4_Full);
 	if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
 	    phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) {
-		*supported |= SUPPORTED_Autoneg |
-			      SUPPORTED_40000baseCR4_Full;
-		*advertising |= ADVERTISED_Autoneg;
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     40000baseCR4_Full);
 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_40GB)
-			*advertising |= ADVERTISED_40000baseCR4_Full;
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     40000baseCR4_Full);
 	}
 	if (phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) {
-		*supported |= SUPPORTED_Autoneg |
-			      SUPPORTED_100baseT_Full;
-		*advertising |= ADVERTISED_Autoneg;
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     100baseT_Full);
 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
-			*advertising |= ADVERTISED_100baseT_Full;
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     100baseT_Full);
 	}
-	if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
-	    phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
-	    phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
-	    phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
-		*supported |= SUPPORTED_Autoneg |
-			      SUPPORTED_1000baseT_Full;
-		*advertising |= ADVERTISED_Autoneg;
+	if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T) {
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     1000baseT_Full);
 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
-			*advertising |= ADVERTISED_1000baseT_Full;
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     1000baseT_Full);
+	}
+	if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4) {
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     40000baseSR4_Full);
+		ethtool_link_ksettings_add_link_mode(ks, advertising,
+						     40000baseSR4_Full);
+	}
+	if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4) {
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     40000baseLR4_Full);
+		ethtool_link_ksettings_add_link_mode(ks, advertising,
+						     40000baseLR4_Full);
 	}
-	if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4)
-		*supported |= SUPPORTED_40000baseSR4_Full;
-	if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4)
-		*supported |= SUPPORTED_40000baseLR4_Full;
 	if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) {
-		*supported |= SUPPORTED_40000baseKR4_Full |
-			      SUPPORTED_Autoneg;
-		*advertising |= ADVERTISED_40000baseKR4_Full |
-				ADVERTISED_Autoneg;
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     40000baseKR4_Full);
+		ethtool_link_ksettings_add_link_mode(ks, advertising,
+						     40000baseKR4_Full);
 	}
 	if (phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {
-		*supported |= SUPPORTED_20000baseKR2_Full |
-			      SUPPORTED_Autoneg;
-		*advertising |= ADVERTISED_Autoneg;
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     20000baseKR2_Full);
 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_20GB)
-			*advertising |= ADVERTISED_20000baseKR2_Full;
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     20000baseKR2_Full);
 	}
-	if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) {
-		if (!(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER))
-			*supported |= SUPPORTED_10000baseKR_Full |
-				      SUPPORTED_Autoneg;
-		*advertising |= ADVERTISED_Autoneg;
+	if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     10000baseKX4_Full);
 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
-			if (!(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER))
-				*advertising |= ADVERTISED_10000baseKR_Full;
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     10000baseKX4_Full);
 	}
-	if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
-		*supported |= SUPPORTED_10000baseKX4_Full |
-			      SUPPORTED_Autoneg;
-		*advertising |= ADVERTISED_Autoneg;
+	if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR &&
+	    !(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) {
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     10000baseKR_Full);
 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
-			*advertising |= ADVERTISED_10000baseKX4_Full;
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     10000baseKR_Full);
 	}
-	if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) {
-		if (!(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER))
-			*supported |= SUPPORTED_1000baseKX_Full |
-				      SUPPORTED_Autoneg;
-		*advertising |= ADVERTISED_Autoneg;
+	if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX &&
+	    !(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) {
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     1000baseKX_Full);
 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
-			if (!(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER))
-				*advertising |= ADVERTISED_1000baseKX_Full;
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     1000baseKX_Full);
+	}
+#ifdef HAVE_ETHTOOL_25G_BITS
+	/* 25G PHY types; only exposed on kernels with 25G ethtool bits */
+	if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR) {
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     25000baseKR_Full);
+		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB)
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     25000baseKR_Full);
+	}
+	if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR) {
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     25000baseCR_Full);
+		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB)
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     25000baseCR_Full);
 	}
+	if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR ||
+	    phy_types & I40E_CAP_PHY_TYPE_25GBASE_LR) {
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     25000baseSR_Full);
+		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB)
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     25000baseSR_Full);
+	}
+	if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_AOC ||
+	    phy_types & I40E_CAP_PHY_TYPE_25GBASE_ACC) {
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     25000baseCR_Full);
+		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB)
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     25000baseCR_Full);
+	}
+#ifdef ETHTOOL_GFECPARAM
 	if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR ||
 	    phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR ||
 	    phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR ||
-	    phy_types & I40E_CAP_PHY_TYPE_25GBASE_LR) {
-		*supported |= SUPPORTED_Autoneg;
-		*advertising |= ADVERTISED_Autoneg;
+	    phy_types & I40E_CAP_PHY_TYPE_25GBASE_LR ||
+	    phy_types & I40E_CAP_PHY_TYPE_25GBASE_AOC ||
+	    phy_types & I40E_CAP_PHY_TYPE_25GBASE_ACC) {
+		ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
+		ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+		ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
+		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB) {
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     FEC_NONE);
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     FEC_RS);
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     FEC_BASER);
+		}
+	}
+#endif /* ETHTOOL_GFECPARAM */
+#endif /* HAVE_ETHTOOL_25G_BITS */
+#ifdef HAVE_ETHTOOL_NEW_10G_BITS
+	/* new 10G PHY types; only exposed on kernels with the new 10G bits */
+	if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
+	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU) {
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     10000baseCR_Full);
+		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     10000baseCR_Full);
+	}
+	if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR) {
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     10000baseSR_Full);
+		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     10000baseSR_Full);
+	}
+	if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     10000baseLR_Full);
+		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     10000baseLR_Full);
+	}
+	if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
+	    phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
+	    phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     1000baseX_Full);
+		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     1000baseX_Full);
+	}
+#else
+	/* need to keep backward compatibility with older kernels */
+	if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
+	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
+	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
+	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     10000baseT_Full);
+		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     10000baseT_Full);
+	}
+	if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
+	    phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
+	    phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     1000baseT_Full);
+		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     1000baseT_Full);
+	}
+#endif /* HAVE_ETHTOOL_NEW_10G_BITS */
+	/* Autoneg PHY types */
+	if (phy_types & I40E_CAP_PHY_TYPE_SGMII ||
+	    phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4 ||
+	    phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
+	    phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4 ||
+	    phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR ||
+	    phy_types & I40E_CAP_PHY_TYPE_25GBASE_LR ||
+	    phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR ||
+	    phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR ||
+	    phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2 ||
+	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
+	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR ||
+	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4 ||
+	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR ||
+	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
+	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
+	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
+	    phy_types & I40E_CAP_PHY_TYPE_5GBASE_T ||
+	    phy_types & I40E_CAP_PHY_TYPE_2_5GBASE_T ||
+	    phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL ||
+	    phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
+	    phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
+	    phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
+	    phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX ||
+	    phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) {
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     Autoneg);
+		ethtool_link_ksettings_add_link_mode(ks, advertising,
+						     Autoneg);
 	}
 }
 
+#ifdef ETHTOOL_GFECPARAM
 /**
- * i40e_get_settings_link_up - Get the Link settings for when link is up
+ * i40e_get_settings_link_up_fec - Get the FEC mode encoding from mask
+ * @req_fec_info: mask of the requested FEC info
+ * @ks: ethtool ksettings to fill in
+ **/
+static void i40e_get_settings_link_up_fec(u8 req_fec_info,
+					  struct ethtool_link_ksettings *ks)
+{
+	ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
+	ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+	ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
+
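+	/* Advertise the FEC mode(s) firmware reports as requested; requesting
+	 * both RS and KR means any FEC mode may be negotiated.
+	 */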
+	if ((I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) &&
+	    (I40E_AQ_SET_FEC_REQUEST_KR & req_fec_info)) {
+		ethtool_link_ksettings_add_link_mode(ks, advertising,
+						     FEC_NONE);
+		ethtool_link_ksettings_add_link_mode(ks, advertising,
+						     FEC_BASER);
+		ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+	} else if (I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) {
+		ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+	} else if (I40E_AQ_SET_FEC_REQUEST_KR & req_fec_info) {
+		ethtool_link_ksettings_add_link_mode(ks, advertising,
+						     FEC_BASER);
+	} else {
+		ethtool_link_ksettings_add_link_mode(ks, advertising,
+						     FEC_NONE);
+	}
+}
+#endif /* ETHTOOL_GFECPARAM */
+
+/**
+ * i40e_get_settings_link_up - Get Link settings for when link is up
  * @hw: hw structure
- * @ecmd: ethtool command to fill in
+ * @ks: ethtool ksettings to fill in
  * @netdev: network interface device structure
- *
+ * @pf: pointer to physical function struct
  **/
 static void i40e_get_settings_link_up(struct i40e_hw *hw,
-				      struct ethtool_link_ksettings *cmd,
+				      struct ethtool_link_ksettings *ks,
 				      struct net_device *netdev,
 				      struct i40e_pf *pf)
 {
 	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+	struct ethtool_link_ksettings cap_ksettings;
 	u32 link_speed = hw_link_info->link_speed;
-	u32 e_advertising = 0x0;
-	u32 e_supported = 0x0;
-	u32 supported, advertising;
-
-	ethtool_convert_link_mode_to_legacy_u32(&supported,
-						cmd->link_modes.supported);
-	ethtool_convert_link_mode_to_legacy_u32(&advertising,
-						cmd->link_modes.advertising);
 
 	/* Initialize supported and advertised settings based on phy settings */
 	switch (hw_link_info->phy_type) {
 	case I40E_PHY_TYPE_40GBASE_CR4:
 	case I40E_PHY_TYPE_40GBASE_CR4_CU:
-		supported = SUPPORTED_Autoneg |
-			    SUPPORTED_40000baseCR4_Full;
-		advertising = ADVERTISED_Autoneg |
-			      ADVERTISED_40000baseCR4_Full;
+		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     40000baseCR4_Full);
+		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+		ethtool_link_ksettings_add_link_mode(ks, advertising,
+						     40000baseCR4_Full);
 		break;
 	case I40E_PHY_TYPE_XLAUI:
 	case I40E_PHY_TYPE_XLPPI:
 	case I40E_PHY_TYPE_40GBASE_AOC:
-		supported = SUPPORTED_40000baseCR4_Full;
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     40000baseCR4_Full);
+		ethtool_link_ksettings_add_link_mode(ks, advertising,
+						     40000baseCR4_Full);
 		break;
 	case I40E_PHY_TYPE_40GBASE_SR4:
-		supported = SUPPORTED_40000baseSR4_Full;
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     40000baseSR4_Full);
+		ethtool_link_ksettings_add_link_mode(ks, advertising,
+						     40000baseSR4_Full);
 		break;
 	case I40E_PHY_TYPE_40GBASE_LR4:
-		supported = SUPPORTED_40000baseLR4_Full;
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     40000baseLR4_Full);
+		ethtool_link_ksettings_add_link_mode(ks, advertising,
+						     40000baseLR4_Full);
 		break;
+	case I40E_PHY_TYPE_25GBASE_SR:
+	case I40E_PHY_TYPE_25GBASE_LR:
 	case I40E_PHY_TYPE_10GBASE_SR:
 	case I40E_PHY_TYPE_10GBASE_LR:
 	case I40E_PHY_TYPE_1000BASE_SX:
 	case I40E_PHY_TYPE_1000BASE_LX:
-		supported = SUPPORTED_10000baseT_Full;
+		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+#ifdef HAVE_ETHTOOL_25G_BITS
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     25000baseSR_Full);
+		ethtool_link_ksettings_add_link_mode(ks, advertising,
+						     25000baseSR_Full);
+#ifdef ETHTOOL_GFECPARAM
+		i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
+#endif /* ETHTOOL_GFECPARAM */
+#endif /* HAVE_ETHTOOL_25G_BITS */
+#ifdef HAVE_ETHTOOL_NEW_10G_BITS
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     10000baseSR_Full);
+		ethtool_link_ksettings_add_link_mode(ks, advertising,
+						     10000baseSR_Full);
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     10000baseLR_Full);
+		ethtool_link_ksettings_add_link_mode(ks, advertising,
+						     10000baseLR_Full);
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     1000baseX_Full);
+		ethtool_link_ksettings_add_link_mode(ks, advertising,
+						     1000baseX_Full);
+#endif /* HAVE_ETHTOOL_NEW_10G_BITS */
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     10000baseT_Full);
 		if (hw_link_info->module_type[2] &
 		    I40E_MODULE_TYPE_1000BASE_SX ||
 		    hw_link_info->module_type[2] &
 		    I40E_MODULE_TYPE_1000BASE_LX) {
-			supported |= SUPPORTED_1000baseT_Full;
+			ethtool_link_ksettings_add_link_mode(ks, supported,
+							     1000baseT_Full);
 			if (hw_link_info->requested_speeds &
 			    I40E_LINK_SPEED_1GB)
-				advertising |= ADVERTISED_1000baseT_Full;
+				ethtool_link_ksettings_add_link_mode(
+				     ks, advertising, 1000baseT_Full);
 		}
 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
-			advertising |= ADVERTISED_10000baseT_Full;
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     10000baseT_Full);
 		break;
 	case I40E_PHY_TYPE_10GBASE_T:
+	case I40E_PHY_TYPE_5GBASE_T:
+	case I40E_PHY_TYPE_2_5GBASE_T:
 	case I40E_PHY_TYPE_1000BASE_T:
 	case I40E_PHY_TYPE_100BASE_TX:
-		supported = SUPPORTED_Autoneg |
-			    SUPPORTED_10000baseT_Full |
-			    SUPPORTED_1000baseT_Full |
-			    SUPPORTED_100baseT_Full;
-		advertising = ADVERTISED_Autoneg;
+		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     10000baseT_Full);
+#ifdef HAVE_ETHTOOL_5G_BITS
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     5000baseT_Full);
+#endif /* HAVE_ETHTOOL_5G_BITS */
+#ifdef HAVE_ETHTOOL_NEW_2500MB_BITS
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     2500baseT_Full);
+#endif /* HAVE_ETHTOOL_NEW_2500MB_BITS */
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     1000baseT_Full);
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     100baseT_Full);
+		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
-			advertising |= ADVERTISED_10000baseT_Full;
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     10000baseT_Full);
+#ifdef HAVE_ETHTOOL_5G_BITS
+		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_5GB)
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     5000baseT_Full);
+#endif /* HAVE_ETHTOOL_5G_BITS */
+#ifdef HAVE_ETHTOOL_NEW_2500MB_BITS
+		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_2_5GB)
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     2500baseT_Full);
+#endif /* HAVE_ETHTOOL_NEW_2500MB_BITS */
 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
-			advertising |= ADVERTISED_1000baseT_Full;
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     1000baseT_Full);
 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
-			advertising |= ADVERTISED_100baseT_Full;
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     100baseT_Full);
 		break;
 	case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
-		supported = SUPPORTED_Autoneg |
-			    SUPPORTED_1000baseT_Full;
-		advertising = ADVERTISED_Autoneg |
-			      ADVERTISED_1000baseT_Full;
+		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     1000baseT_Full);
+		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+		ethtool_link_ksettings_add_link_mode(ks, advertising,
+						     1000baseT_Full);
 		break;
 	case I40E_PHY_TYPE_10GBASE_CR1_CU:
 	case I40E_PHY_TYPE_10GBASE_CR1:
-		supported = SUPPORTED_Autoneg |
-			    SUPPORTED_10000baseT_Full;
-		advertising = ADVERTISED_Autoneg |
-			      ADVERTISED_10000baseT_Full;
+		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     10000baseT_Full);
+		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+		ethtool_link_ksettings_add_link_mode(ks, advertising,
+						     10000baseT_Full);
 		break;
 	case I40E_PHY_TYPE_XAUI:
 	case I40E_PHY_TYPE_XFI:
 	case I40E_PHY_TYPE_SFI:
 	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
 	case I40E_PHY_TYPE_10GBASE_AOC:
-		supported = SUPPORTED_10000baseT_Full;
-		advertising = SUPPORTED_10000baseT_Full;
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     10000baseT_Full);
+		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     10000baseT_Full);
 		break;
 	case I40E_PHY_TYPE_SGMII:
-		supported = SUPPORTED_Autoneg |
-			    SUPPORTED_1000baseT_Full;
+		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     1000baseT_Full);
 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
-			advertising |= ADVERTISED_1000baseT_Full;
+			ethtool_link_ksettings_add_link_mode(ks, advertising,
+							     1000baseT_Full);
 		if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) {
-			supported |= SUPPORTED_100baseT_Full;
+			ethtool_link_ksettings_add_link_mode(ks, supported,
+							     100baseT_Full);
 			if (hw_link_info->requested_speeds &
 			    I40E_LINK_SPEED_100MB)
-				advertising |= ADVERTISED_100baseT_Full;
+				ethtool_link_ksettings_add_link_mode(
+				      ks, advertising, 100baseT_Full);
 		}
 		break;
 	case I40E_PHY_TYPE_40GBASE_KR4:
+	case I40E_PHY_TYPE_25GBASE_KR:
 	case I40E_PHY_TYPE_20GBASE_KR2:
 	case I40E_PHY_TYPE_10GBASE_KR:
 	case I40E_PHY_TYPE_10GBASE_KX4:
 	case I40E_PHY_TYPE_1000BASE_KX:
-		supported |= SUPPORTED_40000baseKR4_Full |
-			     SUPPORTED_20000baseKR2_Full |
-			     SUPPORTED_10000baseKR_Full |
-			     SUPPORTED_10000baseKX4_Full |
-			     SUPPORTED_1000baseKX_Full |
-			     SUPPORTED_Autoneg;
-		advertising |= ADVERTISED_40000baseKR4_Full |
-			       ADVERTISED_20000baseKR2_Full |
-			       ADVERTISED_10000baseKR_Full |
-			       ADVERTISED_10000baseKX4_Full |
-			       ADVERTISED_1000baseKX_Full |
-			       ADVERTISED_Autoneg;
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     40000baseKR4_Full);
+#ifdef HAVE_ETHTOOL_25G_BITS
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     25000baseKR_Full);
+#endif /* HAVE_ETHTOOL_25G_BITS */
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     20000baseKR2_Full);
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     10000baseKR_Full);
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     10000baseKX4_Full);
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     1000baseKX_Full);
+		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+		ethtool_link_ksettings_add_link_mode(ks, advertising,
+						     40000baseKR4_Full);
+#ifdef HAVE_ETHTOOL_25G_BITS
+		ethtool_link_ksettings_add_link_mode(ks, advertising,
+						     25000baseKR_Full);
+#ifdef ETHTOOL_GFECPARAM
+		i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
+#endif /* ETHTOOL_GFECPARAM */
+#endif /* HAVE_ETHTOOL_25G_BITS */
+		ethtool_link_ksettings_add_link_mode(ks, advertising,
+						     20000baseKR2_Full);
+		ethtool_link_ksettings_add_link_mode(ks, advertising,
+						     10000baseKR_Full);
+		ethtool_link_ksettings_add_link_mode(ks, advertising,
+						     10000baseKX4_Full);
+		ethtool_link_ksettings_add_link_mode(ks, advertising,
+						     1000baseKX_Full);
+		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
 		break;
-	case I40E_PHY_TYPE_25GBASE_KR:
 	case I40E_PHY_TYPE_25GBASE_CR:
-	case I40E_PHY_TYPE_25GBASE_SR:
-	case I40E_PHY_TYPE_25GBASE_LR:
-		supported = SUPPORTED_Autoneg;
-		advertising = ADVERTISED_Autoneg;
-		/* TODO: add speeds when ethtool is ready to support*/
+		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+#ifdef HAVE_ETHTOOL_25G_BITS
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     25000baseCR_Full);
+		ethtool_link_ksettings_add_link_mode(ks, advertising,
+						     25000baseCR_Full);
+#ifdef ETHTOOL_GFECPARAM
+		i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
+#endif /* ETHTOOL_GFECPARAM */
+
+#endif /* HAVE_ETHTOOL_25G_BITS */
+		break;
+	case I40E_PHY_TYPE_25GBASE_AOC:
+	case I40E_PHY_TYPE_25GBASE_ACC:
+		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+#ifdef HAVE_ETHTOOL_25G_BITS
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     25000baseCR_Full);
+		ethtool_link_ksettings_add_link_mode(ks, advertising,
+						     25000baseCR_Full);
+#ifdef ETHTOOL_GFECPARAM
+		i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
+#endif /* ETHTOOL_GFECPARAM */
+
+#endif /* HAVE_ETHTOOL_25G_BITS */
+#ifdef HAVE_ETHTOOL_NEW_10G_BITS
+		ethtool_link_ksettings_add_link_mode(ks, supported,
+						     10000baseCR_Full);
+		ethtool_link_ksettings_add_link_mode(ks, advertising,
+						     10000baseCR_Full);
+#endif /* HAVE_ETHTOOL_NEW_10G_BITS */
 		break;
 	default:
 		/* if we got here and link is up something bad is afoot */
-		netdev_info(netdev, "WARNING: Link is up but PHY type 0x%x is not recognized.\n",
+		netdev_info(netdev,
+			    "WARNING: Link is up but PHY type 0x%x is not recognized.\n",
 			    hw_link_info->phy_type);
 	}
 
 	/* Now that we've worked out everything that could be supported by the
-	 * current PHY type, get what is supported by the NVM and them to
-	 * get what is truly supported
+	 * current PHY type, get what is supported by the NVM and intersect
+	 * them to get what is truly supported
 	 */
-	i40e_phy_type_to_ethtool(pf, &e_supported,
-				 &e_advertising);
-
-	supported = supported & e_supported;
-	advertising = advertising & e_advertising;
+	memset(&cap_ksettings, 0, sizeof(struct ethtool_link_ksettings));
+	i40e_phy_type_to_ethtool(pf, &cap_ksettings);
+	ethtool_intersect_link_masks(ks, &cap_ksettings);
 
 	/* Set speed and duplex */
 	switch (link_speed) {
 	case I40E_LINK_SPEED_40GB:
-		cmd->base.speed = SPEED_40000;
+		ks->base.speed = SPEED_40000;
 		break;
 	case I40E_LINK_SPEED_25GB:
-#ifdef SPEED_25000
-		cmd->base.speed = SPEED_25000;
-#else
-		netdev_info(netdev,
-			    "Speed is 25G, display not supported by this version of ethtool.\n");
-#endif
+		ks->base.speed = SPEED_25000;
 		break;
 	case I40E_LINK_SPEED_20GB:
-		cmd->base.speed = SPEED_20000;
+		ks->base.speed = SPEED_20000;
 		break;
 	case I40E_LINK_SPEED_10GB:
-		cmd->base.speed = SPEED_10000;
+		ks->base.speed = SPEED_10000;
+		break;
+	case I40E_LINK_SPEED_5GB:
+		ks->base.speed = SPEED_5000;
+		break;
+	case I40E_LINK_SPEED_2_5GB:
+		ks->base.speed = SPEED_2500;
 		break;
 	case I40E_LINK_SPEED_1GB:
-		cmd->base.speed = SPEED_1000;
+		ks->base.speed = SPEED_1000;
 		break;
 	case I40E_LINK_SPEED_100MB:
-		cmd->base.speed = SPEED_100;
+		ks->base.speed = SPEED_100;
 		break;
 	default:
+		ks->base.speed = SPEED_UNKNOWN;
 		break;
 	}
-	cmd->base.duplex = DUPLEX_FULL;
-
-	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
-						supported);
-	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
-						advertising);
+	ks->base.duplex = DUPLEX_FULL;
 }
 
 /**
- * i40e_get_settings_link_down - Get the Link settings for when link is down
+ * i40e_get_settings_link_down - Get the Link settings when link is down
  * @hw: hw structure
- * @ecmd: ethtool command to fill in
+ * @ks: ethtool ksettings to fill in
+ * @pf: pointer to physical function struct
  *
  * Reports link settings that can be determined when link is down
  **/
 static void i40e_get_settings_link_down(struct i40e_hw *hw,
-					struct ethtool_link_ksettings *cmd,
+					struct ethtool_link_ksettings *ks,
 					struct i40e_pf *pf)
 {
-	u32 supported, advertising;
-
 	/* link is down and the driver needs to fall back on
 	 * supported phy types to figure out what info to display
 	 */
-	i40e_phy_type_to_ethtool(pf, &supported, &advertising);
-
-	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
-						supported);
-	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
-						advertising);
-
+	i40e_phy_type_to_ethtool(pf, ks);
 	/* With no link speed and duplex are unknown */
-	cmd->base.speed = SPEED_UNKNOWN;
-	cmd->base.duplex = DUPLEX_UNKNOWN;
+	ks->base.speed = SPEED_UNKNOWN;
+	ks->base.duplex = DUPLEX_UNKNOWN;
 }
 
 /**
- * i40e_get_settings - Get Link Speed and Duplex settings
+ * i40e_get_link_ksettings - Get Link Speed and Duplex settings
  * @netdev: network interface device structure
- * @ecmd: ethtool command
+ * @ks: ethtool ksettings
  *
  * Reports speed/duplex settings based on media_type
  **/
 static int i40e_get_link_ksettings(struct net_device *netdev,
-				   struct ethtool_link_ksettings *cmd)
+				   struct ethtool_link_ksettings *ks)
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_pf *pf = np->vsi->back;
 	struct i40e_hw *hw = &pf->hw;
 	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
 	bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
-	u32 advertising;
+
+	ethtool_link_ksettings_zero_link_mode(ks, supported);
+	ethtool_link_ksettings_zero_link_mode(ks, advertising);
 
 	if (link_up)
-		i40e_get_settings_link_up(hw, cmd, netdev, pf);
+		i40e_get_settings_link_up(hw, ks, netdev, pf);
 	else
-		i40e_get_settings_link_down(hw, cmd, pf);
+		i40e_get_settings_link_down(hw, ks, pf);
 
 	/* Now set the settings that don't rely on link being up/down */
 	/* Set autoneg settings */
-	cmd->base.autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
-			  AUTONEG_ENABLE : AUTONEG_DISABLE);
+	ks->base.autoneg = (hw_link_info->an_info & I40E_AQ_AN_COMPLETED ?
+			    AUTONEG_ENABLE : AUTONEG_DISABLE);
 
+	/* Set media type settings */
 	switch (hw->phy.media_type) {
 	case I40E_MEDIA_TYPE_BACKPLANE:
-		ethtool_link_ksettings_add_link_mode(cmd, supported,
-						     Autoneg);
-		ethtool_link_ksettings_add_link_mode(cmd, supported,
+		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+		ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
+		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+		ethtool_link_ksettings_add_link_mode(ks, advertising,
 						     Backplane);
-		ethtool_link_ksettings_add_link_mode(cmd, advertising,
-						     Autoneg);
-		ethtool_link_ksettings_add_link_mode(cmd, advertising,
-						     Backplane);
-		cmd->base.port = PORT_NONE;
+		ks->base.port = PORT_NONE;
 		break;
 	case I40E_MEDIA_TYPE_BASET:
-		ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
-		ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
-		cmd->base.port = PORT_TP;
+		ethtool_link_ksettings_add_link_mode(ks, supported, TP);
+		ethtool_link_ksettings_add_link_mode(ks, advertising, TP);
+		ks->base.port = PORT_TP;
 		break;
 	case I40E_MEDIA_TYPE_DA:
 	case I40E_MEDIA_TYPE_CX4:
-		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
-		ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
-		cmd->base.port = PORT_DA;
+		ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
+		ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE);
+		ks->base.port = PORT_DA;
 		break;
 	case I40E_MEDIA_TYPE_FIBER:
-		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
-		cmd->base.port = PORT_FIBRE;
+		ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
+		ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE);
+		ks->base.port = PORT_FIBRE;
 		break;
 	case I40E_MEDIA_TYPE_UNKNOWN:
 	default:
-		cmd->base.port = PORT_OTHER;
+		ks->base.port = PORT_OTHER;
 		break;
 	}
 
 	/* Set flow control settings */
-	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+	ethtool_link_ksettings_add_link_mode(ks, supported, Pause);
 
 	switch (hw->fc.requested_mode) {
 	case I40E_FC_FULL:
-		ethtool_link_ksettings_add_link_mode(cmd, advertising,
-						     Pause);
+		ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
 		break;
 	case I40E_FC_TX_PAUSE:
-		ethtool_link_ksettings_add_link_mode(cmd, advertising,
+		ethtool_link_ksettings_add_link_mode(ks, advertising,
 						     Asym_Pause);
 		break;
 	case I40E_FC_RX_PAUSE:
-		ethtool_link_ksettings_add_link_mode(cmd, advertising,
-						     Pause);
-		ethtool_link_ksettings_add_link_mode(cmd, advertising,
+		ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
+		ethtool_link_ksettings_add_link_mode(ks, advertising,
 						     Asym_Pause);
 		break;
 	default:
-		ethtool_convert_link_mode_to_legacy_u32(
-			&advertising, cmd->link_modes.advertising);
+		ethtool_link_ksettings_del_link_mode(ks, advertising, Pause);
+		ethtool_link_ksettings_del_link_mode(ks, advertising,
+						     Asym_Pause);
+		break;
+	}
+	return 0;
+}
 
-		advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+#ifdef ETHTOOL_GLINKSETTINGS
+/**
+ * i40e_set_link_ksettings - Set Speed and Duplex
+ * @netdev: network interface device structure
+ * @ks: ethtool ksettings
+ *
+ * Set speed/duplex per media_types advertised/forced
+ **/
+static int i40e_set_link_ksettings(struct net_device *netdev,
+				   const struct ethtool_link_ksettings *ks)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_aq_get_phy_abilities_resp abilities;
+	struct ethtool_link_ksettings safe_ks;
+	struct ethtool_link_ksettings copy_ks;
+	struct i40e_aq_set_phy_config config;
+	struct i40e_pf *pf = np->vsi->back;
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_hw *hw = &pf->hw;
+	bool autoneg_changed = false;
+	i40e_status status = 0;
+	int timeout = 50;
+	int err = 0;
+	u8 autoneg;
 
-		ethtool_convert_legacy_u32_to_link_mode(
-			cmd->link_modes.advertising, advertising);
-		break;
+	/* Changing port settings is not supported if this isn't the
+	 * port's controlling PF
+	 */
+	if (hw->partition_id != 1) {
+		i40e_partition_setting_complaint(pf);
+		return -EOPNOTSUPP;
+	}
+	if (vsi != pf->vsi[pf->lan_vsi])
+		return -EOPNOTSUPP;
+	if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
+	    hw->phy.media_type != I40E_MEDIA_TYPE_FIBER &&
+	    hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE &&
+	    hw->phy.media_type != I40E_MEDIA_TYPE_DA &&
+	    hw->phy.link_info.link_info & I40E_AQ_LINK_UP)
+		return -EOPNOTSUPP;
+	if (hw->device_id == I40E_DEV_ID_KX_B ||
+	    hw->device_id == I40E_DEV_ID_KX_C ||
+	    hw->device_id == I40E_DEV_ID_20G_KR2 ||
+	    hw->device_id == I40E_DEV_ID_20G_KR2_A ||
+	    hw->device_id == I40E_DEV_ID_25G_B ||
+	    hw->device_id == I40E_DEV_ID_KX_X722) {
+		netdev_info(netdev, "Changing settings is not supported on backplane.\n");
+		return -EOPNOTSUPP;
+	}
+
+	/* copy the ksettings to copy_ks to avoid modifying the origin */
+	memcpy(&copy_ks, ks, sizeof(struct ethtool_link_ksettings));
+
+	/* save autoneg out of ksettings */
+	autoneg = copy_ks.base.autoneg;
+
+	/* get our own copy of the bits to check against */
+	memset(&safe_ks, 0, sizeof(struct ethtool_link_ksettings));
+	safe_ks.base.cmd = copy_ks.base.cmd;
+	safe_ks.base.link_mode_masks_nwords =
+		copy_ks.base.link_mode_masks_nwords;
+	i40e_get_link_ksettings(netdev, &safe_ks);
+
+	/* Get the link modes supported by hardware and check them against
+	 * the modes requested by the user.  Return an error if an
+	 * unsupported mode was requested.
+	 */
+	if (!bitmap_subset(copy_ks.link_modes.advertising,
+			   safe_ks.link_modes.supported,
+			   __ETHTOOL_LINK_MODE_MASK_NBITS))
+		return -EINVAL;
+
+	/* set autoneg back to what it currently is */
+	copy_ks.base.autoneg = safe_ks.base.autoneg;
+
+	/* If copy_ks.base and safe_ks.base are not the same now, then they are
+	 * trying to set something that we do not support.
+	 */
+	if (memcmp(&copy_ks.base, &safe_ks.base,
+		   sizeof(struct ethtool_link_settings)))
+		return -EOPNOTSUPP;
+
+	while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
+		timeout--;
+		if (!timeout)
+			return -EBUSY;
+		usleep_range(1000, 2000);
+	}
+
+	/* Get the current phy config */
+	status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
+					      NULL);
+	if (status) {
+		err = -EAGAIN;
+		goto done;
+	}
+
+	/* Copy abilities to config in case the autoneg branch below
+	 * does not update it
+	 */
+	memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
+	config.abilities = abilities.abilities;
+
+	/* Check autoneg */
+	if (autoneg == AUTONEG_ENABLE) {
+		/* If autoneg was not already enabled */
+		if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) {
+			/* If autoneg is not supported, return error */
+			if (!ethtool_link_ksettings_test_link_mode(
+				      &safe_ks, supported, Autoneg)) {
+				netdev_info(netdev, "Autoneg not supported on this phy\n");
+				err = -EINVAL;
+				goto done;
+			}
+			/* Autoneg is allowed to change */
+			config.abilities = abilities.abilities |
+					   I40E_AQ_PHY_ENABLE_AN;
+			autoneg_changed = true;
+		}
+	} else {
+		/* If autoneg is currently enabled */
+		if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) {
+			/* If autoneg is supported, 10GBASE_T is the only PHY
+			 * that can disable it, so otherwise return an error
+			 */
+			if (ethtool_link_ksettings_test_link_mode(
+				     &safe_ks, supported, Autoneg) &&
+			    hw->phy.link_info.phy_type !=
+			    I40E_PHY_TYPE_10GBASE_T) {
+				netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
+				err = -EINVAL;
+				goto done;
+			}
+			/* Autoneg is allowed to change */
+			config.abilities = abilities.abilities &
+					   ~I40E_AQ_PHY_ENABLE_AN;
+			autoneg_changed = true;
+		}
+	}
+
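+	/* Collect the requested link speeds from the advertised link modes */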
+	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+						  100baseT_Full))
+		config.link_speed |= I40E_LINK_SPEED_100MB;
+	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+						  1000baseT_Full) ||
+#ifdef HAVE_ETHTOOL_NEW_10G_BITS
+	    ethtool_link_ksettings_test_link_mode(ks, advertising,
+						  1000baseX_Full) ||
+#endif
+	    ethtool_link_ksettings_test_link_mode(ks, advertising,
+						  1000baseKX_Full))
+		config.link_speed |= I40E_LINK_SPEED_1GB;
+	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+						  10000baseT_Full) ||
+	    ethtool_link_ksettings_test_link_mode(ks, advertising,
+						  10000baseKX4_Full) ||
+	    ethtool_link_ksettings_test_link_mode(ks, advertising,
+						  10000baseKR_Full) ||
+#ifdef HAVE_ETHTOOL_NEW_10G_BITS
+	    ethtool_link_ksettings_test_link_mode(ks, advertising,
+						  10000baseCR_Full) ||
+	    ethtool_link_ksettings_test_link_mode(ks, advertising,
+						  10000baseSR_Full) ||
+	    ethtool_link_ksettings_test_link_mode(ks, advertising,
+						  10000baseLR_Full))
+#else
+	    0)
+#endif /* HAVE_ETHTOOL_NEW_10G_BITS */
+		config.link_speed |= I40E_LINK_SPEED_10GB;
+#ifdef HAVE_ETHTOOL_NEW_2500MB_BITS
+	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+						  2500baseT_Full))
+		config.link_speed |= I40E_LINK_SPEED_2_5GB;
+#endif /* HAVE_ETHTOOL_NEW_2500MB_BITS */
+#ifdef HAVE_ETHTOOL_5G_BITS
+	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+						  5000baseT_Full))
+		config.link_speed |= I40E_LINK_SPEED_5GB;
+#endif /* HAVE_ETHTOOL_5G_BITS */
+	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+						  20000baseKR2_Full))
+		config.link_speed |= I40E_LINK_SPEED_20GB;
+#ifdef HAVE_ETHTOOL_25G_BITS
+	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+						  25000baseCR_Full) ||
+	    ethtool_link_ksettings_test_link_mode(ks, advertising,
+						  25000baseKR_Full) ||
+	    ethtool_link_ksettings_test_link_mode(ks, advertising,
+						  25000baseSR_Full))
+		config.link_speed |= I40E_LINK_SPEED_25GB;
+#endif /* HAVE_ETHTOOL_25G_BITS */
+	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+						  40000baseKR4_Full) ||
+	    ethtool_link_ksettings_test_link_mode(ks, advertising,
+						  40000baseCR4_Full) ||
+	    ethtool_link_ksettings_test_link_mode(ks, advertising,
+						  40000baseSR4_Full) ||
+	    ethtool_link_ksettings_test_link_mode(ks, advertising,
+						  40000baseLR4_Full))
+		config.link_speed |= I40E_LINK_SPEED_40GB;
+
+	/* If speed didn't get set, set it to what it currently is.
+	 * This is needed because if advertise is 0 (as it is when autoneg
+	 * is disabled) then speed won't get set.
+	 */
+	if (!config.link_speed)
+		config.link_speed = abilities.link_speed;
+	if (autoneg_changed || (abilities.link_speed != config.link_speed)) {
+		/* copy over the rest of the abilities */
+		config.phy_type = abilities.phy_type;
+		config.phy_type_ext = abilities.phy_type_ext;
+		config.eee_capability = abilities.eee_capability;
+		config.eeer = abilities.eeer_val;
+		config.low_power_ctrl = abilities.d3_lpan;
+		config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
+				    I40E_AQ_PHY_FEC_CONFIG_MASK;
+
+		/* save the requested speeds */
+		hw->phy.link_info.requested_speeds = config.link_speed;
+		/* set link and auto negotiation so changes take effect */
+		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+		/* If link is up put link down */
+		if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP) {
+			/* Tell the OS link is going down, the link will go
+			 * back up when fw says it is ready asynchronously
+			 */
+			i40e_print_link_message(vsi, false);
+			netif_carrier_off(netdev);
+			netif_tx_stop_all_queues(netdev);
+		}
+
+		/* make the aq call */
+		status = i40e_aq_set_phy_config(hw, &config, NULL);
+		if (status) {
+			netdev_info(netdev,
+				    "Set phy config failed, err %s aq_err %s\n",
+				    i40e_stat_str(hw, status),
+				    i40e_aq_str(hw, hw->aq.asq_last_status));
+			err = -EAGAIN;
+			goto done;
+		}
+
+		status = i40e_update_link_info(hw);
+		if (status)
+			netdev_dbg(netdev,
+				   "Updating link info failed with err %s aq_err %s\n",
+				   i40e_stat_str(hw, status),
+				   i40e_aq_str(hw, hw->aq.asq_last_status));
+
+	} else {
+		netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
 	}
 
+done:
+	clear_bit(__I40E_CONFIG_BUSY, pf->state);
+
+	return err;
+}
+
+#else /* ETHTOOL_GLINKSETTINGS */
+/**
+ * i40e_get_settings - Get Link Speed and Duplex settings
+ * @netdev: network interface device structure
+ * @ecmd: ethtool command
+ *
+ * Reports speed/duplex settings based on media_type.  Since the new API
+ * constructs have been backported for use with the old API, this ends up
+ * being just a wrapper around i40e_get_link_ksettings.
+ **/
+static int i40e_get_settings(struct net_device *netdev,
+			     struct ethtool_cmd *ecmd)
+{
+	struct ethtool_link_ksettings ks;
+
+	i40e_get_link_ksettings(netdev, &ks);
+	_kc_ethtool_ksettings_to_cmd(&ks, ecmd);
+	ecmd->transceiver = XCVR_EXTERNAL;
 	return 0;
 }
 
@@ -686,8 +1332,8 @@ static int i40e_get_link_ksettings(struct net_device *netdev,
  *
  * Set speed/duplex per media_types advertised/forced
  **/
-static int i40e_set_link_ksettings(struct net_device *netdev,
-				   const struct ethtool_link_ksettings *cmd)
+static int i40e_set_settings(struct net_device *netdev,
+			     struct ethtool_cmd *ecmd)
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_aq_get_phy_abilities_resp abilities;
@@ -695,15 +1341,14 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
 	struct i40e_pf *pf = np->vsi->back;
 	struct i40e_vsi *vsi = np->vsi;
 	struct i40e_hw *hw = &pf->hw;
-	struct ethtool_link_ksettings safe_cmd;
-	struct ethtool_link_ksettings copy_cmd;
+	struct ethtool_cmd safe_ecmd;
 	i40e_status status = 0;
 	bool change = false;
 	int timeout = 50;
 	int err = 0;
-	u32 autoneg;
+	u8 autoneg;
 	u32 advertise;
-	u32 tmp;
+	u32 old_ethtool_advertising = 0;
 
 	/* Changing port settings is not supported if this isn't the
 	 * port's controlling PF
@@ -731,31 +1376,40 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
 		return -EOPNOTSUPP;
 	}
 
-	/* copy the cmd to copy_cmd to avoid modifying the origin */
-	memcpy(&copy_cmd, cmd, sizeof(struct ethtool_link_ksettings));
-
 	/* get our own copy of the bits to check against */
-	memset(&safe_cmd, 0, sizeof(struct ethtool_link_ksettings));
-	i40e_get_link_ksettings(netdev, &safe_cmd);
+	memset(&safe_ecmd, 0, sizeof(struct ethtool_cmd));
+	i40e_get_settings(netdev, &safe_ecmd);
 
-	/* save autoneg and speed out of cmd */
-	autoneg = cmd->base.autoneg;
-	ethtool_convert_link_mode_to_legacy_u32(&advertise,
-						cmd->link_modes.advertising);
+	/* save autoneg and speed out of ecmd */
+	autoneg = ecmd->autoneg;
+	advertise = ecmd->advertising;
 
 	/* set autoneg and speed back to what they currently are */
-	copy_cmd.base.autoneg = safe_cmd.base.autoneg;
-	ethtool_convert_link_mode_to_legacy_u32(
-		&tmp, safe_cmd.link_modes.advertising);
-	ethtool_convert_legacy_u32_to_link_mode(
-		copy_cmd.link_modes.advertising, tmp);
-
-	copy_cmd.base.cmd = safe_cmd.base.cmd;
-
-	/* If copy_cmd and safe_cmd are not the same now, then they are
+	ecmd->autoneg = safe_ecmd.autoneg;
+	ecmd->advertising = safe_ecmd.advertising;
+
+	/* Due to a bug in ethtool versions < 3.6, this check is necessary */
+	old_ethtool_advertising = ecmd->supported &
+				  (ADVERTISED_10baseT_Half |
+				   ADVERTISED_10baseT_Full |
+				   ADVERTISED_100baseT_Half |
+				   ADVERTISED_100baseT_Full |
+				   ADVERTISED_1000baseT_Half |
+				   ADVERTISED_1000baseT_Full |
+				   ADVERTISED_2500baseX_Full |
+				   ADVERTISED_10000baseT_Full);
+	old_ethtool_advertising |= ADVERTISED_20000baseMLD2_Full |
+				   ADVERTISED_20000baseKR2_Full;
+
+	if (advertise == old_ethtool_advertising)
+		netdev_info(netdev, "If you are not setting advertising to %x then you may have an old version of ethtool. Please update.\n",
+			    advertise);
+	ecmd->cmd = safe_ecmd.cmd;
+	/* If ecmd and safe_ecmd are not the same now, then they are
 	 * trying to set something that we do not support
 	 */
-	if (memcmp(&copy_cmd, &safe_cmd, sizeof(struct ethtool_link_ksettings)))
+	if (memcmp(ecmd, &safe_ecmd, sizeof(struct ethtool_cmd)))
 		return -EOPNOTSUPP;
 
 	while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
@@ -784,8 +1438,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
 		/* If autoneg was not already enabled */
 		if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) {
 			/* If autoneg is not supported, return error */
-			if (!ethtool_link_ksettings_test_link_mode(
-				    &safe_cmd, supported, Autoneg)) {
+			if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) {
 				netdev_info(netdev, "Autoneg not supported on this phy\n");
 				err = -EINVAL;
 				goto done;
@@ -798,11 +1451,10 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
 	} else {
 		/* If autoneg is currently enabled */
 		if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) {
-			/* If autoneg is supported 10GBASE_T is the only PHY
+			/* If autoneg is supported 10GBASE_T is the only phy
 			 * that can disable it, so otherwise return error
 			 */
-			if (ethtool_link_ksettings_test_link_mode(
-				    &safe_cmd, supported, Autoneg) &&
+			if (safe_ecmd.supported & SUPPORTED_Autoneg &&
 			    hw->phy.link_info.phy_type !=
 			    I40E_PHY_TYPE_10GBASE_T) {
 				netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
@@ -816,9 +1468,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
 		}
 	}
 
-	ethtool_convert_link_mode_to_legacy_u32(&tmp,
-						safe_cmd.link_modes.supported);
-	if (advertise & ~tmp) {
+	if (advertise & ~safe_ecmd.supported) {
 		err = -EINVAL;
 		goto done;
 	}
@@ -847,7 +1497,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
 	if (!config.link_speed)
 		config.link_speed = abilities.link_speed;
 
-	if (change || (abilities.link_speed != config.link_speed)) {
+	if (change || abilities.link_speed != config.link_speed) {
 		/* copy over the rest of the abilities */
 		config.phy_type = abilities.phy_type;
 		config.phy_type_ext = abilities.phy_type_ext;
@@ -897,6 +1547,157 @@ done:
 	return err;
 }
 
+#endif /* ETHTOOL_GLINKSETTINGS */
+
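+/**
+ * i40e_set_fec_cfg - apply a requested FEC configuration to the PHY
+ * @netdev: network interface device structure
+ * @fec_cfg: requested FEC configuration bits (I40E_AQ_SET_FEC_*)
+ *
+ * Reads the current PHY abilities and, when the FEC configuration differs,
+ * writes a new PHY config carrying the requested FEC bits.
+ **/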
+static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_aq_get_phy_abilities_resp abilities;
+	struct i40e_pf *pf = np->vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	i40e_status status = 0;
+	u64 flags = 0;
+	int err = 0;
+
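+	/* Stage the requested FEC mode in a local copy of the PF flags; it
+	 * is committed to pf->flags only after firmware accepts the config.
+	 */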
+	flags = READ_ONCE(pf->flags);
+	i40e_set_fec_in_flags(fec_cfg, &flags);
+
+	/* Get the current phy config */
+	memset(&abilities, 0, sizeof(abilities));
+	status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
+					      NULL);
+	if (status) {
+		err = -EAGAIN;
+		goto done;
+	}
+
+	if (abilities.fec_cfg_curr_mod_ext_info != fec_cfg) {
+		struct i40e_aq_set_phy_config config;
+
+		memset(&config, 0, sizeof(config));
+		config.phy_type = abilities.phy_type;
+		config.abilities = abilities.abilities;
+		config.phy_type_ext = abilities.phy_type_ext;
+		config.link_speed = abilities.link_speed;
+		config.eee_capability = abilities.eee_capability;
+		config.eeer = abilities.eeer_val;
+		config.low_power_ctrl = abilities.d3_lpan;
+		config.fec_config = fec_cfg & I40E_AQ_PHY_FEC_CONFIG_MASK;
+		status = i40e_aq_set_phy_config(hw, &config, NULL);
+		if (status) {
+			netdev_info(netdev,
+				    "Set phy config failed, err %s aq_err %s\n",
+				    i40e_stat_str(hw, status),
+				    i40e_aq_str(hw, hw->aq.asq_last_status));
+			err = -EAGAIN;
+			goto done;
+		}
+		pf->flags = flags;
+		status = i40e_update_link_info(hw);
+		if (status)
+			/* log at debug level only, since the failure relates
+			 * to the link itself rather than to the FEC settings
+			 * (e.g. no physical connection)
+			 */
+			netdev_dbg(netdev,
+				   "Updating link info failed with err %s aq_err %s\n",
+				   i40e_stat_str(hw, status),
+				   i40e_aq_str(hw, hw->aq.asq_last_status));
+	}
+
+done:
+	return err;
+}
+
+#ifdef ETHTOOL_GFECPARAM
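+/**
+ * i40e_get_fec_param - report the configured and active FEC modes
+ * @netdev: network interface device structure
+ * @fecparam: ethtool FEC parameter structure to fill in
+ **/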
+static int i40e_get_fec_param(struct net_device *netdev,
+			      struct ethtool_fecparam *fecparam)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_aq_get_phy_abilities_resp abilities;
+	struct i40e_pf *pf = np->vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	i40e_status status = 0;
+	int err = 0;
+	u8 fec_cfg;
+
+	/* Get the current phy config */
+	memset(&abilities, 0, sizeof(abilities));
+	status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
+					      NULL);
+	if (status) {
+		err = -EAGAIN;
+		goto done;
+	}
+
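+	/* Translate the firmware FEC config bits into ethtool FEC flags */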
+	fecparam->fec = 0;
+	fec_cfg = abilities.fec_cfg_curr_mod_ext_info;
+	if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
+		fecparam->fec |= ETHTOOL_FEC_AUTO;
+	else if (fec_cfg & (I40E_AQ_SET_FEC_REQUEST_RS |
+		 I40E_AQ_SET_FEC_ABILITY_RS))
+		fecparam->fec |= ETHTOOL_FEC_RS;
+	else if (fec_cfg & (I40E_AQ_SET_FEC_REQUEST_KR |
+		 I40E_AQ_SET_FEC_ABILITY_KR))
+		fecparam->fec |= ETHTOOL_FEC_BASER;
+	if (fec_cfg == 0)
+		fecparam->fec |= ETHTOOL_FEC_OFF;
+
+	if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
+		fecparam->active_fec = ETHTOOL_FEC_BASER;
+	else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
+		fecparam->active_fec = ETHTOOL_FEC_RS;
+	else
+		fecparam->active_fec = ETHTOOL_FEC_OFF;
+done:
+	return err;
+}
+
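+/**
+ * i40e_set_fec_param - map the requested ethtool FEC mode to AQ FEC bits
+ * @netdev: network interface device structure
+ * @fecparam: ethtool FEC parameter structure carrying the requested mode
+ **/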
+static int i40e_set_fec_param(struct net_device *netdev,
+			      struct ethtool_fecparam *fecparam)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf *pf = np->vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	u8 fec_cfg = 0;
+	int err = 0;
+
+	if (hw->device_id != I40E_DEV_ID_25G_SFP28 &&
+	    hw->device_id != I40E_DEV_ID_25G_B) {
+		err = -EPERM;
+		goto done;
+	}
+
+	switch (fecparam->fec) {
+	case ETHTOOL_FEC_AUTO:
+		fec_cfg = I40E_AQ_SET_FEC_AUTO;
+		break;
+	case ETHTOOL_FEC_RS:
+		fec_cfg = (I40E_AQ_SET_FEC_REQUEST_RS |
+			     I40E_AQ_SET_FEC_ABILITY_RS);
+		break;
+	case ETHTOOL_FEC_BASER:
+		fec_cfg = (I40E_AQ_SET_FEC_REQUEST_KR |
+			     I40E_AQ_SET_FEC_ABILITY_KR);
+		break;
+	case ETHTOOL_FEC_OFF:
+	case ETHTOOL_FEC_NONE:
+		fec_cfg = 0;
+		break;
+	default:
+		dev_warn(&pf->pdev->dev, "Unsupported FEC mode: %d",
+			 fecparam->fec);
+		err = -EINVAL;
+		goto done;
+	}
+
+	err = i40e_set_fec_cfg(netdev, fec_cfg);
+
+done:
+	return err;
+}
+#endif /* ETHTOOL_GFECPARAM */
+
 static int i40e_nway_reset(struct net_device *netdev)
 {
 	/* restart autonegotiation */
@@ -919,6 +1720,9 @@ static int i40e_nway_reset(struct net_device *netdev)
 
 /**
  * i40e_get_pauseparam -  Get Flow Control status
+ * @netdev: netdevice structure
+ * @pause: buffer to return pause parameters
+ *
  * Return tx/rx-pause status
  **/
 static void i40e_get_pauseparam(struct net_device *netdev,
@@ -927,12 +1731,9 @@ static void i40e_get_pauseparam(struct net_device *netdev,
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_pf *pf = np->vsi->back;
 	struct i40e_hw *hw = &pf->hw;
-	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
 	struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
 
-	pause->autoneg =
-		((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
-		  AUTONEG_ENABLE : AUTONEG_DISABLE);
+	pause->autoneg = hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED;
 
 	/* PFC enabled so report LFC as off */
 	if (dcbx_cfg->pfc.pfcenable) {
@@ -969,6 +1770,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
 	i40e_status status;
 	u8 aq_failures;
 	int err = 0;
+	u32 is_an;
 
 	/* Changing the port's flow control is not supported if this isn't the
 	 * port's controlling PF
@@ -981,22 +1783,21 @@ static int i40e_set_pauseparam(struct net_device *netdev,
 	if (vsi != pf->vsi[pf->lan_vsi])
 		return -EOPNOTSUPP;
 
-	if (pause->autoneg != ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
-	    AUTONEG_ENABLE : AUTONEG_DISABLE)) {
+	is_an = hw_link_info->an_info & I40E_AQ_AN_COMPLETED;
+	if (pause->autoneg != is_an) {
 		netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
 		return -EOPNOTSUPP;
 	}
 
 	/* If we have link and don't have autoneg */
-	if (!test_bit(__I40E_DOWN, pf->state) &&
-	    !(hw_link_info->an_info & I40E_AQ_AN_COMPLETED)) {
+	if (!test_bit(__I40E_DOWN, pf->state) && !is_an) {
 		/* Send message that it might not necessarily work*/
 		netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
 	}
 
 	if (dcbx_cfg->pfc.pfcenable) {
 		netdev_info(netdev,
-			    "Priority flow control enabled. Cannot set link flow control.\n");
+			 "Priority flow control enabled. Cannot set link flow control.\n");
 		return -EOPNOTSUPP;
 	}
 
@@ -1009,7 +1810,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
 	else if (!pause->rx_pause && !pause->tx_pause)
 		hw->fc.requested_mode = I40E_FC_NONE;
 	else
-		 return -EINVAL;
+		return -EINVAL;
 
 	/* Tell the OS link is going down, the link will go back up when fw
 	 * says it is ready asynchronously
@@ -1018,46 +1819,165 @@ static int i40e_set_pauseparam(struct net_device *netdev,
 	netif_carrier_off(netdev);
 	netif_tx_stop_all_queues(netdev);
 
-	/* Set the fc mode and only restart an if link is up*/
-	status = i40e_set_fc(hw, &aq_failures, link_up);
+	/* Set the fc mode and only restart AN if link is up */
+	status = i40e_set_fc(hw, &aq_failures, link_up);
+
+	if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
+		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
+			    i40e_stat_str(hw, status),
+			    i40e_aq_str(hw, hw->aq.asq_last_status));
+		err = -EAGAIN;
+	}
+	if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
+		netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
+			    i40e_stat_str(hw, status),
+			    i40e_aq_str(hw, hw->aq.asq_last_status));
+		err = -EAGAIN;
+	}
+	if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
+		netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
+			    i40e_stat_str(hw, status),
+			    i40e_aq_str(hw, hw->aq.asq_last_status));
+		err = -EAGAIN;
+	}
+
+	if (!test_bit(__I40E_DOWN, pf->state) && is_an) {
+		/* Give it a little more time to try to come back */
+		msleep(75);
+		if (!test_bit(__I40E_DOWN, pf->state))
+			return i40e_nway_reset(netdev);
+	}
+
+	return err;
+}
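+
+/* Illustrative usage (assuming the standard ethtool CLI): the pause
+ * handlers above back "ethtool -a <dev>" and "ethtool -A <dev> rx on
+ * tx off". As enforced in the code, autoneg itself must be changed with
+ * "ethtool -s <dev> autoneg <on|off>", and link flow control cannot be
+ * set while priority flow control (PFC) is enabled.
+ */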
+
+#ifndef HAVE_NDO_SET_FEATURES
+static u32 i40e_get_rx_csum(struct net_device *netdev)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf *pf = np->vsi->back;
+
+	return pf->flags & I40E_FLAG_RX_CSUM_ENABLED;
+}
+
+static int i40e_set_rx_csum(struct net_device *netdev, u32 data)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf *pf = np->vsi->back;
+
+	if (data)
+		pf->flags |= I40E_FLAG_RX_CSUM_ENABLED;
+	else
+		pf->flags &= ~I40E_FLAG_RX_CSUM_ENABLED;
+
+	return 0;
+}
+
+static u32 i40e_get_tx_csum(struct net_device *netdev)
+{
+	return (netdev->features & NETIF_F_IP_CSUM) != 0;
+}
+
+static int i40e_set_tx_csum(struct net_device *netdev, u32 data)
+{
+	if (data) {
+#ifdef NETIF_F_IPV6_CSUM
+		netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+#else
+		netdev->features |= NETIF_F_IP_CSUM;
+#endif
+		netdev->features |= NETIF_F_SCTP_CRC;
+	} else {
+#ifdef NETIF_F_IPV6_CSUM
+		netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+				      NETIF_F_SCTP_CRC);
+#else
+		netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_SCTP_CRC);
+#endif
+	}
+
+	return 0;
+}
 
-	if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
-		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
-			    i40e_stat_str(hw, status),
-			    i40e_aq_str(hw, hw->aq.asq_last_status));
-		err = -EAGAIN;
-	}
-	if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
-		netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
-			    i40e_stat_str(hw, status),
-			    i40e_aq_str(hw, hw->aq.asq_last_status));
-		err = -EAGAIN;
-	}
-	if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
-		netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
-			    i40e_stat_str(hw, status),
-			    i40e_aq_str(hw, hw->aq.asq_last_status));
-		err = -EAGAIN;
+static int i40e_set_tso(struct net_device *netdev, u32 data)
+{
+	if (data) {
+#ifndef HAVE_NDO_FEATURES_CHECK
+		if (netdev->mtu >= 576) {
+			netdev->features |= NETIF_F_TSO;
+			netdev->features |= NETIF_F_TSO6;
+		} else {
+			netdev_info(netdev, "MTU setting is too low to enable TSO\n");
+		}
+#else
+		netdev->features |= NETIF_F_TSO;
+		netdev->features |= NETIF_F_TSO6;
+#endif
+	} else {
+#ifndef HAVE_NETDEV_VLAN_FEATURES
+		struct i40e_netdev_priv *np = netdev_priv(netdev);
+		/* disable TSO on all VLANs if they're present */
+		if (np->vsi->vlgrp) {
+			int i;
+			struct net_device *v_netdev;
+
+			for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
+				v_netdev =
+				       vlan_group_get_device(np->vsi->vlgrp, i);
+				if (v_netdev) {
+					v_netdev->features &= ~NETIF_F_TSO;
+					v_netdev->features &= ~NETIF_F_TSO6;
+					vlan_group_set_device(np->vsi->vlgrp, i,
+							      v_netdev);
+				}
+			}
+		}
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
+		netdev->features &= ~NETIF_F_TSO;
+		netdev->features &= ~NETIF_F_TSO6;
 	}
 
-	if (!test_bit(__I40E_DOWN, pf->state)) {
-		/* Give it a little more time to try to come back */
-		msleep(75);
-		if (!test_bit(__I40E_DOWN, pf->state))
-			return i40e_nway_reset(netdev);
-	}
+	return 0;
+}
+#ifdef ETHTOOL_GFLAGS
+static int i40e_set_flags(struct net_device *netdev, u32 data)
+{
+#ifdef ETHTOOL_GRXRINGS
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf *pf = np->vsi->back;
+#endif
+	u32 supported_flags = 0;
+	bool need_reset = false;
+	int rc;
 
-	return err;
+#ifdef NETIF_F_RXHASH
+	supported_flags |= ETH_FLAG_RXHASH;
+
+#endif
+#ifdef ETHTOOL_GRXRINGS
+	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+		supported_flags |= ETH_FLAG_NTUPLE;
+#endif
+	rc = ethtool_op_set_flags(netdev, data, supported_flags);
+	if (rc)
+		return rc;
+
+	/* if state changes we need to update pf->flags and maybe reset */
+#ifdef ETHTOOL_GRXRINGS
+	need_reset = i40e_set_ntuple(pf, netdev->features);
+#endif /* ETHTOOL_GRXRINGS */
+	if (need_reset)
+		i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
+
+	return 0;
 }
+#endif /* ETHTOOL_GFLAGS */
 
+#endif /* HAVE_NDO_SET_FEATURES */
 static u32 i40e_get_msglevel(struct net_device *netdev)
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_pf *pf = np->vsi->back;
-	u32 debug_mask = pf->hw.debug_mask;
-
-	if (debug_mask)
-		netdev_info(netdev, "i40e debug_mask: 0x%08X\n", debug_mask);
 
 	return pf->msg_enable;
 }
@@ -1069,8 +1989,7 @@ static void i40e_set_msglevel(struct net_device *netdev, u32 data)
 
 	if (I40E_DEBUG_USER & data)
 		pf->hw.debug_mask = data;
-	else
-		pf->msg_enable = data;
+	pf->msg_enable = data;
 }
 
 static int i40e_get_regs_len(struct net_device *netdev)
@@ -1277,9 +2196,11 @@ static void i40e_get_drvinfo(struct net_device *netdev,
 		sizeof(drvinfo->fw_version));
 	strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
 		sizeof(drvinfo->bus_info));
+#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
 	drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
 	if (pf->hw.pf_id == 0)
 		drvinfo->n_priv_flags += I40E_GL_PRIV_FLAGS_STR_LEN;
+#endif
 }
 
 static void i40e_get_ringparam(struct net_device *netdev,
@@ -1360,6 +2281,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
 			if (i40e_enabled_xdp_vsi(vsi))
 				vsi->xdp_rings[i]->count = new_tx_count;
 		}
+		vsi->num_tx_desc = new_tx_count;
+		vsi->num_rx_desc = new_rx_count;
 		goto done;
 	}
 
@@ -1373,7 +2296,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
 			       (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
 	if (new_tx_count != vsi->tx_rings[0]->count) {
 		netdev_info(netdev,
-			    "Changing Tx descriptor count from %d to %d.\n",
+			    "Changing Tx descriptor count from %d to %d\n",
 			    vsi->tx_rings[0]->count, new_tx_count);
 		tx_rings = kcalloc(tx_alloc_queue_pairs,
 				   sizeof(struct i40e_ring), GFP_KERNEL);
@@ -1385,7 +2308,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
 		for (i = 0; i < tx_alloc_queue_pairs; i++) {
 			if (!i40e_active_tx_ring_index(vsi, i))
 				continue;
-
+			/* clone ring and setup updated count */
 			tx_rings[i] = *vsi->tx_rings[i];
 			tx_rings[i].count = new_tx_count;
 			/* the desc and bi pointers will be reallocated in the
@@ -1422,7 +2345,6 @@ static int i40e_set_ringparam(struct net_device *netdev,
 		}
 
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			struct i40e_ring *ring;
 			u16 unused;
 
 			/* clone ring and setup updated count */
@@ -1433,6 +2355,11 @@ static int i40e_set_ringparam(struct net_device *netdev,
 			 */
 			rx_rings[i].desc = NULL;
 			rx_rings[i].rx_bi = NULL;
+#ifdef HAVE_XDP_BUFF_RXQ
+			/* Clear cloned XDP RX-queue info before setup call */
+			memset(&rx_rings[i].xdp_rxq, 0,
+			       sizeof(rx_rings[i].xdp_rxq));
+#endif
 			/* this is to allow wr32 to have something to write to
 			 * during early allocation of Rx buffers
 			 */
@@ -1444,9 +2371,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
 			/* now allocate the Rx buffers to make sure the OS
 			 * has enough memory, any failure here means abort
 			 */
-			ring = &rx_rings[i];
-			unused = I40E_DESC_UNUSED(ring);
-			err = i40e_alloc_rx_buffers(ring, unused);
+			unused = I40E_DESC_UNUSED(&rx_rings[i]);
+			err = i40e_alloc_rx_buffers(&rx_rings[i], unused);
 rx_unwind:
 			if (err) {
 				do {
@@ -1496,6 +2422,8 @@ rx_unwind:
 		rx_rings = NULL;
 	}
 
+	vsi->num_tx_desc = new_tx_count;
+	vsi->num_rx_desc = new_rx_count;
 	i40e_up(vsi);
 
 free_tx:
@@ -1515,6 +2443,67 @@ done:
 	return err;
 }
 
+/**
+ * i40e_get_stats_count - return the stats count for a device
+ * @netdev: the netdev to return the count for
+ *
+ * Returns the total number of statistics for this netdev. Note that even
+ * though this is a function, the count reported for a specific netdev must
+ * never change. Basing the count on static values such as the maximum
+ * number of queues or the device type is ok. However, the API for
+ * obtaining stats is *not* safe against changes based on non-static
+ * values such as the *current* number of queues, or runtime flags.
+ *
+ * If a statistic is not always enabled, return it as part of the count
+ * anyway, always return its string, and report its value as zero.
+ **/
+static int i40e_get_stats_count(struct net_device *netdev)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	int stats_len;
+
+	if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1)
+		stats_len = I40E_PF_STATS_LEN;
+	else
+		stats_len = I40E_VSI_STATS_LEN;
+
+	/* The number of stats reported for a given net_device must remain
+	 * constant throughout the life of that device.
+	 *
+	 * This is because the API for obtaining the size, strings, and stats
+	 * is spread out over three separate ethtool ioctls. There is no safe
+	 * way to lock the number of stats across these calls, so we must
+	 * assume that they will never change.
+	 *
+	 * Due to this, we report the maximum number of queues, even if not
+	 * every queue is currently configured. Since we always allocate
+	 * queues in pairs, we'll just use netdev->real_num_tx_queues * 2,
+	 * the same bound the stats and strings callbacks below iterate over,
+	 * so the three ethtool calls always agree with each other.
+	 */
+#ifndef I40E_PF_EXTRA_STATS_OFF
+	/* The same applies to the additional stats exposing the VFs' network
+	 * usage counters. To handle these safely we likewise report the
+	 * maximum possible, fixed number of these extra stats items.
+	 */
+#endif /* !I40E_PF_EXTRA_STATS_OFF */
+	stats_len += I40E_QUEUE_STATS_LEN * 2 * netdev->real_num_tx_queues;
+#ifdef HAVE_XDP_SUPPORT
+	stats_len += I40E_QUEUE_STATS_XDP_LEN * netdev->real_num_tx_queues;
+#endif
+
+#ifndef I40E_PF_EXTRA_STATS_OFF
+	if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1)
+		stats_len += I40E_PF_STATS_EXTRA_LEN;
+
+#endif /* !I40E_PF_EXTRA_STATS_OFF */
+	return stats_len;
+}
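+
+/* A minimal sketch (simplified, not the exact kernel code) of why the
+ * count must stay fixed: the ethtool core issues three separate calls,
+ * with no lock held across them:
+ *
+ *	n = ops->get_sset_count(netdev, ETH_SS_STATS);
+ *	data = kmalloc_array(n, sizeof(u64), GFP_KERNEL);
+ *	ops->get_strings(netdev, ETH_SS_STATS, strings);
+ *	ops->get_ethtool_stats(netdev, &stats, data);
+ *
+ * A count that changed between the calls would overflow or misalign the
+ * user-visible buffers.
+ */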
+
+#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
 static int i40e_get_sset_count(struct net_device *netdev, int sset)
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
@@ -1525,16 +2514,7 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
 	case ETH_SS_TEST:
 		return I40E_TEST_LEN;
 	case ETH_SS_STATS:
-		if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) {
-			int len = I40E_PF_STATS_LEN(netdev);
-
-			if ((pf->lan_veb != I40E_NO_VEB) &&
-			    (pf->flags & I40E_FLAG_VEB_STATS_ENABLED))
-				len += I40E_VEB_STATS_TOTAL;
-			return len;
-		} else {
-			return I40E_VSI_STATS_LEN(netdev);
-		}
+		return i40e_get_stats_count(netdev);
 	case ETH_SS_PRIV_FLAGS:
 		return I40E_PRIV_FLAGS_STR_LEN +
 			(pf->hw.pf_id == 0 ? I40E_GL_PRIV_FLAGS_STR_LEN : 0);
@@ -1542,96 +2522,262 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
 		return -EOPNOTSUPP;
 	}
 }
+#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
+
+/**
+ * i40e_get_pfc_stats - copy HW PFC statistics to formatted structure
+ * @pf: the PF device structure
+ * @i: the priority value to copy
+ *
+ * The PFC stats are found as arrays in pf->stats, which is not easy to pass
+ * into i40e_add_ethtool_stats. Produce a formatted i40e_pfc_stats structure
+ * of the PFC stats for the given priority.
+ **/
+static inline struct i40e_pfc_stats
+i40e_get_pfc_stats(struct i40e_pf *pf, unsigned int i)
+{
+#define I40E_GET_PFC_STAT(stat, priority) \
+	.stat = pf->stats.stat[priority]
+
+	struct i40e_pfc_stats pfc = {
+		I40E_GET_PFC_STAT(priority_xon_rx, i),
+		I40E_GET_PFC_STAT(priority_xoff_rx, i),
+		I40E_GET_PFC_STAT(priority_xon_tx, i),
+		I40E_GET_PFC_STAT(priority_xoff_tx, i),
+		I40E_GET_PFC_STAT(priority_xon_2_xoff, i),
+	};
+	return pfc;
+}
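+
+/* For reference, each initializer above expands to a plain designated
+ * initializer, e.g. for priority i:
+ *
+ *	.priority_xon_rx = pf->stats.priority_xon_rx[i],
+ */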
 
+/**
+ * i40e_get_ethtool_stats - copy stat values into supplied buffer
+ * @netdev: the netdev to collect stats for
+ * @stats: ethtool stats command structure
+ * @data: ethtool supplied buffer
+ *
+ * Copy the stats values for this netdev into the buffer. Expects data to be
+ * pre-allocated to the size returned by i40e_get_stats_count. Note that all
+ * statistics must be copied in a static order, and the count must not change
+ * for a given netdev. See i40e_get_stats_count for more details.
+ *
+ * If a statistic is not currently valid (such as a disabled queue), this
+ * function reports its value as zero.
+ **/
 static void i40e_get_ethtool_stats(struct net_device *netdev,
 				   struct ethtool_stats *stats, u64 *data)
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
-	struct i40e_ring *tx_ring, *rx_ring;
 	struct i40e_vsi *vsi = np->vsi;
 	struct i40e_pf *pf = vsi->back;
-	unsigned int j;
-	int i = 0;
-	char *p;
-	struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
-	unsigned int start;
+	struct i40e_veb *veb = NULL;
+#ifndef I40E_PF_EXTRA_STATS_OFF
+	unsigned int vsi_idx;
+	unsigned int vf_idx;
+	unsigned int vf_id;
+	bool is_vf_valid;
+#endif /* !I40E_PF_EXTRA_STATS_OFF */
+	unsigned int i;
+	bool veb_stats;
+	u64 *p = data;
 
 	i40e_update_stats(vsi);
 
-	for (j = 0; j < I40E_NETDEV_STATS_LEN; j++) {
-		p = (char *)net_stats + i40e_gstrings_net_stats[j].stat_offset;
-		data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
-			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
-	}
-	for (j = 0; j < I40E_MISC_STATS_LEN; j++) {
-		p = (char *)vsi + i40e_gstrings_misc_stats[j].stat_offset;
-		data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat ==
-			    sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
-	}
-	rcu_read_lock();
-	for (j = 0; j < vsi->num_queue_pairs; j++) {
-		tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
+	i40e_add_ethtool_stats(&data, i40e_get_vsi_stats_struct(vsi),
+			       i40e_gstrings_net_stats);
 
-		if (!tx_ring)
-			continue;
+	i40e_add_ethtool_stats(&data, vsi, i40e_gstrings_misc_stats);
 
-		/* process Tx ring statistics */
-		do {
-			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
-			data[i] = tx_ring->stats.packets;
-			data[i + 1] = tx_ring->stats.bytes;
-		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
-		i += 2;
-
-		/* Rx ring is the 2nd half of the queue pair */
-		rx_ring = &tx_ring[1];
-		do {
-			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
-			data[i] = rx_ring->stats.packets;
-			data[i + 1] = rx_ring->stats.bytes;
-		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
-		i += 2;
+	rcu_read_lock();
+	for (i = 0; i < netdev->real_num_tx_queues; i++) {
+		i40e_add_queue_stats(&data, READ_ONCE(vsi->tx_rings[i]));
+		i40e_add_queue_stats(&data, READ_ONCE(vsi->rx_rings[i]));
+#ifdef HAVE_XDP_SUPPORT
+		i40e_add_rx_queue_xdp_stats(&data, READ_ONCE(vsi->rx_rings[i]));
+#endif
 	}
 	rcu_read_unlock();
+
 	if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
-		return;
+		goto check_data_pointer;
+
+	veb_stats = ((pf->lan_veb != I40E_NO_VEB) &&
+		     (pf->lan_veb < I40E_MAX_VEB) &&
+		     (pf->flags & I40E_FLAG_VEB_STATS_ENABLED));
+
+	if (veb_stats) {
+		veb = pf->veb[pf->lan_veb];
+		i40e_update_veb_stats(veb);
+	}
+
+	/* If veb stats aren't enabled, pass NULL instead of the veb so that
+	 * we initialize stats to zero and update the data pointer
+	 * intelligently
+	 */
+	i40e_add_ethtool_stats(&data, veb_stats ? veb : NULL,
+			       i40e_gstrings_veb_stats);
+
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+		i40e_add_ethtool_stats(&data, veb_stats ? veb : NULL,
+				       i40e_gstrings_veb_tc_stats);
 
-	if ((pf->lan_veb != I40E_NO_VEB) &&
-	    (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
-		struct i40e_veb *veb = pf->veb[pf->lan_veb];
+	i40e_add_ethtool_stats(&data, pf, i40e_gstrings_stats);
+
+	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+		struct i40e_pfc_stats pfc = i40e_get_pfc_stats(pf, i);
+
+		i40e_add_ethtool_stats(&data, &pfc, i40e_gstrings_pfc_stats);
+	}
+
+#ifndef I40E_PF_EXTRA_STATS_OFF
+	/* For now, we only process SRIOV-type VSIs (as extra stats on top of
+	 * the PF core stats), i.e. those correlated with a VF's LAN VSI;
+	 * hence the loop below walks only the VFs' LAN VSIs.
+	 */
+	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
+		is_vf_valid = true;
+		for (vf_idx = 0; vf_idx < pf->num_alloc_vfs; vf_idx++)
+			if (pf->vf[vf_idx].vf_id == vf_id)
+				break;
+		if (vf_idx >= pf->num_alloc_vfs) {
+			dev_info(&pf->pdev->dev,
+				 "In the PF's array, there is no VF instance with VF_ID identifier %d or it is not set/initialized correctly yet\n",
+				 vf_id);
+			is_vf_valid = false;
+			goto check_vf;
+		}
+		vsi_idx = pf->vf[vf_idx].lan_vsi_idx;
 
-		for (j = 0; j < I40E_VEB_STATS_LEN; j++) {
-			p = (char *)veb;
-			p += i40e_gstrings_veb_stats[j].stat_offset;
-			data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat ==
-				     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+		vsi = pf->vsi[vsi_idx];
+		if (!vsi) {
+			/* empty slot in the PF VSI array... */
+			dev_info(&pf->pdev->dev,
+				 "No LAN VSI referenced by VF %d, or it is not initialized correctly yet\n",
+				 vf_id);
+			is_vf_valid = false;
+			goto check_vf;
+		}
+		if (vsi->vf_id != vf_id) {
+			dev_info(&pf->pdev->dev,
+				 "In the PF's array, there is incorrectly set/initialized LAN VSI or reference to it from VF %d is not set/initialized correctly yet\n",
+				 vf_id);
+			is_vf_valid = false;
+			goto check_vf;
+		}
+		if (vsi->vf_id != pf->vf[vf_idx].vf_id ||
+		    !i40e_find_vsi_from_id(pf, pf->vf[vsi->vf_id].lan_vsi_id)) {
+			/* Disjointed identifiers or broken references VF-VSI */
+			dev_warn(&pf->pdev->dev,
+				 "SRIOV LAN VSI (index %d in PF VSI array) with invalid VF Identifier %d (referenced by VF %d, ordered as %d in VF array)\n",
+				 vsi_idx, pf->vsi[vsi_idx]->vf_id,
+				 pf->vf[vf_idx].vf_id, vf_idx);
+			is_vf_valid = false;
 		}
-		for (j = 0; j < I40E_MAX_TRAFFIC_CLASS; j++) {
-			data[i++] = veb->tc_stats.tc_tx_packets[j];
-			data[i++] = veb->tc_stats.tc_tx_bytes[j];
-			data[i++] = veb->tc_stats.tc_rx_packets[j];
-			data[i++] = veb->tc_stats.tc_rx_bytes[j];
+check_vf:
+		if (!is_vf_valid) {
+			i40e_add_ethtool_stats(&data, NULL,
+					       i40e_gstrings_eth_stats_extra);
+		} else {
+			i40e_update_eth_stats(vsi);
+			i40e_add_ethtool_stats(&data, vsi,
+					       i40e_gstrings_eth_stats_extra);
 		}
 	}
-	for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
-		p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
-		data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
-			     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+	for (; vf_id < I40E_STATS_EXTRA_COUNT; vf_id++)
+		i40e_add_ethtool_stats(&data, NULL,
+				       i40e_gstrings_eth_stats_extra);
+
+#endif /* !I40E_PF_EXTRA_STATS_OFF */
+check_data_pointer:
+	WARN_ONCE(data - p != i40e_get_stats_count(netdev),
+		  "ethtool stats count mismatch!");
+}
+
+#ifndef I40E_PF_EXTRA_STATS_OFF
+/**
+ * i40e_update_vfid_in_stats - write the VF number into stats name strings
+ * @stats_extra: array of stats structs with stats name strings
+ * @strings_num: number of stats name strings in the array above (length)
+ * @vf_id: VF number to update the stats name strings with
+ *
+ * Helper for i40e_get_stat_strings(), used for the extra (per-VF) stats.
+ **/
+static inline void
+i40e_update_vfid_in_stats(struct i40e_stats stats_extra[],
+			  int strings_num, int vf_id)
+{
+	int i;
+
+	for (i = 0; i < strings_num; i++) {
+		snprintf(stats_extra[i].stat_string,
+			 I40E_STATS_NAME_VFID_EXTRA_LEN, "vf%03d", vf_id);
+		stats_extra[i].stat_string[I40E_STATS_NAME_VFID_EXTRA_LEN -
+								       1] = '.';
 	}
-	for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
-		data[i++] = pf->stats.priority_xon_tx[j];
-		data[i++] = pf->stats.priority_xoff_tx[j];
+}
+#endif /* !I40E_PF_EXTRA_STATS_OFF */
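+
+/* Example of the rewrite above (assuming I40E_STATS_NAME_VFID_EXTRA_LEN
+ * covers the "vfNNN." prefix): for vf_id == 3, a template string such as
+ * "vfXXX.rx_bytes" becomes "vf003.rx_bytes"; snprintf() writes the
+ * "vf003" prefix and the following assignment restores the '.' that the
+ * terminating NUL overwrote.
+ */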
+/**
+ * i40e_get_stat_strings - copy stat strings into supplied buffer
+ * @netdev: the netdev to collect strings for
+ * @data: supplied buffer to copy strings into
+ *
+ * Copy the strings related to stats for this netdev. Expects data to be
+ * pre-allocated with the size reported by i40e_get_stats_count. Note that the
+ * strings must be copied in a static order and the total count must not
+ * change for a given netdev. See i40e_get_stats_count for more details.
+ **/
+static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	unsigned int i;
+	u8 *p = data;
+
+	i40e_add_stat_strings(&data, i40e_gstrings_net_stats);
+
+	i40e_add_stat_strings(&data, i40e_gstrings_misc_stats);
+
+	for (i = 0; i < netdev->real_num_tx_queues; i++) {
+		i40e_add_stat_strings(&data, i40e_gstrings_queue_stats,
+				      "tx", i);
+		i40e_add_stat_strings(&data, i40e_gstrings_queue_stats,
+				      "rx", i);
+#ifdef HAVE_XDP_SUPPORT
+		i40e_add_stat_strings(&data, i40e_gstrings_rx_queue_xdp_stats,
+				      "rx", i);
+#endif
 	}
-	for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
-		data[i++] = pf->stats.priority_xon_rx[j];
-		data[i++] = pf->stats.priority_xoff_rx[j];
+
+	if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
+		goto check_data_pointer;
+
+	i40e_add_stat_strings(&data, i40e_gstrings_veb_stats);
+
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+		i40e_add_stat_strings(&data, i40e_gstrings_veb_tc_stats, i);
+
+	i40e_add_stat_strings(&data, i40e_gstrings_stats);
+
+	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+		i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i);
+
+#ifndef I40E_PF_EXTRA_STATS_OFF
+	for (i = 0; i < I40E_STATS_EXTRA_COUNT; i++) {
+		i40e_update_vfid_in_stats
+			(i40e_gstrings_eth_stats_extra,
+			 ARRAY_SIZE(i40e_gstrings_eth_stats_extra), i);
+		i40e_add_stat_strings(&data, i40e_gstrings_eth_stats_extra);
 	}
-	for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
-		data[i++] = pf->stats.priority_xon_2_xoff[j];
+
+#endif /* !I40E_PF_EXTRA_STATS_OFF */
+check_data_pointer:
+	WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN,
+		  "stat strings count mismatch!");
 }
 
-static void i40e_get_strings(struct net_device *netdev, u32 stringset,
-			     u8 *data)
+#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
+static void i40e_get_priv_flag_strings(struct net_device *netdev, u8 *data)
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_vsi *vsi = np->vsi;
@@ -1639,107 +2785,47 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
 	char *p = (char *)data;
 	unsigned int i;
 
+	for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
+		snprintf(p, ETH_GSTRING_LEN, "%s",
+			 i40e_gstrings_priv_flags[i].flag_string);
+		p += ETH_GSTRING_LEN;
+	}
+	if (pf->hw.pf_id != 0)
+		return;
+	for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++) {
+		snprintf(p, ETH_GSTRING_LEN, "%s",
+			 i40e_gl_gstrings_priv_flags[i].flag_string);
+		p += ETH_GSTRING_LEN;
+	}
+}
+#endif
+
+static void i40e_get_strings(struct net_device *netdev, u32 stringset,
+			     u8 *data)
+{
 	switch (stringset) {
 	case ETH_SS_TEST:
 		memcpy(data, i40e_gstrings_test,
 		       I40E_TEST_LEN * ETH_GSTRING_LEN);
 		break;
 	case ETH_SS_STATS:
-		for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
-			snprintf(p, ETH_GSTRING_LEN, "%s",
-				 i40e_gstrings_net_stats[i].stat_string);
-			p += ETH_GSTRING_LEN;
-		}
-		for (i = 0; i < I40E_MISC_STATS_LEN; i++) {
-			snprintf(p, ETH_GSTRING_LEN, "%s",
-				 i40e_gstrings_misc_stats[i].stat_string);
-			p += ETH_GSTRING_LEN;
-		}
-		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_packets", i);
-			p += ETH_GSTRING_LEN;
-			snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_bytes", i);
-			p += ETH_GSTRING_LEN;
-			snprintf(p, ETH_GSTRING_LEN, "rx-%d.rx_packets", i);
-			p += ETH_GSTRING_LEN;
-			snprintf(p, ETH_GSTRING_LEN, "rx-%d.rx_bytes", i);
-			p += ETH_GSTRING_LEN;
-		}
-		if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
-			return;
-
-		if ((pf->lan_veb != I40E_NO_VEB) &&
-		    (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
-			for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
-				snprintf(p, ETH_GSTRING_LEN, "veb.%s",
-					i40e_gstrings_veb_stats[i].stat_string);
-				p += ETH_GSTRING_LEN;
-			}
-			for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-				snprintf(p, ETH_GSTRING_LEN,
-					 "veb.tc_%d_tx_packets", i);
-				p += ETH_GSTRING_LEN;
-				snprintf(p, ETH_GSTRING_LEN,
-					 "veb.tc_%d_tx_bytes", i);
-				p += ETH_GSTRING_LEN;
-				snprintf(p, ETH_GSTRING_LEN,
-					 "veb.tc_%d_rx_packets", i);
-				p += ETH_GSTRING_LEN;
-				snprintf(p, ETH_GSTRING_LEN,
-					 "veb.tc_%d_rx_bytes", i);
-				p += ETH_GSTRING_LEN;
-			}
-		}
-		for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
-			snprintf(p, ETH_GSTRING_LEN, "port.%s",
-				 i40e_gstrings_stats[i].stat_string);
-			p += ETH_GSTRING_LEN;
-		}
-		for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
-			snprintf(p, ETH_GSTRING_LEN,
-				 "port.tx_priority_%d_xon", i);
-			p += ETH_GSTRING_LEN;
-			snprintf(p, ETH_GSTRING_LEN,
-				 "port.tx_priority_%d_xoff", i);
-			p += ETH_GSTRING_LEN;
-		}
-		for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
-			snprintf(p, ETH_GSTRING_LEN,
-				 "port.rx_priority_%d_xon", i);
-			p += ETH_GSTRING_LEN;
-			snprintf(p, ETH_GSTRING_LEN,
-				 "port.rx_priority_%d_xoff", i);
-			p += ETH_GSTRING_LEN;
-		}
-		for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
-			snprintf(p, ETH_GSTRING_LEN,
-				 "port.rx_priority_%d_xon_2_xoff", i);
-			p += ETH_GSTRING_LEN;
-		}
-		/* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
+		i40e_get_stat_strings(netdev, data);
 		break;
+#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
 	case ETH_SS_PRIV_FLAGS:
-		for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
-			snprintf(p, ETH_GSTRING_LEN, "%s",
-				 i40e_gstrings_priv_flags[i].flag_string);
-			p += ETH_GSTRING_LEN;
-		}
-		if (pf->hw.pf_id != 0)
-			break;
-		for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++) {
-			snprintf(p, ETH_GSTRING_LEN, "%s",
-				 i40e_gl_gstrings_priv_flags[i].flag_string);
-			p += ETH_GSTRING_LEN;
-		}
+		i40e_get_priv_flag_strings(netdev, data);
 		break;
+#endif
 	default:
 		break;
 	}
 }
 
+#ifdef HAVE_ETHTOOL_GET_TS_INFO
 static int i40e_get_ts_info(struct net_device *dev,
 			    struct ethtool_ts_info *info)
 {
+#ifdef HAVE_PTP_1588_CLOCK
 	struct i40e_pf *pf = i40e_netdev_to_pf(dev);
 
 	/* only report HW timestamping if PTP is enabled */
@@ -1776,9 +2862,13 @@ static int i40e_get_ts_info(struct net_device *dev,
 				    BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
 
 	return 0;
+#else /* HAVE_PTP_1588_CLOCK */
+	return ethtool_op_get_ts_info(dev, info);
+#endif /* HAVE_PTP_1588_CLOCK */
 }
 
-static int i40e_link_test(struct net_device *netdev, u64 *data)
+#endif /* HAVE_ETHTOOL_GET_TS_INFO */
+static u64 i40e_link_test(struct net_device *netdev, u64 *data)
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_pf *pf = np->vsi->back;
@@ -1787,7 +2877,7 @@ static int i40e_link_test(struct net_device *netdev, u64 *data)
 
 	netif_info(pf, hw, netdev, "link test\n");
 	status = i40e_get_link_status(&pf->hw, &link_up);
-	if (status) {
+	if (status != I40E_SUCCESS) {
 		netif_err(pf, drv, netdev, "link query timed out, please retry test\n");
 		*data = 1;
 		return *data;
@@ -1801,7 +2891,7 @@ static int i40e_link_test(struct net_device *netdev, u64 *data)
 	return *data;
 }
 
-static int i40e_reg_test(struct net_device *netdev, u64 *data)
+static u64 i40e_reg_test(struct net_device *netdev, u64 *data)
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_pf *pf = np->vsi->back;
@@ -1812,7 +2902,7 @@ static int i40e_reg_test(struct net_device *netdev, u64 *data)
 	return *data;
 }
 
-static int i40e_eeprom_test(struct net_device *netdev, u64 *data)
+static u64 i40e_eeprom_test(struct net_device *netdev, u64 *data)
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_pf *pf = np->vsi->back;
@@ -1826,7 +2916,7 @@ static int i40e_eeprom_test(struct net_device *netdev, u64 *data)
 	return *data;
 }
 
-static int i40e_intr_test(struct net_device *netdev, u64 *data)
+static u64 i40e_intr_test(struct net_device *netdev, u64 *data)
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_pf *pf = np->vsi->back;
@@ -1845,6 +2935,13 @@ static int i40e_intr_test(struct net_device *netdev, u64 *data)
 	return *data;
 }
 
+#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
+static int i40e_diag_test_count(struct net_device *netdev)
+{
+	return I40E_TEST_LEN;
+}
+
+#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
 static inline bool i40e_active_vfs(struct i40e_pf *pf)
 {
 	struct i40e_vf *vfs = pf->vf;
@@ -1993,11 +3090,12 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	return 0;
 }
 
+#ifdef HAVE_ETHTOOL_SET_PHYS_ID
 static int i40e_set_phys_id(struct net_device *netdev,
 			    enum ethtool_phys_id_state state)
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
-	i40e_status ret = 0;
+	i40e_status ret = I40E_SUCCESS;
 	struct i40e_pf *pf = np->vsi->back;
 	struct i40e_hw *hw = &pf->hw;
 	int blink_freq = 2;
@@ -2008,7 +3106,9 @@ static int i40e_set_phys_id(struct net_device *netdev,
 		if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) {
 			pf->led_status = i40e_led_get(hw);
 		} else {
-			i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL, NULL);
+			if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE))
+				i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL,
+						      NULL);
 			ret = i40e_led_get_phy(hw, &temp_status,
 					       &pf->phy_led_val);
 			pf->led_status = temp_status;
@@ -2033,7 +3133,8 @@ static int i40e_set_phys_id(struct net_device *netdev,
 			ret = i40e_led_set_phy(hw, false, pf->led_status,
 					       (pf->phy_led_val |
 					       I40E_PHY_LED_MODE_ORIG));
-			i40e_aq_set_phy_debug(hw, 0, NULL);
+			if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE))
+				i40e_aq_set_phy_debug(hw, 0, NULL);
 		}
 		break;
 	default:
@@ -2044,6 +3145,53 @@ static int i40e_set_phys_id(struct net_device *netdev,
 		else
 			return 0;
 }
+#else /* HAVE_ETHTOOL_SET_PHYS_ID */
+static int i40e_phys_id(struct net_device *netdev, u32 data)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf *pf = np->vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	i40e_status ret = I40E_SUCCESS;
+	u16 temp_status;
+	int i;
+
+	if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) {
+		pf->led_status = i40e_led_get(hw);
+	} else {
+		ret = i40e_led_get_phy(hw, &temp_status,
+				       &pf->phy_led_val);
+		pf->led_status = temp_status;
+	}
+
+	if (!data || data > 300)
+		data = 300;
+
+	/* 10GBASE-T parts control LEDs through the PHY, not the MAC */
+	for (i = 0; i < (data * 1000); i += 400) {
+		if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS))
+			i40e_led_set(hw, 0xF, false);
+		else
+			ret = i40e_led_set_phy(hw, true, pf->led_status, 0);
+		msleep_interruptible(200);
+		if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS))
+			i40e_led_set(hw, 0x0, false);
+		else
+			ret = i40e_led_set_phy(hw, false, pf->led_status, 0);
+		msleep_interruptible(200);
+	}
+	if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS))
+		i40e_led_set(hw, pf->led_status, false);
+	else
+		ret = i40e_led_set_phy(hw, false, pf->led_status,
+				       (pf->led_status |
+				       I40E_PHY_LED_MODE_ORIG));
+
+	if (ret)
+		return -ENOENT;
+	else
+		return 0;
+}
+#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
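+
+/* Illustrative usage (assuming the standard ethtool CLI): both variants
+ * above back "ethtool -p <dev> [seconds]", blinking the port LED so the
+ * physical port can be located; on 10GBASE-T parts the LED is driven
+ * through the PHY rather than the MAC.
+ */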
 
 /* NOTE: i40e hardware uses a conversion factor of 2 for Interrupt
  * Throttle Rate (ITR) ie. ITR(1) = 2us ITR(10) = 20 us, and also
@@ -2071,27 +3219,25 @@ static int __i40e_get_coalesce(struct net_device *netdev,
 	ec->tx_max_coalesced_frames_irq = vsi->work_limit;
 	ec->rx_max_coalesced_frames_irq = vsi->work_limit;
 
-	/* rx and tx usecs has per queue value. If user doesn't specify the queue,
-	 * return queue 0's value to represent.
+	/* rx and tx usecs have per-queue values. If the user doesn't specify
+	 * a queue, return queue 0's value as representative.
 	 */
-	if (queue < 0) {
+	if (queue < 0)
 		queue = 0;
-	} else if (queue >= vsi->num_queue_pairs) {
+	else if (queue >= vsi->num_queue_pairs)
 		return -EINVAL;
-	}
 
 	rx_ring = vsi->rx_rings[queue];
 	tx_ring = vsi->tx_rings[queue];
 
-	if (ITR_IS_DYNAMIC(rx_ring->rx_itr_setting))
+	if (ITR_IS_DYNAMIC(rx_ring->itr_setting))
 		ec->use_adaptive_rx_coalesce = 1;
 
-	if (ITR_IS_DYNAMIC(tx_ring->tx_itr_setting))
+	if (ITR_IS_DYNAMIC(tx_ring->itr_setting))
 		ec->use_adaptive_tx_coalesce = 1;
 
-	ec->rx_coalesce_usecs = rx_ring->rx_itr_setting & ~I40E_ITR_DYNAMIC;
-	ec->tx_coalesce_usecs = tx_ring->tx_itr_setting & ~I40E_ITR_DYNAMIC;
-
+	ec->rx_coalesce_usecs = rx_ring->itr_setting & ~I40E_ITR_DYNAMIC;
+	ec->tx_coalesce_usecs = tx_ring->itr_setting & ~I40E_ITR_DYNAMIC;
 
 	/* we use the _usecs_high to store/set the interrupt rate limit
 	 * that the hardware supports, that almost but not quite
@@ -2120,6 +3266,7 @@ static int i40e_get_coalesce(struct net_device *netdev,
 	return __i40e_get_coalesce(netdev, ec, -1);
 }
 
+#ifdef ETHTOOL_PERQUEUE
 /**
  * i40e_get_per_queue_coalesce - gets coalesce settings for particular queue
  * @netdev: netdev structure
@@ -2134,6 +3281,7 @@ static int i40e_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
 	return __i40e_get_coalesce(netdev, ec, queue);
 }
 
+#endif /* ETHTOOL_PERQUEUE */
 /**
  * i40e_set_itr_per_queue - set ITR values for specific queue
  * @vsi: the VSI to set values for
@@ -2142,45 +3290,93 @@ static int i40e_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
  *
  * Change the ITR settings for a specific queue.
  **/
-
 static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
 				   struct ethtool_coalesce *ec,
 				   int queue)
 {
+	struct i40e_ring *rx_ring = vsi->rx_rings[queue];
+	struct i40e_ring *tx_ring = vsi->tx_rings[queue];
 	struct i40e_pf *pf = vsi->back;
 	struct i40e_hw *hw = &pf->hw;
 	struct i40e_q_vector *q_vector;
-	u16 vector, intrl;
+	u16 intrl;
 
 	intrl = i40e_intrl_usec_to_reg(vsi->int_rate_limit);
 
-	vsi->rx_rings[queue]->rx_itr_setting = ec->rx_coalesce_usecs;
-	vsi->tx_rings[queue]->tx_itr_setting = ec->tx_coalesce_usecs;
+	rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
+	tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
 
 	if (ec->use_adaptive_rx_coalesce)
-		vsi->rx_rings[queue]->rx_itr_setting |= I40E_ITR_DYNAMIC;
+		rx_ring->itr_setting |= I40E_ITR_DYNAMIC;
 	else
-		vsi->rx_rings[queue]->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
+		rx_ring->itr_setting &= ~I40E_ITR_DYNAMIC;
 
 	if (ec->use_adaptive_tx_coalesce)
-		vsi->tx_rings[queue]->tx_itr_setting |= I40E_ITR_DYNAMIC;
+		tx_ring->itr_setting |= I40E_ITR_DYNAMIC;
 	else
-		vsi->tx_rings[queue]->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
+		tx_ring->itr_setting &= ~I40E_ITR_DYNAMIC;
+
+	q_vector = rx_ring->q_vector;
+	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
 
-	q_vector = vsi->rx_rings[queue]->q_vector;
-	q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[queue]->rx_itr_setting);
-	vector = vsi->base_vector + q_vector->v_idx;
-	wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), q_vector->rx.itr);
+	q_vector = tx_ring->q_vector;
+	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
 
-	q_vector = vsi->tx_rings[queue]->q_vector;
-	q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[queue]->tx_itr_setting);
-	vector = vsi->base_vector + q_vector->v_idx;
-	wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), q_vector->tx.itr);
+	/* The interrupt handler itself will take care of programming
+	 * the Tx and Rx ITR values based on the values we have entered
+	 * into the q_vector, no need to write the values now.
+	 */
 
-	wr32(hw, I40E_PFINT_RATEN(vector - 1), intrl);
+	wr32(hw, I40E_PFINT_RATEN(q_vector->reg_idx), intrl);
 	i40e_flush(hw);
 }
 
+/**
+ * i40e_is_coalesce_param_invalid - check for unsupported coalesce parameters
+ * @netdev: pointer to the netdev associated with this query
+ * @ec: ethtool coalesce settings to be checked
+ *
+ * Prints netdev info and returns an error if the driver does not support
+ * one of the given parameters. When a parameter gets implemented later,
+ * remove it from the param array.
+ */
+static
+int i40e_is_coalesce_param_invalid(struct net_device *netdev,
+				   struct ethtool_coalesce *ec)
+{
+	struct i40e_ethtool_not_used {
+		u32 value;
+		const char *name;
+	} param[] = {
+		{ec->stats_block_coalesce_usecs, "stats-block-usecs"},
+		{ec->rate_sample_interval, "sample-interval"},
+		{ec->pkt_rate_low, "pkt-rate-low"},
+		{ec->pkt_rate_high, "pkt-rate-high"},
+		{ec->rx_max_coalesced_frames, "rx-frames"},
+		{ec->rx_coalesce_usecs_irq, "rx-usecs-irq"},
+		{ec->tx_max_coalesced_frames, "tx-frames"},
+		{ec->tx_coalesce_usecs_irq, "tx-usecs-irq"},
+		{ec->rx_coalesce_usecs_low, "rx-usecs-low"},
+		{ec->rx_max_coalesced_frames_low, "rx-frames-low"},
+		{ec->tx_coalesce_usecs_low, "tx-usecs-low"},
+		{ec->tx_max_coalesced_frames_low, "tx-frames-low"},
+		{ec->rx_max_coalesced_frames_high, "rx-frames-high"},
+		{ec->tx_max_coalesced_frames_high, "tx-frames-high"}
+	};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(param); i++) {
+		if (param[i].value) {
+			netdev_info(netdev,
+				    "Setting %s not supported\n",
+				    param[i].name);
+			return -EOPNOTSUPP;
+		}
+	}
+
+	return 0;
+}
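+
+/* Illustrative usage (assuming the standard ethtool CLI): only the
+ * parameters the driver honors pass the check above, e.g.
+ *
+ *	ethtool -C <dev> rx-usecs 62 tx-usecs 122 adaptive-rx off
+ *	ethtool -C <dev> rx-usecs-high 20 tx-frames-irq 256
+ *
+ * while anything listed in the param[] table is rejected with
+ * -EOPNOTSUPP.
+ */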
+
 /**
  * __i40e_set_coalesce - set coalesce settings for particular queue
  * @netdev: the netdev to change
@@ -2199,15 +3395,18 @@ static int __i40e_set_coalesce(struct net_device *netdev,
 	struct i40e_pf *pf = vsi->back;
 	int i;
 
+	if (i40e_is_coalesce_param_invalid(netdev, ec))
+		return -EOPNOTSUPP;
+
 	if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
 		vsi->work_limit = ec->tx_max_coalesced_frames_irq;
 
 	if (queue < 0) {
-		cur_rx_itr = vsi->rx_rings[0]->rx_itr_setting;
-		cur_tx_itr = vsi->tx_rings[0]->tx_itr_setting;
+		cur_rx_itr = vsi->rx_rings[0]->itr_setting;
+		cur_tx_itr = vsi->tx_rings[0]->itr_setting;
 	} else if (queue < vsi->num_queue_pairs) {
-		cur_rx_itr = vsi->rx_rings[queue]->rx_itr_setting;
-		cur_tx_itr = vsi->tx_rings[queue]->tx_itr_setting;
+		cur_rx_itr = vsi->rx_rings[queue]->itr_setting;
+		cur_tx_itr = vsi->tx_rings[queue]->itr_setting;
 	} else {
 		netif_info(pf, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
 			   vsi->num_queue_pairs - 1);
@@ -2235,7 +3434,7 @@ static int __i40e_set_coalesce(struct net_device *netdev,
 		return -EINVAL;
 	}
 
-	if (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)) {
+	if (ec->rx_coalesce_usecs > I40E_MAX_ITR) {
 		netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
 		return -EINVAL;
 	}
@@ -2246,16 +3445,16 @@ static int __i40e_set_coalesce(struct net_device *netdev,
 		return -EINVAL;
 	}
 
-	if (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)) {
+	if (ec->tx_coalesce_usecs > I40E_MAX_ITR) {
 		netif_info(pf, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
 		return -EINVAL;
 	}
 
 	if (ec->use_adaptive_rx_coalesce && !cur_rx_itr)
-		ec->rx_coalesce_usecs = I40E_MIN_ITR << 1;
+		ec->rx_coalesce_usecs = I40E_MIN_ITR;
 
 	if (ec->use_adaptive_tx_coalesce && !cur_tx_itr)
-		ec->tx_coalesce_usecs = I40E_MIN_ITR << 1;
+		ec->tx_coalesce_usecs = I40E_MIN_ITR;
 
 	intrl_reg = i40e_intrl_usec_to_reg(ec->rx_coalesce_usecs_high);
 	vsi->int_rate_limit = INTRL_REG_TO_USEC(intrl_reg);
@@ -2264,8 +3463,8 @@ static int __i40e_set_coalesce(struct net_device *netdev,
 			   vsi->int_rate_limit);
 	}
 
-	/* rx and tx usecs has per queue value. If user doesn't specify the queue,
-	 * apply to all queues.
+	/* rx and tx usecs have per-queue values. If the user doesn't specify
+	 * a queue, apply to all queues.
 	 */
 	if (queue < 0) {
 		for (i = 0; i < vsi->num_queue_pairs; i++)
@@ -2290,6 +3489,20 @@ static int i40e_set_coalesce(struct net_device *netdev,
 	return __i40e_set_coalesce(netdev, ec, -1);
 }
 
+#ifdef ETHTOOL_SRXNTUPLE
+/* We need to keep this around for kernels 2.6.33 - 2.6.39 in order to avoid
+ * a NULL pointer dereference: those kernels assumed that if the
+ * NETIF_F_NTUPLE flag was defined, this function was present.
+ */
+static int i40e_set_rx_ntuple(struct net_device *dev,
+			      struct ethtool_rx_ntuple *cmd)
+{
+	return -EOPNOTSUPP;
+}
+
+#endif /* ETHTOOL_SRXNTUPLE */
+
+#ifdef ETHTOOL_PERQUEUE
 /**
  * i40e_set_per_queue_coalesce - set specific queue's coalesce settings
  * @netdev: the netdev to change
@@ -2303,7 +3516,9 @@ static int i40e_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
 {
 	return __i40e_set_coalesce(netdev, ec, queue);
 }
+#endif /* ETHTOOL_PERQUEUE */
 
+#ifdef ETHTOOL_GRXRINGS
 /**
  * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
  * @pf: pointer to the physical function struct
@@ -2385,7 +3600,7 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
 /**
  * i40e_check_mask - Check whether a mask field is set
  * @mask: the full mask value
- * @field; mask of the field to check
+ * @field: mask of the field to check
  *
  * If the given mask is fully set, return positive value. If the mask for the
  * field is fully unset, return zero. Otherwise return a negative error code.
@@ -2436,18 +3651,69 @@ static int i40e_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
 	value = be64_to_cpu(*((__be64 *)fsp->h_ext.data));
 	mask = be64_to_cpu(*((__be64 *)fsp->m_ext.data));
 
-#define I40E_USERDEF_FLEX_WORD		GENMASK_ULL(15, 0)
-#define I40E_USERDEF_FLEX_OFFSET	GENMASK_ULL(31, 16)
+#define I40E_USERDEF_CLOUD_FILTER	BIT_ULL(63)
+
+#define I40E_USERDEF_CLOUD_RESERVED	GENMASK_ULL(62, 32)
+#define I40E_USERDEF_TUNNEL_TYPE	GENMASK_ULL(31, 24)
+#define I40E_USERDEF_TENANT_ID		GENMASK_ULL(23, 0)
+
+#define I40E_USERDEF_RESERVED		GENMASK_ULL(62, 32)
 #define I40E_USERDEF_FLEX_FILTER	GENMASK_ULL(31, 0)
 
-	valid = i40e_check_mask(mask, I40E_USERDEF_FLEX_FILTER);
-	if (valid < 0) {
-		return -EINVAL;
-	} else if (valid) {
-		data->flex_word = value & I40E_USERDEF_FLEX_WORD;
-		data->flex_offset =
-			(value & I40E_USERDEF_FLEX_OFFSET) >> 16;
-		data->flex_filter = true;
+#define I40E_USERDEF_FLEX_OFFSET	GENMASK_ULL(31, 16)
+#define I40E_USERDEF_FLEX_WORD		GENMASK_ULL(15, 0)
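+
+/* Worked example of the layout above (illustrative values): with a full
+ * mask, the 64-bit user-def value 0x8000000012000ab3 selects a cloud
+ * filter (bit 63 set) with tunnel type 0x12 (bits 31:24) and tenant id
+ * 0xab3 (bits 23:0). With bit 63 clear, 0x002a1234 instead selects a
+ * flex filter matching word 0x1234 at offset 0x2a.
+ */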
+
+	if ((mask & I40E_USERDEF_CLOUD_FILTER) &&
+	    (value & I40E_USERDEF_CLOUD_FILTER))
+		data->cloud_filter = true;
+
+	if (data->cloud_filter) {
+		/* Make sure that the reserved bits are not set */
+		valid = i40e_check_mask(mask, I40E_USERDEF_CLOUD_RESERVED);
+		if (valid < 0) {
+			return -EINVAL;
+		} else if (valid) {
+			if ((value & I40E_USERDEF_CLOUD_RESERVED) != 0)
+				return -EINVAL;
+		}
+
+		/* These fields are only valid if this is a cloud filter */
+		valid = i40e_check_mask(mask, I40E_USERDEF_TENANT_ID);
+		if (valid < 0) {
+			return -EINVAL;
+		} else if (valid) {
+			data->tenant_id = value & I40E_USERDEF_TENANT_ID;
+			data->tenant_id_valid = true;
+		}
+
+		valid = i40e_check_mask(mask, I40E_USERDEF_TUNNEL_TYPE);
+		if (valid < 0) {
+			return -EINVAL;
+		} else if (valid) {
+			data->tunnel_type =
+				(value & I40E_USERDEF_TUNNEL_TYPE) >> 24;
+			data->tunnel_type_valid = true;
+		}
+	} else {
+		/* Make sure that the reserved bits are not set */
+		valid = i40e_check_mask(mask, I40E_USERDEF_RESERVED);
+		if (valid < 0) {
+			return -EINVAL;
+		} else if (valid) {
+			if ((value & I40E_USERDEF_RESERVED) != 0)
+				return -EINVAL;
+		}
+
+		/* These fields are only valid if this isn't a cloud filter */
+		valid = i40e_check_mask(mask, I40E_USERDEF_FLEX_FILTER);
+		if (valid < 0) {
+			return -EINVAL;
+		} else if (valid) {
+			data->flex_word = value & I40E_USERDEF_FLEX_WORD;
+			data->flex_offset =
+				(value & I40E_USERDEF_FLEX_OFFSET) >> 16;
+			data->flex_filter = true;
+		}
 	}
 
 	return 0;
@@ -2456,6 +3722,7 @@ static int i40e_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
 /**
  * i40e_fill_rx_flow_user_data - Fill in user-defined data field
  * @fsp: pointer to rx_flow specification
+ * @data: pointer to return userdef data
  *
  * Reads the userdef data structure and properly fills in the user defined
  * fields of the rx_flow_spec.
@@ -2465,10 +3732,25 @@ static void i40e_fill_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
 {
 	u64 value = 0, mask = 0;
 
-	if (data->flex_filter) {
-		value |= data->flex_word;
-		value |= (u64)data->flex_offset << 16;
-		mask |= I40E_USERDEF_FLEX_FILTER;
+	if (data->cloud_filter) {
+		value |= I40E_USERDEF_CLOUD_FILTER;
+		mask |= I40E_USERDEF_CLOUD_FILTER;
+
+		if (data->tenant_id_valid) {
+			value |= data->tenant_id;
+			mask |= I40E_USERDEF_TENANT_ID;
+		}
+
+		if (data->tunnel_type_valid) {
+			value |= (u64)data->tunnel_type << 24;
+			mask |= I40E_USERDEF_TUNNEL_TYPE;
+		}
+	} else {
+		if (data->flex_filter) {
+			value |= data->flex_word;
+			value |= (u64)data->flex_offset << 16;
+			mask |= I40E_USERDEF_FLEX_FILTER;
+		}
 	}
 
 	if (value || mask)
@@ -2479,7 +3761,7 @@ static void i40e_fill_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
 }
 
 /**
- * i40e_get_ethtool_fdir_all - Populates the rule count of a command
+ * i40e_get_rx_filter_ids - Populates the rule count of a command
  * @pf: Pointer to the physical function struct
  * @cmd: The command to get or set Rx flow classification rules
  * @rule_locs: Array of used rule locations
@@ -2489,23 +3771,34 @@ static void i40e_fill_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
  *
  * Returns 0 on success or -EMSGSIZE if entry not found
  **/
-static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf,
-				     struct ethtool_rxnfc *cmd,
-				     u32 *rule_locs)
+static int i40e_get_rx_filter_ids(struct i40e_pf *pf,
+				  struct ethtool_rxnfc *cmd,
+				  u32 *rule_locs)
 {
-	struct i40e_fdir_filter *rule;
+	struct i40e_fdir_filter *f_rule;
+	struct i40e_cloud_filter *c_rule;
 	struct hlist_node *node2;
-	int cnt = 0;
+	unsigned int cnt = 0;
 
 	/* report total rule count */
 	cmd->data = i40e_get_fd_cnt_all(pf);
 
-	hlist_for_each_entry_safe(rule, node2,
+	hlist_for_each_entry_safe(f_rule, node2,
 				  &pf->fdir_filter_list, fdir_node) {
 		if (cnt == cmd->rule_cnt)
 			return -EMSGSIZE;
 
-		rule_locs[cnt] = rule->fd_id;
+		rule_locs[cnt] = f_rule->fd_id;
+		cnt++;
+	}
+
+	/* find the cloud filter rule ids */
+	hlist_for_each_entry_safe(c_rule, node2,
+				  &pf->cloud_filter_list, cloud_node) {
+		if (cnt == cmd->rule_cnt)
+			return -EMSGSIZE;
+
+		rule_locs[cnt] = c_rule->id;
 		cnt++;
 	}
 
@@ -2588,16 +3881,16 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
 
 no_input_set:
 	if (input_set & I40E_L3_SRC_MASK)
-		fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFF);
+		fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFFFFFF);
 
 	if (input_set & I40E_L3_DST_MASK)
-		fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFF);
+		fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFFFFFF);
 
 	if (input_set & I40E_L4_SRC_MASK)
-		fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFFFFFF);
+		fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFF);
 
 	if (input_set & I40E_L4_DST_MASK)
-		fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFFFFFF);
+		fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFF);
 
 	if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET)
 		fsp->ring_cookie = RX_CLS_FLOW_DISC;
@@ -2630,15 +3923,114 @@ no_input_set:
 	return 0;
 }
 
+#define VXLAN_PORT	8472
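+/* Legacy Linux default VXLAN UDP port; the IANA-assigned port is 4789 */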
+
+/**
+ * i40e_get_cloud_filter_entry - get a cloud filter by loc
+ * @pf: pointer to the physical function struct
+ * @cmd: The command to get or set Rx flow classification rules
+ *
+ * get cloud filter by loc.
+ * Returns 0 if success.
+ **/
+static int i40e_get_cloud_filter_entry(struct i40e_pf *pf,
+				       struct ethtool_rxnfc *cmd)
+{
+	struct ethtool_rx_flow_spec *fsp =
+			(struct ethtool_rx_flow_spec *)&cmd->fs;
+	static const u8 mac_broadcast[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+	struct i40e_cloud_filter *rule, *filter = NULL;
+	struct i40e_rx_flow_userdef userdef = {0};
+	struct hlist_node *node2;
+
+	hlist_for_each_entry_safe(rule, node2,
+				  &pf->cloud_filter_list, cloud_node) {
+		/* filter found */
+		if (rule->id == fsp->location)
+			filter = rule;
+
+		/* bail out if we've passed the likely location in the list */
+		if (rule->id >= fsp->location)
+			break;
+	}
+	if (!filter) {
+		dev_info(&pf->pdev->dev, "No cloud filter with loc %d\n",
+			fsp->location);
+		return -ENOENT;
+	}
+
+	userdef.cloud_filter = true;
+
+	fsp->ring_cookie = filter->queue_id;
+	if (filter->seid != pf->vsi[pf->lan_vsi]->seid) {
+		struct i40e_vsi *vsi;
+
+		vsi = i40e_find_vsi_from_seid(pf, filter->seid);
+		if (vsi && vsi->type == I40E_VSI_SRIOV) {
+			/* VFs are zero-indexed by the driver, but ethtool
+			 * expects them to be one-indexed, so add one here
+			 */
+			u64 ring_vf = vsi->vf_id + 1;
+
+			ring_vf <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+			fsp->ring_cookie |= ring_vf;
+		}
+	}
+
+	ether_addr_copy(fsp->h_u.ether_spec.h_dest, filter->outer_mac);
+	ether_addr_copy(fsp->h_u.ether_spec.h_source, filter->inner_mac);
+
+	if (filter->flags & I40E_CLOUD_FIELD_OMAC)
+		ether_addr_copy(fsp->m_u.ether_spec.h_dest, mac_broadcast);
+	if (filter->flags & I40E_CLOUD_FIELD_IMAC)
+		ether_addr_copy(fsp->m_u.ether_spec.h_source, mac_broadcast);
+	if (filter->flags & I40E_CLOUD_FIELD_IVLAN)
+		fsp->h_ext.vlan_tci = filter->inner_vlan;
+	if (filter->flags & I40E_CLOUD_FIELD_TEN_ID) {
+		userdef.tenant_id_valid = true;
+		userdef.tenant_id = filter->tenant_id;
+	}
+	if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE) {
+		userdef.tunnel_type_valid = true;
+		userdef.tunnel_type = filter->tunnel_type;
+	}
+
+	if (filter->flags & I40E_CLOUD_FIELD_IIP) {
+		if (i40e_is_l4mode_enabled()) {
+			fsp->flow_type = UDP_V4_FLOW;
+			fsp->h_u.udp_ip4_spec.pdst = filter->dst_port;
+		} else {
+			fsp->flow_type = IP_USER_FLOW;
+		}
+
+		fsp->h_u.usr_ip4_spec.ip4dst = filter->inner_ip[0];
+		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+	} else {
+		fsp->flow_type = ETHER_FLOW;
+	}
+
+	i40e_fill_rx_flow_user_data(fsp, &userdef);
+
+	fsp->flow_type |= FLOW_EXT;
+
+	return 0;
+}
+
 /**
  * i40e_get_rxnfc - command to get RX flow classification rules
  * @netdev: network interface device structure
  * @cmd: ethtool rxnfc command
+ * @rule_locs: pointer to store rule data
  *
  * Returns Success if the command is supported.
  **/
 static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
+			  void *rule_locs)
+#else
 			  u32 *rule_locs)
+#endif
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_vsi *vsi = np->vsi;
@@ -2647,7 +4039,7 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
 
 	switch (cmd->cmd) {
 	case ETHTOOL_GRXRINGS:
-		cmd->data = vsi->num_queue_pairs;
+		cmd->data = vsi->rss_size;
 		ret = 0;
 		break;
 	case ETHTOOL_GRXFH:
@@ -2655,15 +4047,23 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
 		break;
 	case ETHTOOL_GRXCLSRLCNT:
 		cmd->rule_cnt = pf->fdir_pf_active_filters;
+		cmd->rule_cnt += pf->num_cloud_filters;
 		/* report total rule count */
 		cmd->data = i40e_get_fd_cnt_all(pf);
 		ret = 0;
 		break;
 	case ETHTOOL_GRXCLSRULE:
 		ret = i40e_get_ethtool_fdir_entry(pf, cmd);
+		/* if no such fdir filter then try the cloud list */
+		if (ret)
+			ret = i40e_get_cloud_filter_entry(pf, cmd);
 		break;
 	case ETHTOOL_GRXCLSRLALL:
-		ret = i40e_get_ethtool_fdir_all(pf, cmd, rule_locs);
+#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
+		ret = i40e_get_rx_filter_ids(pf, cmd, (u32 *)rule_locs);
+#else
+		ret = i40e_get_rx_filter_ids(pf, cmd, rule_locs);
+#endif
 		break;
 	default:
 		break;
@@ -2675,7 +4075,7 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
 /**
  * i40e_get_rss_hash_bits - Read RSS Hash bits from register
  * @nfc: pointer to user request
- * @i_setc bits currently set
+ * @i_setc: bits currently set
  *
  * Returns value of bits to be set per user request
  **/
@@ -2720,7 +4120,7 @@ static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
 /**
  * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
  * @pf: pointer to the physical function struct
- * @cmd: ethtool rxnfc command
+ * @nfc: ethtool rxnfc command
  *
  * Returns Success if the flow input set is supported.
  **/
@@ -2829,12 +4229,325 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
 	return 0;
 }
 
+/**
+ * i40e_cloud_filter_mask2flags - Convert cloud filter details to filter type
+ * @pf: pointer to the physical function struct
+ * @fsp: RX flow classification rules
+ * @userdef: pointer to userdef field data
+ * @flags: Resultant combination of all the fields to decide the tuple
+ *
+ * The general trick in setting these flags is that if the mask field for
+ * a value is non-zero, then the field itself was set to something, so we
+ * use this to tell us what has been selected.
+ *
+ * Returns 0 if a valid filter type was identified.
+ **/
+static int i40e_cloud_filter_mask2flags(struct i40e_pf *pf,
+					struct ethtool_rx_flow_spec *fsp,
+					struct i40e_rx_flow_userdef *userdef,
+					u8 *flags)
+{
+	u8 i = 0;
+
+	*flags = 0;
+
+	switch (fsp->flow_type & ~FLOW_EXT) {
+	case ETHER_FLOW:
+		/* use is_broadcast and is_zero to check for all 0xff or all 0 */
+		if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) {
+			i |= I40E_CLOUD_FIELD_OMAC;
+		} else if (is_zero_ether_addr(fsp->m_u.ether_spec.h_dest)) {
+			i &= ~I40E_CLOUD_FIELD_OMAC;
+		} else {
+			dev_info(&pf->pdev->dev, "Bad ether dest mask %pM\n",
+				 fsp->m_u.ether_spec.h_dest);
+			return I40E_ERR_CONFIG;
+		}
+
+		if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) {
+			i |= I40E_CLOUD_FIELD_IMAC;
+		} else if (is_zero_ether_addr(fsp->m_u.ether_spec.h_source)) {
+			i &= ~I40E_CLOUD_FIELD_IMAC;
+		} else {
+			dev_info(&pf->pdev->dev, "Bad ether source mask %pM\n",
+				 fsp->m_u.ether_spec.h_source);
+			return I40E_ERR_CONFIG;
+		}
+		break;
+
+	case IP_USER_FLOW:
+		if (pf->hw.mac.type == I40E_MAC_X722) {
+			dev_info(&pf->pdev->dev, "Failed to set filter. Destination IP filters are not supported for this device.\n");
+			return I40E_ERR_CONFIG;
+		}
+		if (fsp->m_u.usr_ip4_spec.ip4dst == cpu_to_be32(0xffffffff)) {
+			i |= I40E_CLOUD_FIELD_IIP;
+		} else if (!fsp->m_u.usr_ip4_spec.ip4dst) {
+			i &= ~I40E_CLOUD_FIELD_IIP;
+		} else {
+			dev_info(&pf->pdev->dev, "Bad ip dst mask 0x%08x\n",
+				 be32_to_cpu(fsp->m_u.usr_ip4_spec.ip4dst));
+			return I40E_ERR_CONFIG;
+		}
+		break;
+
+	case UDP_V4_FLOW:
+		if (fsp->m_u.udp_ip4_spec.pdst == cpu_to_be16(0xffff)) {
+			i |= I40E_CLOUD_FIELD_IIP;
+		} else {
+			dev_info(&pf->pdev->dev, "Bad UDP dst mask 0x%04x\n",
+				 be16_to_cpu(fsp->m_u.udp_ip4_spec.pdst));
+			return I40E_ERR_CONFIG;
+		}
+		break;
+
+	default:
+		return I40E_ERR_CONFIG;
+	}
+
+	switch (be16_to_cpu(fsp->m_ext.vlan_tci)) {
+	case 0xffff:
+		if (fsp->h_ext.vlan_tci & cpu_to_be16(~0x7fff)) {
+			dev_info(&pf->pdev->dev, "Bad vlan %u\n",
+				 be16_to_cpu(fsp->h_ext.vlan_tci));
+			return I40E_ERR_CONFIG;
+		}
+		i |= I40E_CLOUD_FIELD_IVLAN;
+		break;
+	case 0:
+		i &= ~I40E_CLOUD_FIELD_IVLAN;
+		break;
+	default:
+		dev_info(&pf->pdev->dev, "Bad vlan mask %u\n",
+			 be16_to_cpu(fsp->m_ext.vlan_tci));
+		return I40E_ERR_CONFIG;
+	}
+
+	/* We already know that we're a cloud filter, so we don't need to
+	 * re-check that.
+	 */
+	if (userdef->tenant_id_valid) {
+		if (userdef->tenant_id == 0)
+			i &= ~I40E_CLOUD_FIELD_TEN_ID;
+		else
+			i |= I40E_CLOUD_FIELD_TEN_ID;
+	}
+
+	/* Make sure the flags produce a valid type */
+	if (i40e_get_cloud_filter_type(i, NULL)) {
+		dev_info(&pf->pdev->dev, "Invalid mask config, flags = %d\n",
+			 i);
+		return I40E_ERR_CONFIG;
+	}
+
+	*flags = i;
+	return I40E_SUCCESS;
+}
+
+/* i40e_add_cloud_filter_ethtool needs i40e_del_fdir_entry() */
+static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
+			       struct ethtool_rxnfc *cmd);
+
+/**
+ * i40e_add_cloud_filter_ethtool - Add cloud filter
+ * @vsi: pointer to the VSI structure
+ * @cmd: The command to get or set Rx flow classification rules
+ * @userdef: pointer to userdef field data
+ *
+ * Add cloud filter for a specific flow spec.
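+ *
+ * Cloud filters are requested through the same ethtool interface as flow
+ * director filters; the user-def field (parsed by
+ * i40e_parse_rx_flow_user_data) marks the rule as a cloud filter and may
+ * carry a tenant id and tunnel type.
+ *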
+ * Returns 0 if the filter was successfully added.
+ **/
+static int i40e_add_cloud_filter_ethtool(struct i40e_vsi *vsi,
+					 struct ethtool_rxnfc *cmd,
+					 struct i40e_rx_flow_userdef *userdef)
+{
+	struct i40e_cloud_filter *rule, *parent, *filter = NULL;
+	struct ethtool_rx_flow_spec *fsp;
+	u16 dest_seid = 0, q_index = 0;
+	struct i40e_pf *pf = vsi->back;
+	struct hlist_node *node2;
+	u32 ring, vf;
+	u8 flags = 0;
+	int ret;
+
+	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+		return -EOPNOTSUPP;
+
+	if (pf->flags & I40E_FLAG_MFP_ENABLED)
+		return -EOPNOTSUPP;
+
+	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+	    test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
+		return -EBUSY;
+
+	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+	/* The ring_cookie encodes the queue index and VF id we wish to
+	 * target, in the same format as regular flow director filters.
+	 */
+	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+		dev_warn(&pf->pdev->dev, "Cloud filters do not support the drop action.\n");
+		return -EOPNOTSUPP;
+	}
+
+	ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+	vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
+
+	if (!vf) {
+		if (ring >= vsi->num_queue_pairs)
+			return -EINVAL;
+		dest_seid = vsi->seid;
+	} else {
+		/* VFs are zero-indexed, so we subtract one here */
+		vf--;
+
+		if (vf >= pf->num_alloc_vfs)
+			return -EINVAL;
+		if (!i40e_is_l4mode_enabled() &&
+		    ring >= pf->vf[vf].num_queue_pairs)
+			return -EINVAL;
+		dest_seid = pf->vsi[pf->vf[vf].lan_vsi_idx]->seid;
+	}
+	q_index = ring;
+
+	ret = i40e_cloud_filter_mask2flags(pf, fsp, userdef, &flags);
+	if (ret)
+		return -EINVAL;
+
+	/* if filter exists with same id, delete the old one */
+	parent = NULL;
+	hlist_for_each_entry_safe(rule, node2,
+				  &pf->cloud_filter_list, cloud_node) {
+		/* filter exists with the id */
+		if (rule->id == fsp->location)
+			filter = rule;
+
+		/* bail out if we've passed the likely location in the list */
+		if (rule->id >= fsp->location)
+			break;
+
+		/* track where we left off */
+		parent = rule;
+	}
+	if (filter && (filter->id == fsp->location)) {
+		/* found it in the cloud list, so remove it */
+		ret = i40e_add_del_cloud_filter_ex(pf, filter, false);
+		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
+			return ret;
+		hlist_del(&filter->cloud_node);
+		kfree(filter);
+		pf->num_cloud_filters--;
+	} else {
+		/* not in the cloud list, so check the PF's fdir list */
+		(void)i40e_del_fdir_entry(pf->vsi[pf->lan_vsi], cmd);
+	}
+
+	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+	if (!filter)
+		return -ENOMEM;
+
+	switch (fsp->flow_type & ~FLOW_EXT) {
+	case ETHER_FLOW:
+		ether_addr_copy(filter->outer_mac,
+				fsp->h_u.ether_spec.h_dest);
+		ether_addr_copy(filter->inner_mac,
+				fsp->h_u.ether_spec.h_source);
+		break;
+
+	case IP_USER_FLOW:
+		if (flags & I40E_CLOUD_FIELD_TEN_ID) {
+			dev_info(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
+			kfree(filter);
+			return I40E_ERR_CONFIG;
+		}
+		filter->inner_ip[0] = fsp->h_u.usr_ip4_spec.ip4dst;
+		break;
+
+	case UDP_V4_FLOW:
+		filter->dst_port = fsp->h_u.udp_ip4_spec.pdst;
+		break;
+
+	default:
+		dev_info(&pf->pdev->dev, "unknown flow type 0x%x\n",
+			 (fsp->flow_type & ~FLOW_EXT));
+		kfree(filter);
+		return I40E_ERR_CONFIG;
+	}
+
+	if (userdef->tenant_id_valid)
+		filter->tenant_id = userdef->tenant_id;
+	else
+		filter->tenant_id = 0;
+	if (userdef->tunnel_type_valid)
+		filter->tunnel_type = userdef->tunnel_type;
+	else
+		filter->tunnel_type = I40E_CLOUD_TNL_TYPE_NONE;
+
+	filter->id = fsp->location;
+	filter->seid = dest_seid;
+	filter->queue_id = q_index;
+	filter->flags = flags;
+	filter->inner_vlan = fsp->h_ext.vlan_tci;
+
+	ret = i40e_add_del_cloud_filter_ex(pf, filter, true);
+	if (ret) {
+		kfree(filter);
+		return ret;
+	}
+
+	/* add filter to the ordered list */
+	INIT_HLIST_NODE(&filter->cloud_node);
+	if (parent)
+		hlist_add_behind(&filter->cloud_node, &parent->cloud_node);
+	else
+		hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
+	pf->num_cloud_filters++;
+
+	return 0;
+}
+
+/**
+ * i40e_del_cloud_filter_ethtool - delete a cloud filter
+ * @pf: pointer to the physical function struct
+ * @cmd: RX flow classification rules
+ *
+ * Delete the cloud filter for a specific flow spec.
+ * Returns 0 if the filter was successfully deleted.
+ **/
+static int i40e_del_cloud_filter_ethtool(struct i40e_pf *pf,
+					 struct ethtool_rxnfc *cmd)
+{
+	struct i40e_cloud_filter *rule, *filter = NULL;
+	struct ethtool_rx_flow_spec *fsp;
+	struct hlist_node *node2;
+
+	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+	hlist_for_each_entry_safe(rule, node2,
+				  &pf->cloud_filter_list, cloud_node) {
+		/* filter found */
+		if (rule->id == fsp->location)
+			filter = rule;
+
+		/* bail out if we've passed the likely location in the list */
+		if (rule->id >= fsp->location)
+			break;
+	}
+	if (!filter)
+		return -ENOENT;
+
+	/* remove filter from the list even if failed to remove from device */
+	(void)i40e_add_del_cloud_filter_ex(pf, filter, false);
+	hlist_del(&filter->cloud_node);
+	kfree(filter);
+	pf->num_cloud_filters--;
+
+	return 0;
+}
+
 /**
  * i40e_update_ethtool_fdir_entry - Updates the fdir filter entry
  * @vsi: Pointer to the targeted VSI
  * @input: The filter to update or NULL to indicate deletion
  * @sw_idx: Software index to the filter
- * @cmd: The command to get or set Rx flow classification rules
  *
  * This function updates (or deletes) a Flow Director entry from
  * the hlist of the corresponding PF
@@ -2843,38 +4556,38 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
  **/
 static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
 					  struct i40e_fdir_filter *input,
-					  u16 sw_idx,
-					  struct ethtool_rxnfc *cmd)
+					  u16 sw_idx)
 {
 	struct i40e_fdir_filter *rule, *parent;
 	struct i40e_pf *pf = vsi->back;
 	struct hlist_node *node2;
-	int err = -EINVAL;
+	int err = -ENOENT;
 
 	parent = NULL;
 	rule = NULL;
 
 	hlist_for_each_entry_safe(rule, node2,
 				  &pf->fdir_filter_list, fdir_node) {
-		/* hash found, or no matching entry */
+		/* rule id found, or passed its spot in the list */
 		if (rule->fd_id >= sw_idx)
 			break;
 		parent = rule;
 	}
 
-	/* if there is an old rule occupying our place remove it */
+	/* is there an old rule occupying our target filter slot? */
 	if (rule && (rule->fd_id == sw_idx)) {
 		/* Remove this rule, since we're either deleting it, or
 		 * replacing it.
 		 */
 		err = i40e_add_del_fdir(vsi, rule, false);
 		hlist_del(&rule->fdir_node);
-		kfree(rule);
 		pf->fdir_pf_active_filters--;
+
+		kfree(rule);
 	}
 
 	/* If we weren't given an input, this is a delete, so just return the
-	 * error code indicating if there was an entry at the requested slot
+	 * error code indicating if there was an entry at the requested slot.
 	 */
 	if (!input)
 		return err;
@@ -2882,12 +4595,11 @@ static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
 	/* Otherwise, install the new rule as requested */
 	INIT_HLIST_NODE(&input->fdir_node);
 
-	/* add filter to the list */
+	/* add filter to the ordered list */
 	if (parent)
 		hlist_add_behind(&input->fdir_node, &parent->fdir_node);
 	else
-		hlist_add_head(&input->fdir_node,
-			       &pf->fdir_filter_list);
+		hlist_add_head(&input->fdir_node, &pf->fdir_filter_list);
 
 	/* update counts */
 	pf->fdir_pf_active_filters++;
@@ -2983,7 +4695,7 @@ static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
 	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
 		return -EBUSY;
 
-	ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd);
+	ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location);
 
 	i40e_prune_flex_pit_list(pf);
 
@@ -3119,7 +4831,7 @@ static int i40e_add_flex_offset(struct list_head *flex_pit_list,
  * __i40e_reprogram_flex_pit - Re-program specific FLX_PIT table
  * @pf: Pointer to the PF structure
  * @flex_pit_list: list of flexible src offsets in use
- * #flex_pit_start: index to first entry for this section of the table
+ * @flex_pit_start: index to first entry for this section of the table
  *
  * In order to handle flexible data, the hardware uses a table of values
  * called the FLX_PIT table. This table is used to indicate which sections of
@@ -3233,7 +4945,7 @@ static void i40e_reprogram_flex_pit(struct i40e_pf *pf)
 
 /**
  * i40e_flow_str - Converts a flow_type into a human readable string
- * @flow_type: the flow type from a flow specification
+ * @fsp: the flow specification
  *
  * Currently only flow types we support are included here, and the string
  * value attempts to match what ethtool would use to configure this flow type.
@@ -3648,22 +5360,113 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
 
 	i40e_write_fd_input_set(pf, index, new_mask);
 
-	/* Add the new offset and update table, if necessary */
-	if (new_flex_offset) {
-		err = i40e_add_flex_offset(&pf->l4_flex_pit_list, src_offset,
-					   pit_index);
-		if (err)
-			return err;
+	/* IP_USER_FLOW filters match both IPv4/Other and IPv4/Fragmented
+	 * frames. If we're programming the input set for IPv4/Other, we also
+	 * need to program the IPv4/Fragmented input set. Since we don't have
+	 * separate support, we'll always assume and enforce that the two flow
+	 * types must have matching input sets.
+	 */
+	if (index == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER)
+		i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
+					new_mask);
+
+	/* Add the new offset and update table, if necessary */
+	if (new_flex_offset) {
+		err = i40e_add_flex_offset(&pf->l4_flex_pit_list, src_offset,
+					   pit_index);
+		if (err)
+			return err;
+
+		if (flex_l3) {
+			err = i40e_add_flex_offset(&pf->l3_flex_pit_list,
+						   src_offset,
+						   pit_index);
+			if (err)
+				return err;
+		}
+
+		i40e_reprogram_flex_pit(pf);
+	}
+
+	return 0;
+}
+
+/**
+ * i40e_match_fdir_filter - Return true if two filters match
+ * @a: pointer to filter struct
+ * @b: pointer to filter struct
+ *
+ * Returns true if the two filters match exactly the same criteria, i.e. they
+ * match the same flow type and have the same parameters. We don't need to
+ * check any input-set since all filters of the same flow type must use the
+ * same input set.
+ **/
+static bool i40e_match_fdir_filter(struct i40e_fdir_filter *a,
+				   struct i40e_fdir_filter *b)
+{
+	/* The filters do not match if any of these criteria differ. */
+	if (a->dst_ip != b->dst_ip ||
+	    a->src_ip != b->src_ip ||
+	    a->dst_port != b->dst_port ||
+	    a->src_port != b->src_port ||
+	    a->flow_type != b->flow_type ||
+	    a->ip4_proto != b->ip4_proto)
+		return false;
+
+	return true;
+}
+
+/**
+ * i40e_disallow_matching_filters - Check that new filters differ
+ * @vsi: pointer to the targeted VSI
+ * @input: new filter to check
+ *
+ * Due to hardware limitations, it is not possible for two filters that match
+ * similar criteria to be programmed at the same time. This is true for a few
+ * reasons:
+ *
+ * (a) all filters matching a particular flow type must use the same input
+ * set, that is they must match the same criteria.
+ * (b) different flow types will never match the same packet, as the flow type
+ * is decided by hardware before checking which rules apply.
+ * (c) hardware has no way to distinguish which order filters apply in.
+ *
+ * Due to this, we can't really support using the location data to order
+ * filters in the hardware parsing. It is technically possible for the user to
+ * request two filters matching the same criteria but which select different
+ * queues. In this case, rather than keep both filters in the list, we reject
+ * the second filter when the user requests adding it.
+ *
+ * This avoids needing to track location for programming the filter to
+ * hardware, and ensures that we avoid some strange scenarios involving
+ * deleting filters which match the same criteria.
+ **/
+static int i40e_disallow_matching_filters(struct i40e_vsi *vsi,
+					  struct i40e_fdir_filter *input)
+{
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_fdir_filter *rule;
+	struct hlist_node *node2;
+
+	/* Loop through every filter, and check that it doesn't match */
+	hlist_for_each_entry_safe(rule, node2,
+				  &pf->fdir_filter_list, fdir_node) {
+		/* Don't check the filters match if they share the same fd_id,
+		 * since the new filter is actually just updating the target
+		 * of the old filter.
+		 */
+		if (rule->fd_id == input->fd_id)
+			continue;
 
-		if (flex_l3) {
-			err = i40e_add_flex_offset(&pf->l3_flex_pit_list,
-						   src_offset,
-						   pit_index);
-			if (err)
-				return err;
+		/* If any filters match, then print a warning message to the
+		 * kernel message buffer and bail out.
+		 */
+		if (i40e_match_fdir_filter(rule, input)) {
+			dev_warn(&pf->pdev->dev,
+				 "Existing user defined filter %d already matches this flow.\n",
+				 rule->fd_id);
+			return -EINVAL;
 		}
-
-		i40e_reprogram_flex_pit(pf);
 	}
 
 	return 0;
@@ -3695,7 +5498,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
 	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
 		return -EOPNOTSUPP;
 
-	if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED)
+	if (test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
 		return -ENOSPC;
 
 	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
@@ -3711,6 +5514,9 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
 	if (i40e_parse_rx_flow_user_data(fsp, &userdef))
 		return -EINVAL;
 
+	if (userdef.cloud_filter)
+		return i40e_add_cloud_filter_ethtool(vsi, cmd, &userdef);
+
 	/* Extended MAC field is not supported */
 	if (fsp->flow_type & FLOW_MAC_EXT)
 		return -EINVAL;
@@ -3752,7 +5558,6 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
 	}
 
 	input = kzalloc(sizeof(*input), GFP_KERNEL);
-
 	if (!input)
 		return -ENOMEM;
 
@@ -3762,8 +5567,6 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
 	input->dest_ctl = dest_ctl;
 	input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
 	input->cnt_index  = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
-	input->dst_ip = fsp->h_u.tcp_ip4_spec.ip4src;
-	input->src_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
 	input->flow_type = fsp->flow_type & ~FLOW_EXT;
 	input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
 
@@ -3781,19 +5584,29 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
 		input->flex_offset = userdef.flex_offset;
 	}
 
-	ret = i40e_add_del_fdir(vsi, input, true);
+	/* Avoid programming two filters with identical match criteria. */
+	ret = i40e_disallow_matching_filters(vsi, input);
 	if (ret)
-		goto free_input;
+		goto free_filter_memory;
 
-	/* Add the input filter to the fdir_input_list, possibly replacing
+	/* Add the input filter to the fdir_filter_list, possibly replacing
 	 * a previous filter. Do not free the input structure after adding it
-	 * to the list as this would cause a use-after-free bug.
+	 * to the list as this would cause a use-after-free bug.
 	 */
-	i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
+	i40e_update_ethtool_fdir_entry(vsi, input, fsp->location);
+
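+	/* A rule at this location may also exist as a cloud filter; make
+	 * sure any such stale entry is removed before programming.
+	 */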
+	(void)i40e_del_cloud_filter_ethtool(pf, cmd);
+	ret = i40e_add_del_fdir(vsi, input, true);
+
+	if (ret)
+		goto remove_sw_rule;
 
 	return 0;
 
-free_input:
+remove_sw_rule:
+	hlist_del(&input->fdir_node);
+	pf->fdir_pf_active_filters--;
+free_filter_memory:
 	kfree(input);
 	return ret;
 }
@@ -3816,12 +5629,17 @@ static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
 	case ETHTOOL_SRXFH:
 		ret = i40e_set_rss_hash_opt(pf, cmd);
 		break;
+
 	case ETHTOOL_SRXCLSRLINS:
 		ret = i40e_add_fdir_ethtool(vsi, cmd);
 		break;
+
 	case ETHTOOL_SRXCLSRLDEL:
 		ret = i40e_del_fdir_entry(vsi, cmd);
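+		/* Not found in the flow director list? It may be a cloud
+		 * filter.
+		 */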
+		if (ret == -ENOENT)
+			ret = i40e_del_cloud_filter_ethtool(pf, cmd);
 		break;
+
 	default:
 		break;
 	}
@@ -3829,6 +5647,8 @@ static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
 	return ret;
 }
 
+#endif /* ETHTOOL_GRXRINGS */
+#ifdef ETHTOOL_SCHANNELS
 /**
  * i40e_max_channels - get Max number of combined channels supported
  * @vsi: vsi pointer
@@ -3841,7 +5661,7 @@ static unsigned int i40e_max_channels(struct i40e_vsi *vsi)
 
 /**
  * i40e_get_channels - Get the current channels enabled and max supported etc.
- * @netdev: network interface device structure
+ * @dev: network interface device structure
  * @ch: ethtool channels structure
  *
  * We don't support separate tx and rx queues as channels. The other count
@@ -3850,7 +5670,7 @@ static unsigned int i40e_max_channels(struct i40e_vsi *vsi)
  * q_vectors since we support a lot more queue pairs than q_vectors.
  **/
 static void i40e_get_channels(struct net_device *dev,
-			       struct ethtool_channels *ch)
+			      struct ethtool_channels *ch)
 {
 	struct i40e_netdev_priv *np = netdev_priv(dev);
 	struct i40e_vsi *vsi = np->vsi;
@@ -3869,14 +5689,14 @@ static void i40e_get_channels(struct net_device *dev,
 
 /**
  * i40e_set_channels - Set the new channels count.
- * @netdev: network interface device structure
+ * @dev: network interface device structure
  * @ch: ethtool channels structure
  *
  * The new channels count may not be the same as requested by the user
  * since it gets rounded down to a power of 2 value.
  **/
 static int i40e_set_channels(struct net_device *dev,
-			      struct ethtool_channels *ch)
+			     struct ethtool_channels *ch)
 {
 	const u8 drop = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
 	struct i40e_netdev_priv *np = netdev_priv(dev);
@@ -3892,6 +5712,12 @@ static int i40e_set_channels(struct net_device *dev,
 	if (vsi->type != I40E_VSI_MAIN)
 		return -EINVAL;
 
+	/* We do not support setting channels via ethtool when TCs are
+	 * configured through mqprio.
+	 */
+	if (pf->flags & I40E_FLAG_TC_MQPRIO)
+		return -EINVAL;
+
 	/* verify they are not requesting separate vectors */
 	if (!count || ch->rx_count || ch->tx_count)
 		return -EINVAL;
@@ -3937,6 +5763,9 @@ static int i40e_set_channels(struct net_device *dev,
 		return -EINVAL;
 }
 
+#endif /* ETHTOOL_SCHANNELS */
+
+#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH)
 /**
  * i40e_get_rxfh_key_size - get the RSS hash key size
  * @netdev: network interface device structure
@@ -3959,8 +5788,22 @@ static u32 i40e_get_rxfh_indir_size(struct net_device *netdev)
 	return I40E_HLUT_ARRAY_SIZE;
 }
 
+/**
+ * i40e_get_rxfh - get the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key
+ * @hfunc: hash function
+ *
+ * Reads the indirection table directly from the hardware. Returns 0 on
+ * success.
+ **/
+#ifdef HAVE_RXFH_HASHFUNC
 static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
 			 u8 *hfunc)
+#else
+static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
+#endif
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_vsi *vsi = np->vsi;
@@ -3968,9 +5811,11 @@ static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
 	int ret;
 	u16 i;
 
+#ifdef HAVE_RXFH_HASHFUNC
 	if (hfunc)
 		*hfunc = ETH_RSS_HASH_TOP;
 
+#endif
 	if (!indir)
 		return 0;
 
@@ -3995,12 +5840,22 @@ out:
  * @netdev: network interface device structure
  * @indir: indirection table
  * @key: hash key
+ * @hfunc: hash function to use
  *
  * Returns -EINVAL if the table specifies an invalid queue id, otherwise
  * returns 0 after programming the table.
  **/
+#ifdef HAVE_RXFH_HASHFUNC
 static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
 			 const u8 *key, const u8 hfunc)
+#else
+#ifdef HAVE_RXFH_NONCONST
+static int i40e_set_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
+#else
+static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
+			 const u8 *key)
+#endif /* HAVE_RXFH_NONCONST */
+#endif /* HAVE_RXFH_HASHFUNC */
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_vsi *vsi = np->vsi;
@@ -4008,8 +5863,18 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
 	u8 *seed = NULL;
 	u16 i;
 
+#ifdef HAVE_RXFH_HASHFUNC
 	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
 		return -EOPNOTSUPP;
+#endif
+
+	/* Verify user input. */
+	if (indir) {
+		for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++) {
+			if (indir[i] >= vsi->rss_size)
+				return -EINVAL;
+		}
+	}
 
 	if (key) {
 		if (!vsi->rss_hkey_user) {
@@ -4038,7 +5903,9 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
 	return i40e_config_rss(vsi, seed, vsi->rss_lut_user,
 			       I40E_HLUT_ARRAY_SIZE);
 }
+#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */
 
+#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
 /**
  * i40e_get_priv_flags - report device private flags
  * @dev: network interface device structure
@@ -4088,9 +5955,12 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
 static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
 {
 	struct i40e_netdev_priv *np = netdev_priv(dev);
+	u64 orig_flags, new_flags, changed_flags;
+	enum i40e_admin_queue_err adq_err;
 	struct i40e_vsi *vsi = np->vsi;
 	struct i40e_pf *pf = vsi->back;
-	u64 orig_flags, new_flags, changed_flags;
+	bool is_reset_needed;
+	i40e_status status;
 	u32 i, j;
 
 	orig_flags = READ_ONCE(pf->flags);
@@ -4132,6 +6002,12 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
 	}
 
 flags_complete:
+	changed_flags = orig_flags ^ new_flags;
+
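+	/* Some flags only take effect after a PF reset; note up front whether
+	 * any of those changed so we can reset once at the end if needed.
+	 */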
+	is_reset_needed = !!(changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
+		I40E_FLAG_LEGACY_RX | I40E_FLAG_SOURCE_PRUNING_DISABLED |
+		I40E_FLAG_DISABLE_FW_LLDP));
+
 	/* Before we finalize any flag changes, we need to perform some
 	 * checks to ensure that the changes are supported and safe.
 	 */
@@ -4141,19 +6017,30 @@ flags_complete:
 	    !(pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE))
 		return -EOPNOTSUPP;
 
-	/* Compare and exchange the new flags into place. If we failed, that
-	 * is if cmpxchg64 returns anything but the old value, this means that
-	 * something else has modified the flags variable since we copied it
-	 * originally. We'll just punt with an error and log something in the
-	 * message buffer.
+	/* If the driver detected FW LLDP was disabled on init, this flag could
+	 * be set, however we do not support _changing_ the flag:
+	 * - on XL710 if NPAR is enabled or FW API version < 1.7
+	 * - on X722 with FW API version < 1.6
+	 * There are situations where older FW versions/NPAR enabled PFs could
+	 * disable LLDP, however we _must_ not allow the user to enable/disable
+	 * LLDP with this flag on unsupported FW versions.
 	 */
-	if (cmpxchg64(&pf->flags, orig_flags, new_flags) != orig_flags) {
-		dev_warn(&pf->pdev->dev,
-			 "Unable to update pf->flags as it was modified by another thread...\n");
-		return -EAGAIN;
+	if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
+		if (!(pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) {
+			dev_warn(&pf->pdev->dev,
+				 "Device does not support changing FW LLDP\n");
+			return -EOPNOTSUPP;
+		}
 	}
 
-	changed_flags = orig_flags ^ new_flags;
+	if (((changed_flags & I40E_FLAG_RS_FEC) ||
+	     (changed_flags & I40E_FLAG_BASE_R_FEC)) &&
+	    pf->hw.device_id != I40E_DEV_ID_25G_SFP28 &&
+	    pf->hw.device_id != I40E_DEV_ID_25G_B) {
+		dev_warn(&pf->pdev->dev,
+			 "Device does not support changing FEC configuration\n");
+		return -EOPNOTSUPP;
+	}
 
 	/* Process any additional changes needed as a result of flag changes.
 	 * The changed_flags value reflects the list of bits that were
@@ -4162,8 +6049,8 @@ flags_complete:
 
 	/* Flush current ATR settings if ATR was disabled */
 	if ((changed_flags & I40E_FLAG_FD_ATR_ENABLED) &&
-	    !(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) {
-		pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
+	    !(new_flags & I40E_FLAG_FD_ATR_ENABLED)) {
+		set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
 		set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
 	}
 
@@ -4171,11 +6058,11 @@ flags_complete:
 		u16 sw_flags = 0, valid_flags = 0;
 		int ret;
 
-		if (!(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
+		if (!(new_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
 			sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
 		valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
 		ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags,
-						NULL);
+						0, NULL);
 		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
 			dev_info(&pf->pdev->dev,
 				 "couldn't set switch config bits, err %s aq_err %s\n",
@@ -4186,17 +6073,399 @@ flags_complete:
 		}
 	}
 
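+	/* Map the RS/BASE-R flag pair onto an AQ FEC request: both set means
+	 * auto, one set requests that mode, and neither set requests no FEC
+	 * (fec_cfg stays zero).
+	 */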
+	if ((changed_flags & I40E_FLAG_RS_FEC) ||
+	    (changed_flags & I40E_FLAG_BASE_R_FEC)) {
+		u8 fec_cfg = 0;
+
+		if (new_flags & I40E_FLAG_RS_FEC &&
+		    new_flags & I40E_FLAG_BASE_R_FEC) {
+			fec_cfg = I40E_AQ_SET_FEC_AUTO;
+		} else if (new_flags & I40E_FLAG_RS_FEC) {
+			fec_cfg = (I40E_AQ_SET_FEC_REQUEST_RS |
+				   I40E_AQ_SET_FEC_ABILITY_RS);
+		} else if (new_flags & I40E_FLAG_BASE_R_FEC) {
+			fec_cfg = (I40E_AQ_SET_FEC_REQUEST_KR |
+				   I40E_AQ_SET_FEC_ABILITY_KR);
+		}
+		if (i40e_set_fec_cfg(dev, fec_cfg))
+			dev_warn(&pf->pdev->dev, "Cannot change FEC config\n");
+	}
+
+	if ((changed_flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) &&
+	    (orig_flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN)) {
+		dev_err(&pf->pdev->dev,
+			"Setting link-down-on-close not supported on this port\n");
+		return -EOPNOTSUPP;
+	}
+
+	if ((changed_flags & new_flags &
+	     I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) &&
+	    (new_flags & I40E_FLAG_MFP_ENABLED))
+		dev_warn(&pf->pdev->dev,
+			 "Turning on link-down-on-close flag may affect other partitions\n");
+
+	if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
+		if (new_flags & I40E_FLAG_DISABLE_FW_LLDP) {
+			struct i40e_dcbx_config *dcbcfg;
+
+			i40e_aq_stop_lldp(&pf->hw, true, false, NULL);
+			i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
+			/* reset local_dcbx_config to default */
+			dcbcfg = &pf->hw.local_dcbx_config;
+			dcbcfg->etscfg.willing = 1;
+			dcbcfg->etscfg.maxtcs = 0;
+			dcbcfg->etscfg.tcbwtable[0] = 100;
+			for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++)
+				dcbcfg->etscfg.tcbwtable[i] = 0;
+			for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+				dcbcfg->etscfg.prioritytable[i] = 0;
+			dcbcfg->etscfg.tsatable[0] = I40E_IEEE_TSA_ETS;
+			dcbcfg->pfc.willing = 1;
+			dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+		} else {
+			status = i40e_aq_start_lldp(&pf->hw, false, NULL);
+			if (status != I40E_SUCCESS) {
+				adq_err = pf->hw.aq.asq_last_status;
+				switch (adq_err) {
+				case I40E_AQ_RC_EEXIST:
+					dev_warn(&pf->pdev->dev,
+						 "FW LLDP agent is already running\n");
+					is_reset_needed = false;
+					break;
+				case I40E_AQ_RC_EPERM:
+					dev_warn(&pf->pdev->dev,
+						 "Device configuration forbids SW from starting the LLDP agent.\n");
+					return -EINVAL;
+				default:
+					dev_warn(&pf->pdev->dev,
+						 "Starting FW LLDP agent failed: error: %s, %s\n",
+						 i40e_stat_str(&pf->hw,
+							       status),
+						 i40e_aq_str(&pf->hw,
+							     adq_err));
+					return -EINVAL;
+				}
+			}
+		}
+	}
+
+	/* Now that we've checked to ensure that the new flags are valid, load
+	 * them into place. Since we only modify flags either (a) during
+	 * initialization or (b) while holding the RTNL lock, we don't need
+	 * anything fancy here.
+	 */
+	pf->flags = new_flags;
+
 	/* Issue reset to cause things to take effect, as additional bits
 	 * are added we will need to create a mask of bits requiring reset
 	 */
-	if ((changed_flags & I40E_FLAG_VEB_STATS_ENABLED) ||
-	    ((changed_flags & I40E_FLAG_LEGACY_RX) && netif_running(dev)))
+	if (is_reset_needed)
 		i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
 
 	return 0;
 }
+#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
+
+#ifdef ETHTOOL_GMODULEINFO
+
+/**
+ * i40e_get_module_info - get (Q)SFP+ module type info
+ * @netdev: network interface device structure
+ * @modinfo: module EEPROM size and layout information structure
+ **/
+static int i40e_get_module_info(struct net_device *netdev,
+				struct ethtool_modinfo *modinfo)
+{
+	i40e_status status;
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	u32 sff8472_comp = 0;
+	u32 sff8472_swap = 0;
+	u32 sff8636_rev = 0;
+	u8 type;
+
+	/* Check if firmware supports reading module EEPROM. */
+	if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
+		netdev_err(vsi->netdev, "Module EEPROM memory read not supported. Please update the NVM image.\n");
+		return -EINVAL;
+	}
+
+	status = i40e_update_link_info(hw);
+	if (status)
+		return -EIO;
+
+	if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) {
+		netdev_err(vsi->netdev, "Cannot read module EEPROM memory. No module connected.\n");
+		return -EINVAL;
+	}
+
+	type = hw->phy.link_info.module_type[0];
+
+	switch (type) {
+	case I40E_MODULE_TYPE_SFP:
+		status = i40e_aq_get_phy_register(hw,
+				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+				I40E_I2C_EEPROM_DEV_ADDR, true,
+				I40E_MODULE_SFF_8472_COMP,
+				&sff8472_comp, NULL);
+		if (status)
+			return -EIO;
+
+		status = i40e_aq_get_phy_register(hw,
+				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+				I40E_I2C_EEPROM_DEV_ADDR, true,
+				I40E_MODULE_SFF_8472_SWAP,
+				&sff8472_swap, NULL);
+		if (status)
+			return -EIO;
+
+		/* Check if the module requires address swap to access
+		 * the other EEPROM memory page.
+		 */
+		if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) {
+			netdev_warn(vsi->netdev, "Module address swap to access page 0xA2 is not supported.\n");
+			modinfo->type = ETH_MODULE_SFF_8079;
+			modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+		} else if (sff8472_comp &&
+			   (sff8472_swap & I40E_MODULE_SFF_DIAG_CAPAB)) {
+			modinfo->type = ETH_MODULE_SFF_8472;
+			modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+		} else {
+			/* Module is not SFF-8472 compliant */
+			modinfo->type = ETH_MODULE_SFF_8079;
+			modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+		}
+		break;
+	case I40E_MODULE_TYPE_QSFP_PLUS:
+		/* Read from memory page 0. */
+		status = i40e_aq_get_phy_register(hw,
+				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+				0, true,
+				I40E_MODULE_REVISION_ADDR,
+				&sff8636_rev, NULL);
+		if (status)
+			return -EIO;
+		/* Determine revision compliance byte */
+		if (sff8636_rev > 0x02) {
+			/* Module is SFF-8636 compliant */
+			modinfo->type = ETH_MODULE_SFF_8636;
+			modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
+		} else {
+			modinfo->type = ETH_MODULE_SFF_8436;
+			modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
+		}
+		break;
+	case I40E_MODULE_TYPE_QSFP28:
+		modinfo->type = ETH_MODULE_SFF_8636;
+		modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
+		break;
+	default:
+		netdev_err(vsi->netdev, "Module type unrecognized\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * i40e_get_module_eeprom - fills buffer with (Q)SFP+ module memory contents
+ * @netdev: network interface device structure
+ * @ee: EEPROM dump request structure
+ * @data: buffer to be filled with EEPROM contents
+ **/
+static int i40e_get_module_eeprom(struct net_device *netdev,
+				  struct ethtool_eeprom *ee,
+				  u8 *data)
+{
+	i40e_status status;
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	bool is_sfp = false;
+	u32 value = 0;
+	int i;
+
+	if (!ee || !ee->len || !data)
+		return -EINVAL;
+
+	if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
+		is_sfp = true;
+
+	for (i = 0; i < ee->len; i++) {
+		u32 offset = i + ee->offset;
+		u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
+
+		/* Check if we need to access the other memory page */
+		if (is_sfp) {
+			if (offset >= ETH_MODULE_SFF_8079_LEN) {
+				offset -= ETH_MODULE_SFF_8079_LEN;
+				addr = I40E_I2C_EEPROM_DEV_ADDR2;
+			}
+		} else {
+			while (offset >= ETH_MODULE_SFF_8436_LEN) {
+				/* Compute memory page number and offset. */
+				offset -= ETH_MODULE_SFF_8436_LEN / 2;
+				addr++;
+			}
+		}
+
+		status = i40e_aq_get_phy_register(hw,
+				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+				addr, true, offset, &value, NULL);
+		if (status)
+			return -EIO;
+		data[i] = value;
+	}
+	return 0;
+}
+#endif /* ETHTOOL_GMODULEINFO */
+
+#ifdef ETHTOOL_GEEE
+static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_aq_get_phy_abilities_resp phy_cfg;
+	i40e_status status = 0;
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+
+	/* Get initial PHY capabilities */
+	status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_cfg, NULL);
+	if (status)
+		return -EAGAIN;
+
+	/* Check whether NIC configuration is compatible with Energy Efficient
+	 * Ethernet (EEE) mode.
+	 */
+	if (phy_cfg.eee_capability == 0)
+		return -EOPNOTSUPP;
+
+	edata->supported = SUPPORTED_Autoneg;
+	edata->lp_advertised = edata->supported;
+
+	/* Get current configuration */
+	status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_cfg, NULL);
+	if (status)
+		return -EAGAIN;
+
+	edata->advertised = phy_cfg.eee_capability ? SUPPORTED_Autoneg : 0U;
+	edata->eee_enabled = !!edata->advertised;
+	edata->tx_lpi_enabled = pf->stats.tx_lpi_status;
+
+	edata->eee_active = pf->stats.tx_lpi_status && pf->stats.rx_lpi_status;
+
+	return 0;
+}
+#endif /* ETHTOOL_GEEE */
+
+#ifdef ETHTOOL_SEEE
+static int i40e_is_eee_param_supported(struct net_device *netdev,
+				       struct ethtool_eee *edata)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_ethtool_not_used {
+		u32 value;
+		const char *name;
+	} param[] = {
+		{edata->advertised & ~SUPPORTED_Autoneg, "advertise"},
+		{edata->tx_lpi_timer, "tx-timer"},
+		{edata->tx_lpi_enabled != pf->stats.tx_lpi_status, "tx-lpi"}
+	};
+	int i;
+
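+	/* Reject requests that touch any EEE knob this driver cannot change */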
+	for (i = 0; i < ARRAY_SIZE(param); i++) {
+		if (param[i].value) {
+			netdev_info(netdev,
+				    "EEE setting %s not supported\n",
+				    param[i].name);
+			return -EOPNOTSUPP;
+		}
+	}
+
+	return 0;
+}
+
+static int i40e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_aq_get_phy_abilities_resp abilities;
+	i40e_status status = I40E_SUCCESS;
+	struct i40e_aq_set_phy_config config;
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	__le16 eee_capability;
+
+	/* Deny parameters we don't support */
+	if (i40e_is_eee_param_supported(netdev, edata))
+		return -EOPNOTSUPP;
+
+	/* Get initial PHY capabilities */
+	status = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
+					      NULL);
+	if (status)
+		return -EAGAIN;
+
+	/* Check whether NIC configuration is compatible with Energy Efficient
+	 * Ethernet (EEE) mode.
+	 */
+	if (abilities.eee_capability == 0)
+		return -EOPNOTSUPP;
+
+	/* Cache initial EEE capability */
+	eee_capability = abilities.eee_capability;
+
+	/* Get current PHY configuration */
+	status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
+					      NULL);
+	if (status)
+		return -EAGAIN;
+
+	/* Cache current PHY configuration */
+	config.phy_type = abilities.phy_type;
+	config.link_speed = abilities.link_speed;
+	config.abilities = abilities.abilities |
+			   I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+	config.eeer = abilities.eeer_val;
+	config.low_power_ctrl = abilities.d3_lpan;
+	config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
+			    I40E_AQ_PHY_FEC_CONFIG_MASK;
+
+	/* Set desired EEE state */
+	if (edata->eee_enabled) {
+		config.eee_capability = eee_capability;
+		config.eeer |= I40E_PRTPM_EEER_TX_LPI_EN_MASK;
+	} else {
+		config.eee_capability = 0;
+		config.eeer &= ~I40E_PRTPM_EEER_TX_LPI_EN_MASK;
+	}
+
+	/* Apply modified PHY configuration */
+	status = i40e_aq_set_phy_config(hw, &config, NULL);
+	if (status)
+		return -EAGAIN;
+
+	return 0;
+}
+#endif /* ETHTOOL_SEEE */
+
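+/* In recovery mode only driver info and NVM (EEPROM) access are exposed */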
+static const struct ethtool_ops i40e_ethtool_recovery_mode_ops = {
+	.get_drvinfo		= i40e_get_drvinfo,
+	.set_eeprom		= i40e_set_eeprom,
+	.get_eeprom_len		= i40e_get_eeprom_len,
+	.get_eeprom		= i40e_get_eeprom,
+};
 
 static const struct ethtool_ops i40e_ethtool_ops = {
+#ifndef ETHTOOL_GLINKSETTINGS
+	.get_settings		= i40e_get_settings,
+	.set_settings		= i40e_set_settings,
+#endif
 	.get_drvinfo		= i40e_get_drvinfo,
 	.get_regs_len		= i40e_get_regs_len,
 	.get_regs		= i40e_get_regs,
@@ -4213,31 +6482,141 @@ static const struct ethtool_ops i40e_ethtool_ops = {
 	.set_pauseparam		= i40e_set_pauseparam,
 	.get_msglevel		= i40e_get_msglevel,
 	.set_msglevel		= i40e_set_msglevel,
+#ifndef HAVE_NDO_SET_FEATURES
+	.get_rx_csum		= i40e_get_rx_csum,
+	.set_rx_csum		= i40e_set_rx_csum,
+	.get_tx_csum		= i40e_get_tx_csum,
+	.set_tx_csum		= i40e_set_tx_csum,
+	.get_sg			= ethtool_op_get_sg,
+	.set_sg			= ethtool_op_set_sg,
+	.get_tso		= ethtool_op_get_tso,
+	.set_tso		= i40e_set_tso,
+#ifdef ETHTOOL_GFLAGS
+	.get_flags		= ethtool_op_get_flags,
+	.set_flags		= i40e_set_flags,
+#endif
+#endif /* HAVE_NDO_SET_FEATURES */
+#ifdef ETHTOOL_GRXRINGS
 	.get_rxnfc		= i40e_get_rxnfc,
 	.set_rxnfc		= i40e_set_rxnfc,
+#ifdef ETHTOOL_SRXNTUPLE
+	.set_rx_ntuple		= i40e_set_rx_ntuple,
+#endif
+#endif
+#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
+	.self_test_count	= i40e_diag_test_count,
+#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
 	.self_test		= i40e_diag_test,
 	.get_strings		= i40e_get_strings,
+#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+#ifdef ETHTOOL_GEEE
+	.get_eee		= i40e_get_eee,
+#endif /* ETHTOOL_GEEE */
+#ifdef ETHTOOL_SEEE
+	.set_eee		= i40e_set_eee,
+#endif /* ETHTOOL_SEEE */
+#ifdef HAVE_ETHTOOL_SET_PHYS_ID
 	.set_phys_id		= i40e_set_phys_id,
+#else
+	.phys_id		= i40e_phys_id,
+#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
+#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
+	.get_stats_count	= i40e_get_stats_count,
+#else /* HAVE_ETHTOOL_GET_SSET_COUNT */
 	.get_sset_count		= i40e_get_sset_count,
+	.get_priv_flags		= i40e_get_priv_flags,
+	.set_priv_flags		= i40e_set_priv_flags,
+#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
 	.get_ethtool_stats	= i40e_get_ethtool_stats,
+#ifdef HAVE_ETHTOOL_GET_PERM_ADDR
+	.get_perm_addr		= ethtool_op_get_perm_addr,
+#endif
 	.get_coalesce		= i40e_get_coalesce,
 	.set_coalesce		= i40e_set_coalesce,
+#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH)
 	.get_rxfh_key_size	= i40e_get_rxfh_key_size,
 	.get_rxfh_indir_size	= i40e_get_rxfh_indir_size,
 	.get_rxfh		= i40e_get_rxfh,
 	.set_rxfh		= i40e_set_rxfh,
+#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */
+#ifdef ETHTOOL_SCHANNELS
 	.get_channels		= i40e_get_channels,
 	.set_channels		= i40e_set_channels,
+#endif
+#ifdef ETHTOOL_GMODULEINFO
+	.get_module_info	= i40e_get_module_info,
+	.get_module_eeprom	= i40e_get_module_eeprom,
+#endif
+#ifdef HAVE_ETHTOOL_GET_TS_INFO
 	.get_ts_info		= i40e_get_ts_info,
-	.get_priv_flags		= i40e_get_priv_flags,
-	.set_priv_flags		= i40e_set_priv_flags,
+#endif /* HAVE_ETHTOOL_GET_TS_INFO */
+#ifdef ETHTOOL_PERQUEUE
 	.get_per_queue_coalesce	= i40e_get_per_queue_coalesce,
 	.set_per_queue_coalesce	= i40e_set_per_queue_coalesce,
-	.get_link_ksettings	= i40e_get_link_ksettings,
-	.set_link_ksettings	= i40e_set_link_ksettings,
+#endif /* ETHTOOL_PERQUEUE */
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
+#ifdef ETHTOOL_GLINKSETTINGS
+	.get_link_ksettings = i40e_get_link_ksettings,
+	.set_link_ksettings = i40e_set_link_ksettings,
+#endif /* ETHTOOL_GLINKSETTINGS */
+#ifdef ETHTOOL_GFECPARAM
+	.get_fecparam = i40e_get_fec_param,
+	.set_fecparam = i40e_set_fec_param,
+#endif /* ETHTOOL_GFECPARAM */
+#ifdef HAVE_DDP_PROFILE_UPLOAD_SUPPORT
+	.flash_device = i40e_ddp_flash,
+#endif /* HAVE_DDP_PROFILE_UPLOAD_SUPPORT */
+};
+
+#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+static const struct ethtool_ops_ext i40e_ethtool_ops_ext = {
+	.size			= sizeof(struct ethtool_ops_ext),
+	.get_ts_info		= i40e_get_ts_info,
+	.set_phys_id		= i40e_set_phys_id,
+	.get_channels		= i40e_get_channels,
+	.set_channels		= i40e_set_channels,
+#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH)
+	.get_rxfh_key_size	= i40e_get_rxfh_key_size,
+	.get_rxfh_indir_size	= i40e_get_rxfh_indir_size,
+	.get_rxfh		= i40e_get_rxfh,
+	.set_rxfh		= i40e_set_rxfh,
+#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */
+#ifdef ETHTOOL_GEEE
+	.get_eee		= i40e_get_eee,
+#endif /* ETHTOOL_GEEE */
+#ifdef ETHTOOL_SEEE
+	.set_eee		= i40e_set_eee,
+#endif /* ETHTOOL_SEEE */
+#ifdef ETHTOOL_GMODULEINFO
+	.get_module_info	= i40e_get_module_info,
+	.get_module_eeprom	= i40e_get_module_eeprom,
+#endif
 };
 
 void i40e_set_ethtool_ops(struct net_device *netdev)
 {
-	netdev->ethtool_ops = &i40e_ethtool_ops;
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf		*pf = np->vsi->back;
+
+	if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
+		netdev->ethtool_ops = &i40e_ethtool_recovery_mode_ops;
+	} else {
+		netdev->ethtool_ops = &i40e_ethtool_ops;
+		set_ethtool_ops_ext(netdev, &i40e_ethtool_ops_ext);
+	}
+}
+#else
+void i40e_set_ethtool_ops(struct net_device *netdev)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf		*pf = np->vsi->back;
+
+	if (test_bit(__I40E_RECOVERY_MODE, pf->state))
+		netdev->ethtool_ops = &i40e_ethtool_recovery_mode_ops;
+	else
+		netdev->ethtool_ops = &i40e_ethtool_ops;
 }
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
+#endif /* SIOCETHTOOL */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool_stats.h b/drivers/net/ethernet/intel/i40e/i40e_ethtool_stats.h
new file mode 100644
index 000000000..dc26d38ee
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool_stats.h
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
+
+/* ethtool statistics helpers */
+
+/**
+ * struct i40e_stats - definition for an ethtool statistic
+ * @stat_string: statistic name to display in ethtool -S output
+ * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64)
+ * @stat_offset: offsetof() the stat from a base pointer
+ *
+ * This structure defines a statistic to be added to the ethtool stats buffer.
+ * It defines a statistic as offset from a common base pointer. Stats should
+ * be defined in constant arrays using the I40E_STAT macro, with every element
+ * of the array using the same _type for calculating the sizeof_stat and
+ * stat_offset.
+ *
+ * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or
+ * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from
+ * the i40e_add_one_ethtool_stat() helper function.
+ *
+ * The @stat_string is interpreted as a format string, allowing formatted
+ * values to be inserted while looping over multiple structures for a given
+ * statistics array. Thus, every statistic string in an array should have the
+ * same type and number of format specifiers, to be formatted by variadic
+ * arguments to the i40e_add_stat_strings() helper macro.
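+ *
+ * For example, the queue stat string "%s-%u.packets" is formatted with a
+ * direction string and queue number to yield names such as "tx-0.packets".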
+ **/
+struct i40e_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int sizeof_stat;
+	int stat_offset;
+};
+
+/* Helper macro to define an i40e_stat structure with proper size and type.
+ * Use this when defining constant statistics arrays. Note that @_type expects
+ * only a type name and is used multiple times.
+ */
+#define I40E_STAT(_type, _name, _stat) { \
+	.stat_string = _name, \
+	.sizeof_stat = sizeof_field(_type, _stat), \
+	.stat_offset = offsetof(_type, _stat) \
+}
+
+/* Helper macro for defining some statistics directly copied from the netdev
+ * stats structure.
+ */
+#ifdef HAVE_NDO_GET_STATS64
+#define I40E_NETDEV_STAT(_net_stat) \
+	I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat)
+#else
+#define I40E_NETDEV_STAT(_net_stat) \
+	I40E_STAT(struct net_device_stats, #_net_stat, _net_stat)
+#endif
+
+/* Helper macro for defining some statistics related to queues */
+#define I40E_QUEUE_STAT(_name, _stat) \
+	I40E_STAT(struct i40e_ring, _name, _stat)
+
+/* Stats associated with a Tx or Rx ring */
+static const struct i40e_stats i40e_gstrings_queue_stats[] = {
+	I40E_QUEUE_STAT("%s-%u.packets", stats.packets),
+	I40E_QUEUE_STAT("%s-%u.bytes", stats.bytes),
+};
+
+#ifdef HAVE_XDP_SUPPORT
+/* Stats associated with Rx ring's XDP prog */
+static const struct i40e_stats i40e_gstrings_rx_queue_xdp_stats[] = {
+	I40E_QUEUE_STAT("%s-%u.xdp.pass", xdp_stats.xdp_pass),
+	I40E_QUEUE_STAT("%s-%u.xdp.drop", xdp_stats.xdp_drop),
+	I40E_QUEUE_STAT("%s-%u.xdp.tx", xdp_stats.xdp_tx),
+	I40E_QUEUE_STAT("%s-%u.xdp.unknown", xdp_stats.xdp_unknown),
+	I40E_QUEUE_STAT("%s-%u.xdp.redirect", xdp_stats.xdp_redirect),
+	I40E_QUEUE_STAT("%s-%u.xdp.redirect_fail", xdp_stats.xdp_redirect_fail),
+};
+#endif
+
+/**
+ * i40e_add_one_ethtool_stat - copy the stat into the supplied buffer
+ * @data: location to store the stat value
+ * @pointer: basis for where to copy from
+ * @stat: the stat definition
+ *
+ * Copies the stat data defined by the pointer and stat structure pair into
+ * the memory supplied as data. Used to implement i40e_add_ethtool_stats and
+ * i40e_add_queue_stats. If the pointer is null, data will be zero'd.
+ **/
+static void
+i40e_add_one_ethtool_stat(u64 *data, void *pointer,
+			  const struct i40e_stats *stat)
+{
+	char *p;
+
+	if (!pointer) {
+		/* ensure that the ethtool data buffer is zero'd for any stats
+		 * which don't have a valid pointer.
+		 */
+		*data = 0;
+		return;
+	}
+
+	p = (char *)pointer + stat->stat_offset;
+	switch (stat->sizeof_stat) {
+	case sizeof(u64):
+		*data = *((u64 *)p);
+		break;
+	case sizeof(u32):
+		*data = *((u32 *)p);
+		break;
+	case sizeof(u16):
+		*data = *((u16 *)p);
+		break;
+	case sizeof(u8):
+		*data = *((u8 *)p);
+		break;
+	default:
+		WARN_ONCE(1, "unexpected stat size for %s",
+			  stat->stat_string);
+		*data = 0;
+	}
+}
+
+/**
+ * __i40e_add_ethtool_stats - copy stats into the ethtool supplied buffer
+ * @data: ethtool stats buffer
+ * @pointer: location to copy stats from
+ * @stats: array of stats to copy
+ * @size: the size of the stats definition
+ *
+ * Copy the stats defined by the stats array using the pointer as a base into
+ * the data buffer supplied by ethtool. Updates the data pointer to point to
+ * the next empty location for successive calls to __i40e_add_ethtool_stats.
+ * If pointer is null, set the data values to zero and update the pointer to
+ * skip these stats.
+ **/
+static void
+__i40e_add_ethtool_stats(u64 **data, void *pointer,
+			 const struct i40e_stats stats[],
+			 const unsigned int size)
+{
+	unsigned int i;
+
+	for (i = 0; i < size; i++)
+		i40e_add_one_ethtool_stat((*data)++, pointer, &stats[i]);
+}
+
+/**
+ * i40e_add_ethtool_stats - copy stats into ethtool supplied buffer
+ * @data: ethtool stats buffer
+ * @pointer: location where stats are stored
+ * @stats: static const array of stat definitions
+ *
+ * Macro to ease the use of __i40e_add_ethtool_stats by taking a static
+ * constant stats array and passing the ARRAY_SIZE(). This avoids typos by
+ * ensuring that we pass the size associated with the given stats array.
+ *
+ * The parameter @stats is evaluated twice, so parameters with side effects
+ * should be avoided.
+ **/
+#define i40e_add_ethtool_stats(data, pointer, stats) \
+	__i40e_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats))
+
+/**
+ * i40e_add_queue_stats - copy queue statistics into supplied buffer
+ * @data: ethtool stats buffer
+ * @ring: the ring to copy
+ *
+ * Queue statistics must be copied while protected by
+ * u64_stats_fetch_begin_irq, so we can't directly use i40e_add_ethtool_stats.
+ * Assumes that queue stats are defined in i40e_gstrings_queue_stats. If the
+ * ring pointer is null, zero out the queue stat values and update the data
+ * pointer. Otherwise safely copy the stats from the ring into the supplied
+ * buffer and update the data pointer when finished.
+ *
+ * This function expects to be called while under rcu_read_lock().
+ **/
+static void
+i40e_add_queue_stats(u64 **data, struct i40e_ring *ring)
+{
+	const unsigned int size = ARRAY_SIZE(i40e_gstrings_queue_stats);
+	const struct i40e_stats *stats = i40e_gstrings_queue_stats;
+#ifdef HAVE_NDO_GET_STATS64
+	unsigned int start;
+#endif
+	unsigned int i;
+
+	/* To avoid invalid statistics values, ensure that we keep retrying
+	 * the copy until we get a consistent value according to
+	 * u64_stats_fetch_retry_irq. But first, make sure our ring is
+	 * non-null before attempting to access its syncp.
+	 */
+#ifdef HAVE_NDO_GET_STATS64
+	do {
+		start = !ring ? 0 : u64_stats_fetch_begin_irq(&ring->syncp);
+#endif
+		for (i = 0; i < size; i++) {
+			i40e_add_one_ethtool_stat(&(*data)[i], ring,
+						  &stats[i]);
+		}
+#ifdef HAVE_NDO_GET_STATS64
+	} while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start));
+#endif
+
+	/* Once we successfully copy the stats in, update the data pointer */
+	*data += size;
+}
+
+#ifdef HAVE_XDP_SUPPORT
+/**
+ * i40e_add_rx_queue_xdp_stats - copy XDP statistics into supplied buffer
+ * @data: ethtool stats buffer
+ * @rx_ring: the rx ring to copy
+ *
+ * RX queue XDP statistics must be copied while protected by
+ * u64_stats_fetch_begin_irq, so we can't directly use i40e_add_ethtool_stats.
+ * Assumes that queue stats are defined in i40e_gstrings_rx_queue_xdp_stats. If
+ * the ring pointer is null, zero out the queue stat values and update the data
+ * pointer. Otherwise safely copy the stats from the ring into the supplied
+ * buffer and update the data pointer when finished.
+ *
+ * This function expects to be called while under rcu_read_lock().
+ **/
+static void
+i40e_add_rx_queue_xdp_stats(u64 **data, struct i40e_ring *rx_ring)
+{
+	const unsigned int xdp_size =
+		ARRAY_SIZE(i40e_gstrings_rx_queue_xdp_stats);
+	const struct i40e_stats *xdp_stats = i40e_gstrings_rx_queue_xdp_stats;
+#ifdef HAVE_NDO_GET_STATS64
+	unsigned int start;
+#endif
+	unsigned int i;
+
+	/* To avoid invalid statistics values, ensure that we keep retrying
+	 * the copy until we get a consistent value according to
+	 * u64_stats_fetch_retry_irq. But first, make sure our ring is
+	 * non-null before attempting to access its syncp.
+	 */
+#ifdef HAVE_NDO_GET_STATS64
+	do {
+		start = !rx_ring ? 0 :
+			u64_stats_fetch_begin_irq(&rx_ring->syncp);
+#endif
+		for (i = 0; i < xdp_size; i++) {
+			i40e_add_one_ethtool_stat(&(*data)[i], rx_ring,
+						  &xdp_stats[i]);
+		}
+#ifdef HAVE_NDO_GET_STATS64
+	} while (rx_ring && u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
+#endif
+
+	/* Once we successfully copy the stats in, update the data pointer */
+	*data += xdp_size;
+}
+#endif
+
+/**
+ * __i40e_add_stat_strings - copy stat strings into ethtool buffer
+ * @p: ethtool supplied buffer
+ * @stats: stat definitions array
+ * @size: size of the stats array
+ *
+ * Format and copy the strings described by stats into the buffer pointed at
+ * by p.
+ **/
+static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[],
+				    const unsigned int size, ...)
+{
+	unsigned int i;
+
+	for (i = 0; i < size; i++) {
+		va_list args;
+
+		va_start(args, size);
+		vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args);
+		*p += ETH_GSTRING_LEN;
+		va_end(args);
+	}
+}
+
+/**
+ * i40e_add_stat_strings - copy stat strings into ethtool buffer
+ * @p: ethtool supplied buffer
+ * @stats: stat definitions array
+ *
+ * Format and copy the strings described by the const static stats value into
+ * the buffer pointed at by p.
+ *
+ * The parameter @stats is evaluated twice, so parameters with side effects
+ * should be avoided. Additionally, stats must be an array such that
+ * ARRAY_SIZE can be called on it.
+ **/
+#define i40e_add_stat_strings(p, stats, ...) \
+	__i40e_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_filters.c b/drivers/net/ethernet/intel/i40e/i40e_filters.c
new file mode 100644
index 000000000..ce301a41e
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_filters.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
+
+#include "i40e_filters.h"
+
+/**
+ * __i40e_del_filter - Remove a specific filter from the VSI
+ * @vsi: VSI to remove from
+ * @f: the filter to remove from the list
+ *
+ * This function should be called instead of i40e_del_filter only if you know
+ * the exact filter you will remove already, such as via i40e_find_filter or
+ * i40e_find_mac.
+ *
+ * NOTE: This function is expected to be called with mac_filter_hash_lock
+ * being held.
+ * ANOTHER NOTE: This function MUST be called from within the context of
+ * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
+ * instead of list_for_each_entry().
+ **/
+void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
+{
+	if (!f)
+		return;
+
+	/* If the filter was never added to firmware then we can just delete it
+	 * directly and we don't want to set the status to remove or else an
+	 * admin queue command will unnecessarily fire.
+	 */
+	if (f->state == I40E_FILTER_FAILED || f->state == I40E_FILTER_NEW) {
+		hash_del(&f->hlist);
+		kfree(f);
+	} else {
+		f->state = I40E_FILTER_REMOVE;
+	}
+
+	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+	set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
+}
+
diff --git a/drivers/net/ethernet/intel/i40e/i40e_filters.h b/drivers/net/ethernet/intel/i40e/i40e_filters.h
new file mode 100644
index 000000000..7f618fdab
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_filters.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
+
+#ifndef _I40E_FILTERS_H_
+#define _I40E_FILTERS_H_
+
+#include "i40e.h"
+
+void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f);
+
+#endif /* _I40E_FILTERS_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_helper.h b/drivers/net/ethernet/intel/i40e/i40e_helper.h
new file mode 100644
index 000000000..4e25e230c
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_helper.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
+
+#ifndef _I40E_HELPER_H_
+#define _I40E_HELPER_H_
+
+#include "i40e_alloc.h"
+
+/**
+ * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
+ * @hw:   pointer to the HW structure
+ * @mem:  ptr to mem struct to fill out
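+ * @mtype: memory type, unused in the Linux implementation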
+ * @size: size of memory requested
+ * @alignment: what to align the allocation to
+ **/
+inline int i40e_allocate_dma_mem_d(struct i40e_hw *hw,
+				   struct i40e_dma_mem *mem,
+				   __always_unused enum i40e_memory_type mtype,
+				   u64 size, u32 alignment)
+{
+	struct i40e_pf *nf = (struct i40e_pf *)hw->back;
+
+	mem->size = ALIGN(size, alignment);
+#ifdef HAVE_DMA_ALLOC_COHERENT_ZEROES_MEM
+	mem->va = dma_alloc_coherent(&nf->pdev->dev, mem->size,
+				     &mem->pa, GFP_KERNEL);
+#else
+	mem->va = dma_zalloc_coherent(&nf->pdev->dev, mem->size,
+				      &mem->pa, GFP_KERNEL);
+#endif
+	if (!mem->va)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/**
+ * i40e_free_dma_mem_d - OS specific memory free for shared code
+ * @hw:   pointer to the HW structure
+ * @mem:  ptr to mem struct to free
+ **/
+inline int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
+{
+	struct i40e_pf *nf = (struct i40e_pf *)hw->back;
+
+	dma_free_coherent(&nf->pdev->dev, mem->size, mem->va, mem->pa);
+	mem->va = NULL;
+	mem->pa = 0;
+	mem->size = 0;
+
+	return 0;
+}
+
+/**
+ * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
+ * @hw:   pointer to the HW structure
+ * @mem:  ptr to mem struct to fill out
+ * @size: size of memory requested
+ **/
+inline int i40e_allocate_virt_mem_d(struct i40e_hw *hw,
+				    struct i40e_virt_mem *mem,
+				    u32 size)
+{
+	mem->size = size;
+	mem->va = kzalloc(size, GFP_KERNEL);
+
+	if (!mem->va)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/**
+ * i40e_free_virt_mem_d - OS specific memory free for shared code
+ * @hw:   pointer to the HW structure
+ * @mem:  ptr to mem struct to free
+ **/
+inline int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
+{
+	/* it's ok to kfree a NULL pointer */
+	kfree(mem->va);
+	mem->va = NULL;
+	mem->size = 0;
+
+	return 0;
+}
+
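+/* Illustrative pairing of the two DMA helpers above (i40e_mem_reserved
+ * stands in for any enum i40e_memory_type value; the type is ignored on
+ * Linux, as the __always_unused annotation indicates):
+ *
+ *	struct i40e_dma_mem mem;
+ *
+ *	if (!i40e_allocate_dma_mem_d(hw, &mem, i40e_mem_reserved,
+ *				     4096, 4096)) {
+ *		... use mem.va (kernel virtual) and mem.pa (bus address) ...
+ *		i40e_free_dma_mem_d(hw, &mem);
+ *	}
+ */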
+/* prototypes for the non-static spinlock wrappers defined below */
+inline void i40e_destroy_spinlock_d(struct i40e_spinlock *sp);
+inline void i40e_acquire_spinlock_d(struct i40e_spinlock *sp);
+inline void i40e_release_spinlock_d(struct i40e_spinlock *sp);
+
+/**
+ * i40e_init_spinlock_d - OS specific spinlock init for shared code
+ * @sp: pointer to a spinlock declared in driver space
+ **/
+static inline void i40e_init_spinlock_d(struct i40e_spinlock *sp)
+{
+	mutex_init((struct mutex *)sp);
+}
+
+/**
+ * i40e_acquire_spinlock_d - OS specific spinlock acquire for shared code
+ * @sp: pointer to a spinlock declared in driver space
+ **/
+inline void i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
+{
+	mutex_lock((struct mutex *)sp);
+}
+
+/**
+ * i40e_release_spinlock_d - OS specific spinlock release for shared code
+ * @sp: pointer to a spinlock declared in driver space
+ **/
+inline void i40e_release_spinlock_d(struct i40e_spinlock *sp)
+{
+	mutex_unlock((struct mutex *)sp);
+}
+
+/**
+ * i40e_destroy_spinlock_d - OS specific spinlock destroy for shared code
+ * @sp: pointer to a spinlock declared in driver space
+ **/
+inline void i40e_destroy_spinlock_d(struct i40e_spinlock *sp)
+{
+	mutex_destroy((struct mutex *)sp);
+}
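+
+/* Note: despite the "spinlock" naming kept for the shared code, these
+ * wrappers are backed by a struct mutex and may sleep, so they may only
+ * be taken from process context, e.g. around admin queue access:
+ *
+ *	i40e_acquire_spinlock_d(&hw->aq.asq_spinlock);
+ *	... issue admin queue command ...
+ *	i40e_release_spinlock_d(&hw->aq.asq_spinlock);
+ */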
+#endif /* _I40E_HELPER_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index a7c7b1d9b..5fb10c407 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -1,35 +1,14 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
 #include "i40e_osdep.h"
 #include "i40e_register.h"
 #include "i40e_status.h"
 #include "i40e_alloc.h"
 #include "i40e_hmc.h"
+#ifndef I40E_NO_TYPE_HEADER
 #include "i40e_type.h"
+#endif
 
 /**
  * i40e_add_sd_table_entry - Adds a segment descriptor to the table
@@ -45,11 +24,11 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
 					      enum i40e_sd_entry_type type,
 					      u64 direct_mode_sz)
 {
-	enum i40e_memory_type mem_type __attribute__((unused));
+	i40e_status ret_code = I40E_SUCCESS;
 	struct i40e_hmc_sd_entry *sd_entry;
+	enum   i40e_memory_type mem_type;
 	bool dma_mem_alloc_done = false;
 	struct i40e_dma_mem mem;
-	i40e_status ret_code = I40E_SUCCESS;
 	u64 alloc_len;
 
 	if (NULL == hmc_info->sd_table.sd_entry) {
@@ -89,9 +68,13 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
 			sd_entry->u.pd_table.pd_entry =
 				(struct i40e_hmc_pd_entry *)
 				sd_entry->u.pd_table.pd_entry_virt_mem.va;
-			sd_entry->u.pd_table.pd_page_addr = mem;
+			i40e_memcpy(&sd_entry->u.pd_table.pd_page_addr,
+				    &mem, sizeof(struct i40e_dma_mem),
+				    I40E_NONDMA_TO_NONDMA);
 		} else {
-			sd_entry->u.bp.addr = mem;
+			i40e_memcpy(&sd_entry->u.bp.addr,
+				    &mem, sizeof(struct i40e_dma_mem),
+				    I40E_NONDMA_TO_NONDMA);
 			sd_entry->u.bp.sd_pd_index = sd_index;
 		}
 		/* initialize the sd entry */
@@ -104,7 +87,7 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
 	if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type)
 		I40E_INC_BP_REFCNT(&sd_entry->u.bp);
 exit:
-	if (ret_code)
+	if (I40E_SUCCESS != ret_code)
 		if (dma_mem_alloc_done)
 			i40e_free_dma_mem(hw, &mem);
 
@@ -133,7 +116,7 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
 					      u32 pd_index,
 					      struct i40e_dma_mem *rsrc_pg)
 {
-	i40e_status ret_code = 0;
+	i40e_status ret_code = I40E_SUCCESS;
 	struct i40e_hmc_pd_table *pd_table;
 	struct i40e_hmc_pd_entry *pd_entry;
 	struct i40e_dma_mem mem;
@@ -171,7 +154,8 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
 			pd_entry->rsrc_pg = false;
 		}
 
-		pd_entry->bp.addr = *page;
+		i40e_memcpy(&pd_entry->bp.addr, page,
+			    sizeof(struct i40e_dma_mem), I40E_NONDMA_TO_NONDMA);
 		pd_entry->bp.sd_pd_index = pd_index;
 		pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
 		/* Set page address and valid bit */
@@ -181,7 +165,8 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
 		pd_addr += rel_pd_idx;
 
 		/* Add the backing page physical address in the pd entry */
-		memcpy(pd_addr, &page_desc, sizeof(u64));
+		i40e_memcpy(pd_addr, &page_desc, sizeof(u64),
+			    I40E_NONDMA_TO_DMA);
 
 		pd_entry->sd_index = sd_idx;
 		pd_entry->valid = true;
@@ -197,7 +182,6 @@ exit:
  * @hw: pointer to our HW structure
  * @hmc_info: pointer to the HMC configuration information structure
  * @idx: the page index
- * @is_pf: distinguishes a VF from a PF
  *
  * This function:
 *	1. Marks the entry in pd table (for paged address mode) or in sd table
@@ -212,7 +196,7 @@ i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
 					struct i40e_hmc_info *hmc_info,
 					u32 idx)
 {
-	i40e_status ret_code = 0;
+	i40e_status ret_code = I40E_SUCCESS;
 	struct i40e_hmc_pd_entry *pd_entry;
 	struct i40e_hmc_pd_table *pd_table;
 	struct i40e_hmc_sd_entry *sd_entry;
@@ -245,13 +229,13 @@ i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
 	I40E_DEC_PD_REFCNT(pd_table);
 	pd_addr = (u64 *)pd_table->pd_page_addr.va;
 	pd_addr += rel_pd_idx;
-	memset(pd_addr, 0, sizeof(u64));
+	i40e_memset(pd_addr, 0, sizeof(u64), I40E_DMA_MEM);
 	I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
 
 	/* free memory here */
 	if (!pd_entry->rsrc_pg)
-		ret_code = i40e_free_dma_mem(hw, &pd_entry->bp.addr);
-	if (ret_code)
+		ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
+	if (I40E_SUCCESS != ret_code)
 		goto exit;
 	if (!pd_table->ref_cnt)
 		i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
@@ -267,7 +251,7 @@ exit:
 i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
 					     u32 idx)
 {
-	i40e_status ret_code = 0;
+	i40e_status ret_code = I40E_SUCCESS;
 	struct i40e_hmc_sd_entry *sd_entry;
 
 	/* get the entry and decrease its ref counter */
@@ -305,7 +289,7 @@ i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
 	sd_entry = &hmc_info->sd_table.sd_entry[idx];
 	I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
 
-	return i40e_free_dma_mem(hw, &sd_entry->u.bp.addr);
+	return i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
 }
 
 /**
@@ -316,7 +300,7 @@ i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
 i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
 					       u32 idx)
 {
-	i40e_status ret_code = 0;
+	i40e_status ret_code = I40E_SUCCESS;
 	struct i40e_hmc_sd_entry *sd_entry;
 
 	sd_entry = &hmc_info->sd_table.sd_entry[idx];
@@ -353,5 +337,5 @@ i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
 	sd_entry = &hmc_info->sd_table.sd_entry[idx];
 	I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
 
-	return  i40e_free_dma_mem(hw, &sd_entry->u.pd_table.pd_page_addr);
+	return i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
 }
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
index d90669211..ddfef1603 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -1,28 +1,5 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
 #ifndef _I40E_HMC_H_
 #define _I40E_HMC_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index daa920442..4e55a095e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -1,28 +1,5 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
 #include "i40e_osdep.h"
 #include "i40e_register.h"
@@ -101,7 +78,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
 					u32 fcoe_filt_num)
 {
 	struct i40e_hmc_obj_info *obj, *full_obj;
-	i40e_status ret_code = 0;
+	i40e_status ret_code = I40E_SUCCESS;
 	u64 l2fpm_size;
 	u32 size_exp;
 
@@ -136,7 +113,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
 		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
 		hw_dbg(hw, "i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
 			  txq_num, obj->max_cnt, ret_code);
-		goto init_lan_hmc_out;
+		goto free_hmc_out;
 	}
 
 	/* aggregate values into the full LAN object for later */
@@ -159,7 +136,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
 		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
 		hw_dbg(hw, "i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
 			  rxq_num, obj->max_cnt, ret_code);
-		goto init_lan_hmc_out;
+		goto free_hmc_out;
 	}
 
 	/* aggregate values into the full LAN object for later */
@@ -182,7 +159,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
 		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
 		hw_dbg(hw, "i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
 			  fcoe_cntx_num, obj->max_cnt, ret_code);
-		goto init_lan_hmc_out;
+		goto free_hmc_out;
 	}
 
 	/* aggregate values into the full LAN object for later */
@@ -205,7 +182,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
 		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
 		hw_dbg(hw, "i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
 			  fcoe_filt_num, obj->max_cnt, ret_code);
-		goto init_lan_hmc_out;
+		goto free_hmc_out;
 	}
 
 	/* aggregate values into the full LAN object for later */
@@ -226,7 +203,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
 					  (sizeof(struct i40e_hmc_sd_entry) *
 					  hw->hmc.sd_table.sd_cnt));
 		if (ret_code)
-			goto init_lan_hmc_out;
+			goto free_hmc_out;
 		hw->hmc.sd_table.sd_entry =
 			(struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
 	}
@@ -234,6 +211,11 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
 	full_obj->size = l2fpm_size;
 
 init_lan_hmc_out:
+	return ret_code;
+free_hmc_out:
+	if (hw->hmc.hmc_obj_virt_mem.va)
+		i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
+
 	return ret_code;
 }
 
@@ -255,9 +237,9 @@ static i40e_status i40e_remove_pd_page(struct i40e_hw *hw,
 						 struct i40e_hmc_info *hmc_info,
 						 u32 idx)
 {
-	i40e_status ret_code = 0;
+	i40e_status ret_code = I40E_SUCCESS;
 
-	if (!i40e_prep_remove_pd_page(hmc_info, idx))
+	if (i40e_prep_remove_pd_page(hmc_info, idx) == I40E_SUCCESS)
 		ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true);
 
 	return ret_code;
@@ -282,9 +264,9 @@ static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw,
 					       struct i40e_hmc_info *hmc_info,
 					       u32 idx)
 {
-	i40e_status ret_code = 0;
+	i40e_status ret_code = I40E_SUCCESS;
 
-	if (!i40e_prep_remove_sd_bp(hmc_info, idx))
+	if (i40e_prep_remove_sd_bp(hmc_info, idx) == I40E_SUCCESS)
 		ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true);
 
 	return ret_code;
@@ -301,7 +283,7 @@ static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw,
 static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
 				struct i40e_hmc_lan_create_obj_info *info)
 {
-	i40e_status ret_code = 0;
+	i40e_status ret_code = I40E_SUCCESS;
 	struct i40e_hmc_sd_entry *sd_entry;
 	u32 pd_idx1 = 0, pd_lmt1 = 0;
 	u32 pd_idx = 0, pd_lmt = 0;
@@ -371,7 +353,7 @@ static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
 		ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
 						   info->entry_type,
 						   sd_size);
-		if (ret_code)
+		if (I40E_SUCCESS != ret_code)
 			goto exit_sd_error;
 		sd_entry = &info->hmc_info->sd_table.sd_entry[j];
 		if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
@@ -388,7 +370,7 @@ static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
 				ret_code = i40e_add_pd_table_entry(hw,
 								info->hmc_info,
 								i, NULL);
-				if (ret_code) {
+				if (I40E_SUCCESS != ret_code) {
 					pd_error = true;
 					break;
 				}
@@ -461,9 +443,9 @@ i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
 					     enum i40e_hmc_model model)
 {
 	struct i40e_hmc_lan_create_obj_info info;
-	i40e_status ret_code = 0;
 	u8 hmc_fn_id = hw->hmc.hmc_fn_id;
 	struct i40e_hmc_obj_info *obj;
+	i40e_status ret_code = I40E_SUCCESS;
 
 	/* Initialize part of the create object info struct */
 	info.hmc_info = &hw->hmc;
@@ -479,9 +461,9 @@ i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
 		/* Make one big object, a single SD */
 		info.count = 1;
 		ret_code = i40e_create_lan_hmc_object(hw, &info);
-		if (ret_code && (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
+		if ((ret_code != I40E_SUCCESS) && (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
 			goto try_type_paged;
-		else if (ret_code)
+		else if (ret_code != I40E_SUCCESS)
 			goto configure_lan_hmc_out;
 		/* else clause falls through the break */
 		break;
@@ -491,7 +473,7 @@ try_type_paged:
 		/* Make one big object in the PD table */
 		info.count = 1;
 		ret_code = i40e_create_lan_hmc_object(hw, &info);
-		if (ret_code)
+		if (ret_code != I40E_SUCCESS)
 			goto configure_lan_hmc_out;
 		break;
 	default:
@@ -545,7 +527,7 @@ configure_lan_hmc_out:
 static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
 				struct i40e_hmc_lan_delete_obj_info *info)
 {
-	i40e_status ret_code = 0;
+	i40e_status ret_code = I40E_SUCCESS;
 	struct i40e_hmc_pd_table *pd_table;
 	u32 pd_idx, pd_lmt, rel_pd_idx;
 	u32 sd_idx, sd_lmt;
@@ -610,7 +592,7 @@ static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
 			&info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
 		if (pd_table->pd_entry[rel_pd_idx].valid) {
 			ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
-			if (ret_code)
+			if (I40E_SUCCESS != ret_code)
 				goto exit;
 		}
 	}
@@ -631,12 +613,12 @@ static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
 		switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
 		case I40E_SD_TYPE_DIRECT:
 			ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i);
-			if (ret_code)
+			if (I40E_SUCCESS != ret_code)
 				goto exit;
 			break;
 		case I40E_SD_TYPE_PAGED:
 			ret_code = i40e_remove_pd_page(hw, info->hmc_info, i);
-			if (ret_code)
+			if (I40E_SUCCESS != ret_code)
 				goto exit;
 			break;
 		default:
@@ -681,7 +663,7 @@ i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw)
 
 #define I40E_HMC_STORE(_struct, _ele)		\
 	offsetof(struct _struct, _ele),		\
-	FIELD_SIZEOF(struct _struct, _ele)
+	sizeof_field(struct _struct, _ele)
 
 struct i40e_context_ele {
 	u16 offset;
@@ -774,13 +756,13 @@ static void i40e_write_byte(u8 *hmc_bits,
 	/* get the current bits from the target bit string */
 	dest = hmc_bits + (ce_info->lsb / 8);
 
-	memcpy(&dest_byte, dest, sizeof(dest_byte));
+	i40e_memcpy(&dest_byte, dest, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
 
 	dest_byte &= ~mask;	/* get the bits not changing */
 	dest_byte |= src_byte;	/* add in the new bits */
 
 	/* put it all back */
-	memcpy(dest, &dest_byte, sizeof(dest_byte));
+	i40e_memcpy(dest, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
 }
 
 /**
@@ -818,13 +800,13 @@ static void i40e_write_word(u8 *hmc_bits,
 	/* get the current bits from the target bit string */
 	dest = hmc_bits + (ce_info->lsb / 8);
 
-	memcpy(&dest_word, dest, sizeof(dest_word));
+	i40e_memcpy(&dest_word, dest, sizeof(dest_word), I40E_DMA_TO_NONDMA);
 
-	dest_word &= ~(cpu_to_le16(mask));	/* get the bits not changing */
-	dest_word |= cpu_to_le16(src_word);	/* add in the new bits */
+	dest_word &= ~(CPU_TO_LE16(mask));	/* get the bits not changing */
+	dest_word |= CPU_TO_LE16(src_word);	/* add in the new bits */
 
 	/* put it all back */
-	memcpy(dest, &dest_word, sizeof(dest_word));
+	i40e_memcpy(dest, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
 }
 
 /**
@@ -870,13 +852,13 @@ static void i40e_write_dword(u8 *hmc_bits,
 	/* get the current bits from the target bit string */
 	dest = hmc_bits + (ce_info->lsb / 8);
 
-	memcpy(&dest_dword, dest, sizeof(dest_dword));
+	i40e_memcpy(&dest_dword, dest, sizeof(dest_dword), I40E_DMA_TO_NONDMA);
 
-	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
-	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */
+	dest_dword &= ~(CPU_TO_LE32(mask));	/* get the bits not changing */
+	dest_dword |= CPU_TO_LE32(src_dword);	/* add in the new bits */
 
 	/* put it all back */
-	memcpy(dest, &dest_dword, sizeof(dest_dword));
+	i40e_memcpy(dest, &dest_dword, sizeof(dest_dword), I40E_NONDMA_TO_DMA);
 }
 
 /**
@@ -922,13 +904,13 @@ static void i40e_write_qword(u8 *hmc_bits,
 	/* get the current bits from the target bit string */
 	dest = hmc_bits + (ce_info->lsb / 8);
 
-	memcpy(&dest_qword, dest, sizeof(dest_qword));
+	i40e_memcpy(&dest_qword, dest, sizeof(dest_qword), I40E_DMA_TO_NONDMA);
 
-	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
-	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */
+	dest_qword &= ~(CPU_TO_LE64(mask));	/* get the bits not changing */
+	dest_qword |= CPU_TO_LE64(src_qword);	/* add in the new bits */
 
 	/* put it all back */
-	memcpy(dest, &dest_qword, sizeof(dest_qword));
+	i40e_memcpy(dest, &dest_qword, sizeof(dest_qword), I40E_NONDMA_TO_DMA);
 }
 
 /**
@@ -942,9 +924,10 @@ static i40e_status i40e_clear_hmc_context(struct i40e_hw *hw,
 					enum i40e_hmc_lan_rsrc_type hmc_type)
 {
 	/* clean the bit array */
-	memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size);
+	i40e_memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size,
+		    I40E_DMA_MEM);
 
-	return 0;
+	return I40E_SUCCESS;
 }
 
 /**
@@ -981,12 +964,12 @@ static i40e_status i40e_set_hmc_context(u8 *context_bytes,
 		}
 	}
 
-	return 0;
+	return I40E_SUCCESS;
 }
 
 /**
  * i40e_hmc_get_object_va - retrieves an object's virtual address
- * @hmc_info: pointer to i40e_hmc_info struct
+ * @hw: pointer to the hw structure
  * @object_base: pointer to u64 to get the va
  * @rsrc_type: the hmc resource type
  * @obj_idx: hmc object index
@@ -995,24 +978,20 @@ static i40e_status i40e_set_hmc_context(u8 *context_bytes,
  * base pointer.  This function is used for LAN Queue contexts.
  **/
 static
-i40e_status i40e_hmc_get_object_va(struct i40e_hmc_info *hmc_info,
+i40e_status i40e_hmc_get_object_va(struct i40e_hw *hw,
 					u8 **object_base,
 					enum i40e_hmc_lan_rsrc_type rsrc_type,
 					u32 obj_idx)
 {
 	u32 obj_offset_in_sd, obj_offset_in_pd;
-	i40e_status ret_code = 0;
+	struct i40e_hmc_info     *hmc_info = &hw->hmc;
 	struct i40e_hmc_sd_entry *sd_entry;
 	struct i40e_hmc_pd_entry *pd_entry;
 	u32 pd_idx, pd_lmt, rel_pd_idx;
+	i40e_status ret_code = I40E_SUCCESS;
 	u64 obj_offset_in_fpm;
 	u32 sd_idx, sd_lmt;
 
-	if (NULL == hmc_info) {
-		ret_code = I40E_ERR_BAD_PTR;
-		hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info ptr\n");
-		goto exit;
-	}
 	if (NULL == hmc_info->hmc_obj) {
 		ret_code = I40E_ERR_BAD_PTR;
 		hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
@@ -1070,8 +1049,7 @@ i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
 	i40e_status err;
 	u8 *context_bytes;
 
-	err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
-				     I40E_HMC_LAN_TX, queue);
+	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
 	if (err < 0)
 		return err;
 
@@ -1091,8 +1069,7 @@ i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
 	i40e_status err;
 	u8 *context_bytes;
 
-	err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
-				     I40E_HMC_LAN_TX, queue);
+	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
 	if (err < 0)
 		return err;
 
@@ -1111,8 +1088,7 @@ i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
 	i40e_status err;
 	u8 *context_bytes;
 
-	err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
-				     I40E_HMC_LAN_RX, queue);
+	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
 	if (err < 0)
 		return err;
 
@@ -1132,8 +1108,7 @@ i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
 	i40e_status err;
 	u8 *context_bytes;
 
-	err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
-				     I40E_HMC_LAN_RX, queue);
+	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
 	if (err < 0)
 		return err;
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
index e74128db5..00b7d6de4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
@@ -1,28 +1,5 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
 #ifndef _I40E_LAN_HMC_H_
 #define _I40E_LAN_HMC_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 78198072f..7ad12f421 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1,38 +1,29 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
-#include <linux/etherdevice.h>
-#include <linux/of_net.h>
-#include <linux/pci.h>
+#ifdef HAVE_XDP_SUPPORT
 #include <linux/bpf.h>
-
+#endif
 /* Local includes */
 #include "i40e.h"
+#include "i40e_helper.h"
 #include "i40e_diag.h"
+#ifdef HAVE_VXLAN_RX_OFFLOAD
+#if IS_ENABLED(CONFIG_VXLAN)
+#include <net/vxlan.h>
+#endif
+#endif /* HAVE_VXLAN_RX_OFFLOAD */
+#ifdef HAVE_GRE_ENCAP_OFFLOAD
+#include <net/gre.h>
+#endif /* HAVE_GRE_ENCAP_OFFLOAD */
+#ifdef HAVE_GENEVE_RX_OFFLOAD
+#if IS_ENABLED(CONFIG_GENEVE)
+#include <net/geneve.h>
+#endif
+#endif /* HAVE_GENEVE_RX_OFFLOAD */
+#ifdef HAVE_UDP_ENC_RX_OFFLOAD
 #include <net/udp_tunnel.h>
+#endif
 /* All i40e tracepoints are defined by the include below, which
  * must be included exactly once across the whole kernel with
  * CREATE_TRACE_POINTS defined
@@ -40,20 +31,25 @@
 #define CREATE_TRACE_POINTS
 #include "i40e_trace.h"
 
-const char i40e_driver_name[] = "i40e";
+char i40e_driver_name[] = "i40e";
 static const char i40e_driver_string[] =
-			"Intel(R) Ethernet Connection XL710 Network Driver";
+		"Intel(R) 40-10 Gigabit Ethernet Connection Network Driver";
 
-#define DRV_KERN "-k"
+#ifndef DRV_VERSION_LOCAL
+#define DRV_VERSION_LOCAL
+#endif /* DRV_VERSION_LOCAL */
+
+#define DRV_VERSION_DESC ""
 
 #define DRV_VERSION_MAJOR 2
-#define DRV_VERSION_MINOR 1
-#define DRV_VERSION_BUILD 14
+#define DRV_VERSION_MINOR 11
+#define DRV_VERSION_BUILD 29
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
-	     __stringify(DRV_VERSION_MINOR) "." \
-	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
+	__stringify(DRV_VERSION_MINOR) "." \
+	__stringify(DRV_VERSION_BUILD) \
+	DRV_VERSION_DESC __stringify(DRV_VERSION_LOCAL)
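+/* With the component values above and DRV_VERSION_LOCAL left empty,
+ * DRV_VERSION stringifies to "2.11.29", the out-of-tree driver release
+ * this patch imports.
+ */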
 const char i40e_driver_version_str[] = DRV_VERSION;
-static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
+static const char i40e_copyright[] = "Copyright(c) 2013 - 2020 Intel Corporation.";
 
 /* a bit of forward declarations */
 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
@@ -64,12 +60,20 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
 static int i40e_setup_misc_vector(struct i40e_pf *pf);
 static void i40e_determine_queue_usage(struct i40e_pf *pf);
 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
+static void i40e_clear_rss_config_user(struct i40e_vsi *vsi);
 static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
 static int i40e_reset(struct i40e_pf *pf);
 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
+static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
+static int i40e_restore_interrupt_scheme(struct i40e_pf *pf);
+static bool i40e_check_recovery_mode(struct i40e_pf *pf);
+static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw);
+static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up);
+static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf);
 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
 static int i40e_veb_get_bw_info(struct i40e_veb *veb);
-
+static int i40e_get_capabilities(struct i40e_pf *pf,
+				 enum i40e_admin_queue_opc list_type);
 /* i40e_pci_tbl - PCI Device ID Table
  *
  * Last entry must be all 0s
@@ -85,16 +89,22 @@ static const struct pci_device_id i40e_pci_tbl[] = {
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_5G_BASE_T_BC), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
-	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
-	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
 	/* required last entry */
@@ -103,86 +113,38 @@ static const struct pci_device_id i40e_pci_tbl[] = {
 MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
 
 #define I40E_MAX_VF_COUNT 128
+#define OPTION_UNSET    -1
+#define I40E_PARAM_INIT { [0 ... I40E_MAX_NIC] = OPTION_UNSET}
+#define I40E_MAX_NIC 64
+#if !defined(HAVE_SRIOV_CONFIGURE) && !defined(HAVE_RHEL6_SRIOV_CONFIGURE)
+#ifdef CONFIG_PCI_IOV
+static int max_vfs[I40E_MAX_NIC+1] = I40E_PARAM_INIT;
+module_param_array_named(max_vfs, max_vfs, int, NULL, 0);
+MODULE_PARM_DESC(max_vfs,
+	"Number of Virtual Functions: 0 = disable (default), 1-"
+	__stringify(I40E_MAX_VF_COUNT) " = enable "
+	"this many VFs");
+#endif /* CONFIG_PCI_IOV */
+#endif /* HAVE_SRIOV_CONFIGURE */
+
 static int debug = -1;
-module_param(debug, uint, 0);
-MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+static int l4mode = L4_MODE_DISABLED;
+module_param(l4mode, int, 0000);
+MODULE_PARM_DESC(l4mode, "L4 cloud filter mode: 0=UDP,1=TCP,2=Both,-1=Disabled(default)");
 
 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
-MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
+MODULE_DESCRIPTION("Intel(R) 40-10 Gigabit Ethernet Connection Network Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
 static struct workqueue_struct *i40e_wq;
 
-/**
- * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
- * @hw:   pointer to the HW structure
- * @mem:  ptr to mem struct to fill out
- * @size: size of memory requested
- * @alignment: what to align the allocation to
- **/
-int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
-			    u64 size, u32 alignment)
-{
-	struct i40e_pf *pf = (struct i40e_pf *)hw->back;
-
-	mem->size = ALIGN(size, alignment);
-	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
-				      &mem->pa, GFP_KERNEL);
-	if (!mem->va)
-		return -ENOMEM;
-
-	return 0;
-}
-
-/**
- * i40e_free_dma_mem_d - OS specific memory free for shared code
- * @hw:   pointer to the HW structure
- * @mem:  ptr to mem struct to free
- **/
-int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
-{
-	struct i40e_pf *pf = (struct i40e_pf *)hw->back;
-
-	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
-	mem->va = NULL;
-	mem->pa = 0;
-	mem->size = 0;
-
-	return 0;
-}
-
-/**
- * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
- * @hw:   pointer to the HW structure
- * @mem:  ptr to mem struct to fill out
- * @size: size of memory requested
- **/
-int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
-			     u32 size)
-{
-	mem->size = size;
-	mem->va = kzalloc(size, GFP_KERNEL);
-
-	if (!mem->va)
-		return -ENOMEM;
-
-	return 0;
-}
-
-/**
- * i40e_free_virt_mem_d - OS specific memory free for shared code
- * @hw:   pointer to the HW structure
- * @mem:  ptr to mem struct to free
- **/
-int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
+bool i40e_is_l4mode_enabled(void)
 {
-	/* it's ok to kfree a NULL pointer */
-	kfree(mem->va);
-	mem->va = NULL;
-	mem->size = 0;
-
-	return 0;
+	return l4mode > L4_MODE_DISABLED;
 }
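+/* Given the l4mode encoding above (-1 disabled, 0 UDP, 1 TCP, 2 both),
+ * this returns true whenever any L4 cloud filter mode was requested on
+ * the command line.
+ */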
 
 /**
@@ -206,8 +168,8 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
 
 	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
 		dev_info(&pf->pdev->dev,
-			 "param err: pile=%p needed=%d id=0x%04x\n",
-			 pile, needed, id);
+			 "param err: pile=%s needed=%d id=0x%04x\n",
+			 pile ? "<valid>" : "<null>", needed, id);
 		return -EINVAL;
 	}
 
@@ -274,8 +236,8 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
 
 /**
  * i40e_find_vsi_from_id - searches for the vsi with the given id
- * @pf - the pf structure to search for the vsi
- * @id - id of the vsi it is searching for
+ * @pf: the pf structure to search for the vsi
+ * @id: id of the vsi it is searching for
  **/
 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
 {
@@ -288,6 +250,22 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
 	return NULL;
 }
 
+/**
+ * i40e_find_vsi_from_seid - searches for the vsi with the given seid
+ * @pf: the pf structure to search for the vsi
+ * @seid: seid of the vsi it is searching for
+ **/
+struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_pf *pf, u16 seid)
+{
+	int i;
+
+	for (i = 0; i < pf->num_alloc_vsi; i++)
+		if (pf->vsi[i] && (pf->vsi[i]->seid == seid))
+			return pf->vsi[i];
+
+	return NULL;
+}
+
 /**
  * i40e_service_event_schedule - Schedule the service task to wake up
  * @pf: board private structure
@@ -296,20 +274,27 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
  **/
 void i40e_service_event_schedule(struct i40e_pf *pf)
 {
-	if (!test_bit(__I40E_DOWN, pf->state) &&
-	    !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+	if ((!test_bit(__I40E_DOWN, pf->state) &&
+	     !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) ||
+	      test_bit(__I40E_RECOVERY_MODE, pf->state))
 		queue_work(i40e_wq, &pf->service_task);
 }
 
 /**
  * i40e_tx_timeout - Respond to a Tx Hang
  * @netdev: network interface device structure
+ * @txqueue: stuck queue
  *
  * If any port has noticed a Tx timeout, it is likely that the whole
  * device is munged, not just the one netdev port, so go for the full
  * reset.
  **/
+#ifdef HAVE_TX_TIMEOUT_TXQUEUE
+static void
+i40e_tx_timeout(struct net_device *netdev, __always_unused unsigned int txqueue)
+#else
 static void i40e_tx_timeout(struct net_device *netdev)
+#endif
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_vsi *vsi = np->vsi;
@@ -350,12 +335,23 @@ static void i40e_tx_timeout(struct net_device *netdev)
 		}
 	}
 
+#ifdef CONFIG_DEBUG_FS
+	if (vsi->block_tx_timeout) {
+		netdev_info(netdev, "tx_timeout recovery disabled\n");
+		return;
+	}
+#endif
+
 	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
 		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
 	else if (time_before(jiffies,
 		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
 		return;   /* don't do any new action before the next timeout */
 
+	/* don't kick off another recovery if one is already pending */
+	if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
+		return;
+
 	if (tx_ring) {
 		head = i40e_get_head(tx_ring);
 		/* Read interrupt register */
@@ -387,7 +383,9 @@ static void i40e_tx_timeout(struct net_device *netdev)
 		set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
 		break;
 	default:
-		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
+		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n");
+		set_bit(__I40E_DOWN_REQUESTED, pf->state);
+		set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state);
 		break;
 	}
 
@@ -402,11 +400,23 @@ static void i40e_tx_timeout(struct net_device *netdev)
  * Returns the address of the device statistics structure.
  * The statistics are actually updated from the service task.
  **/
+#ifdef HAVE_NDO_GET_STATS64
 struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
 {
 	return &vsi->net_stats;
 }
+#else
+struct net_device_stats *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
+{
+	/* It is possible for a VSI to not have a netdev */
+	if (vsi->netdev)
+		return &vsi->netdev->stats;
+	else
+		return &vsi->net_stats;
+}
+#endif
 
+#ifdef HAVE_NDO_GET_STATS64
 /**
  * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
  * @ring: Tx ring to get statistics from
@@ -431,12 +441,19 @@ static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
 /**
  * i40e_get_netdev_stats_struct - Get statistics for netdev interface
  * @netdev: network interface device structure
+ * @stats: data structure to store statistics
  *
  * Returns the address of the device statistics structure.
  * The statistics are actually updated from the service task.
  **/
+#ifdef HAVE_VOID_NDO_GET_STATS64
 static void i40e_get_netdev_stats_struct(struct net_device *netdev,
-				  struct rtnl_link_stats64 *stats)
+					 struct rtnl_link_stats64 *stats)
+#else
+static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
+					     struct net_device *netdev,
+					     struct rtnl_link_stats64 *stats)
+#endif /* HAVE_VOID_NDO_GET_STATS64 */
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_ring *tx_ring, *rx_ring;
@@ -445,21 +462,29 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
 	int i;
 
 	if (test_bit(__I40E_VSI_DOWN, vsi->state))
+#ifdef HAVE_VOID_NDO_GET_STATS64
 		return;
+#else
+		return stats;
+#endif /* HAVE_VOID_NDO_GET_STATS64 */
 
 	if (!vsi->tx_rings)
+#ifdef HAVE_VOID_NDO_GET_STATS64
 		return;
+#else
+		return stats;
+#endif /* HAVE_VOID_NDO_GET_STATS64 */
 
 	rcu_read_lock();
 	for (i = 0; i < vsi->num_queue_pairs; i++) {
 		u64 bytes, packets;
 		unsigned int start;
 
-		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+		tx_ring = READ_ONCE(vsi->tx_rings[i]);
 		if (!tx_ring)
 			continue;
-		i40e_get_netdev_stats_struct_tx(tx_ring, stats);
 
+		i40e_get_netdev_stats_struct_tx(tx_ring, stats);
 		rx_ring = &tx_ring[1];
 
 		do {
@@ -484,7 +509,21 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
 	stats->rx_dropped	= vsi_stats->rx_dropped;
 	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
 	stats->rx_length_errors	= vsi_stats->rx_length_errors;
+#ifndef HAVE_VOID_NDO_GET_STATS64
+	return stats;
+#endif
+}
+#else
+static struct net_device_stats *i40e_get_netdev_stats_struct(
+						      struct net_device *netdev)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+
+	return i40e_get_vsi_stats_struct(vsi);
 }
+#endif /* HAVE_NDO_GET_STATS64 */
 
 /**
  * i40e_vsi_reset_stats - Resets all stats of the given vsi
@@ -492,7 +531,11 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
  **/
 void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
 {
+#ifdef HAVE_NDO_GET_STATS64
 	struct rtnl_link_stats64 *ns;
+#else
+	struct net_device_stats *ns;
+#endif
 	int i;
 
 	if (!vsi)
@@ -509,6 +552,10 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
 			       sizeof(vsi->rx_rings[i]->stats));
 			memset(&vsi->rx_rings[i]->rx_stats, 0,
 			       sizeof(vsi->rx_rings[i]->rx_stats));
+#ifdef HAVE_XDP_SUPPORT
+			memset(&vsi->rx_rings[i]->xdp_stats, 0,
+			       sizeof(vsi->rx_rings[i]->xdp_stats));
+#endif
 			memset(&vsi->tx_rings[i]->stats, 0,
 			       sizeof(vsi->tx_rings[i]->stats));
 			memset(&vsi->tx_rings[i]->tx_stats, 0,
@@ -536,10 +583,30 @@ void i40e_pf_reset_stats(struct i40e_pf *pf)
 			       sizeof(pf->veb[i]->stats));
 			memset(&pf->veb[i]->stats_offsets, 0,
 			       sizeof(pf->veb[i]->stats_offsets));
+			memset(&pf->veb[i]->tc_stats, 0,
+			       sizeof(pf->veb[i]->tc_stats));
+			memset(&pf->veb[i]->tc_stats_offsets, 0,
+			       sizeof(pf->veb[i]->tc_stats_offsets));
 			pf->veb[i]->stat_offsets_loaded = false;
 		}
 	}
 	pf->hw_csum_rx_error = 0;
+#ifdef I40E_ADD_PROBES
+	pf->tcp_segs = 0;
+	pf->tx_tcp_cso = 0;
+	pf->tx_udp_cso = 0;
+	pf->tx_sctp_cso = 0;
+	pf->tx_ip4_cso = 0;
+	pf->rx_tcp_cso = 0;
+	pf->rx_udp_cso = 0;
+	pf->rx_sctp_cso = 0;
+	pf->rx_ip4_cso = 0;
+	pf->rx_tcp_cso_err = 0;
+	pf->rx_udp_cso_err = 0;
+	pf->rx_sctp_cso_err = 0;
+	pf->rx_ip4_cso_err = 0;
+	pf->hw_csum_rx_outer = 0;
+#endif
 }
 
 /**
@@ -599,6 +666,20 @@ static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
 		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
 }
 
+/**
+ * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
+ * @hw: ptr to the hardware info
+ * @reg: the hw reg to read and clear
+ * @stat: ptr to the stat
+ **/
+static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
+{
+	u32 new_data = rd32(hw, reg);
+
+	wr32(hw, reg, 1); /* must write a nonzero value to clear register */
+	*stat += new_data;
+}
+
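+/* Unlike i40e_stat_update32() above, no offset bookkeeping is needed
+ * here: the nonzero write clears the counter in hardware, so each read
+ * returns only the events accumulated since the previous poll and is
+ * added straight to the running total (see the FDIR GLQF_PCNT users in
+ * i40e_update_pf_stats() below).
+ */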
 /**
  * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
  * @vsi: the VSI to be updated
@@ -624,9 +705,6 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi)
 	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
 			   vsi->stat_offsets_loaded,
 			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
-	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
-			   vsi->stat_offsets_loaded,
-			   &oes->tx_errors, &es->tx_errors);
 
 	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
 			   I40E_GLV_GORCL(stat_idx),
@@ -668,7 +746,7 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi)
  * i40e_update_veb_stats - Update Switch component statistics
  * @veb: the VEB being updated
  **/
-static void i40e_update_veb_stats(struct i40e_veb *veb)
+void i40e_update_veb_stats(struct i40e_veb *veb)
 {
 	struct i40e_pf *pf = veb->pf;
 	struct i40e_hw *hw = &pf->hw;
@@ -688,11 +766,10 @@ static void i40e_update_veb_stats(struct i40e_veb *veb)
 	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
 			   veb->stat_offsets_loaded,
 			   &oes->tx_discards, &es->tx_discards);
-	if (hw->revision_id > 0)
-		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
-				   veb->stat_offsets_loaded,
-				   &oes->rx_unknown_protocol,
-				   &es->rx_unknown_protocol);
+	i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
+			   veb->stat_offsets_loaded,
+			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
+
 	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
 			   veb->stat_offsets_loaded,
 			   &oes->rx_bytes, &es->rx_bytes);
@@ -756,15 +833,22 @@ static void i40e_update_veb_stats(struct i40e_veb *veb)
 static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 {
 	struct i40e_pf *pf = vsi->back;
+#ifdef HAVE_NDO_GET_STATS64
 	struct rtnl_link_stats64 *ons;
 	struct rtnl_link_stats64 *ns;   /* netdev stats */
+#else
+	struct net_device_stats *ons;
+	struct net_device_stats *ns;   /* netdev stats */
+#endif
 	struct i40e_eth_stats *oes;
 	struct i40e_eth_stats *es;     /* device's eth stats */
 	u32 tx_restart, tx_busy;
 	struct i40e_ring *p;
 	u32 rx_page, rx_buf;
 	u64 bytes, packets;
+#ifdef HAVE_NDO_GET_STATS64
 	unsigned int start;
+#endif
 	u64 tx_linearize;
 	u64 tx_force_wb;
 	u64 rx_p, rx_b;
@@ -791,13 +875,17 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 	rcu_read_lock();
 	for (q = 0; q < vsi->num_queue_pairs; q++) {
 		/* locate Tx ring */
-		p = ACCESS_ONCE(vsi->tx_rings[q]);
+		p = READ_ONCE(vsi->tx_rings[q]);
 
+#ifdef HAVE_NDO_GET_STATS64
 		do {
 			start = u64_stats_fetch_begin_irq(&p->syncp);
+#endif
 			packets = p->stats.packets;
 			bytes = p->stats.bytes;
+#ifdef HAVE_NDO_GET_STATS64
 		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
+#endif
 		tx_b += bytes;
 		tx_p += packets;
 		tx_restart += p->tx_stats.restart_queue;
@@ -807,11 +895,15 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 
 		/* Rx queue is part of the same block as Tx queue */
 		p = &p[1];
+#ifdef HAVE_NDO_GET_STATS64
 		do {
 			start = u64_stats_fetch_begin_irq(&p->syncp);
+#endif
 			packets = p->stats.packets;
 			bytes = p->stats.bytes;
+#ifdef HAVE_NDO_GET_STATS64
 		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
+#endif
 		rx_b += bytes;
 		rx_p += packets;
 		rx_buf += p->rx_stats.alloc_buff_failed;
@@ -858,7 +950,7 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
 	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
 	struct i40e_hw_port_stats *nsd = &pf->stats;
 	struct i40e_hw *hw = &pf->hw;
-	u32 val;
 	int i;
 
 	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
@@ -1040,41 +1132,31 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
 			   &osd->rx_jabber, &nsd->rx_jabber);
 
 	/* FDIR stats */
-	i40e_stat_update32(hw,
-			   I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
-			   pf->stat_offsets_loaded,
-			   &osd->fd_atr_match, &nsd->fd_atr_match);
-	i40e_stat_update32(hw,
-			   I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
-			   pf->stat_offsets_loaded,
-			   &osd->fd_sb_match, &nsd->fd_sb_match);
-	i40e_stat_update32(hw,
-		      I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
-		      pf->stat_offsets_loaded,
-		      &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);
-
-	val = rd32(hw, I40E_PRTPM_EEE_STAT);
-	nsd->tx_lpi_status =
-		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
-			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
-	nsd->rx_lpi_status =
-		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
-			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
-	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
-			   pf->stat_offsets_loaded,
-			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
-	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
-			   pf->stat_offsets_loaded,
-			   &osd->rx_lpi_count, &nsd->rx_lpi_count);
+	i40e_stat_update_and_clear32(hw,
+			I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
+			&nsd->fd_atr_match);
+	i40e_stat_update_and_clear32(hw,
+			I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
+			&nsd->fd_sb_match);
+	i40e_stat_update_and_clear32(hw,
+			I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
+			&nsd->fd_atr_tunnel_match);
+
+	i40e_get_phy_lpi_status(hw, nsd);
+	i40e_lpi_stat_update(hw, pf->stat_offsets_loaded,
+			     &osd->tx_lpi_count, &nsd->tx_lpi_count,
+			     &osd->rx_lpi_count, &nsd->rx_lpi_count);
+	i40e_get_lpi_duration(hw, nsd,
+			      &nsd->tx_lpi_duration, &nsd->rx_lpi_duration);
 
 	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
-	    !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED))
+	    !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
 		nsd->fd_sb_status = true;
 	else
 		nsd->fd_sb_status = false;
 
 	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
-	    !(pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
+	    !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
 		nsd->fd_atr_status = true;
 	else
 		nsd->fd_atr_status = false;
@@ -1098,6 +1180,25 @@ void i40e_update_stats(struct i40e_vsi *vsi)
 	i40e_update_vsi_stats(vsi);
 }
 
+/**
+ * i40e_count_filters - counts VSI mac filters
+ * @vsi: the VSI to be searched
+ *
+ * Returns count of mac filters
+ **/
+int i40e_count_filters(struct i40e_vsi *vsi)
+{
+	struct i40e_mac_filter *f;
+	struct hlist_node *h;
+	int bkt;
+	int cnt = 0;
+
+	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
+		++cnt;
+
+	return cnt;
+}
+
 /**
  * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
  * @vsi: the VSI to be searched
@@ -1106,8 +1207,8 @@ void i40e_update_stats(struct i40e_vsi *vsi)
  *
  * Returns ptr to the filter object or NULL
  **/
-static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
-						const u8 *macaddr, s16 vlan)
+struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
+					 const u8 *macaddr, s16 vlan)
 {
 	struct i40e_mac_filter *f;
 	u64 key;
@@ -1142,7 +1243,7 @@ struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
 
 	key = i40e_addr_to_hkey(macaddr);
 	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
-		if ((ether_addr_equal(macaddr, f->macaddr)))
+		if (ether_addr_equal(macaddr, f->macaddr))
 			return f;
 	}
 	return NULL;
@@ -1172,11 +1273,11 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
 	 *    i40e_sync_filters_subtask.
 	 *
 	 * Thus, we can simply use a boolean value, has_vlan_filters which we
-	 * will set to true when we add a VLAN filter in i40e_add_filter. Then
+	 * will set to true when we add a vlan filter in i40e_add_filter. Then
 	 * we have to perform the full search after deleting filters in
 	 * i40e_sync_filters_subtask, but we already have to search
 	 * filters here and can perform the check at the same time. This
-	 * results in avoiding embedding a loop for VLAN mode inside another
+	 * results in avoiding embedding a loop for vlan mode inside another
 	 * loop over all the filters, and should maintain correctness as noted
 	 * above.
 	 */
@@ -1185,7 +1286,7 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
 
 /**
  * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
- * @vsi: the VSI to configure
+ * @vsi: the vsi to configure
  * @tmp_add_list: list of filters ready to be added
  * @tmp_del_list: list of filters ready to be deleted
  * @vlan_filters: the number of active VLAN filters
@@ -1250,7 +1351,7 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
 	/* Update the remaining active filters */
 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
 		/* Combine the checks for whether a filter needs to be changed
-		 * and then determine the new VLAN inside the if block, in
+		 * and then determine the new vlan inside the if block, in
 		 * order to avoid duplicating code for adding the new filter
 		 * then deleting the old filter.
 		 */
@@ -1353,28 +1454,22 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
 			return NULL;
 
 		/* Update the boolean indicating if we need to function in
-		 * VLAN mode.
+		 * vlan mode.
 		 */
 		if (vlan >= 0)
 			vsi->has_vlan_filter = true;
 
 		ether_addr_copy(f->macaddr, macaddr);
 		f->vlan = vlan;
-		/* If we're in overflow promisc mode, set the state directly
-		 * to failed, so we don't bother to try sending the filter
-		 * to the hardware.
-		 */
-		if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state))
-			f->state = I40E_FILTER_FAILED;
-		else
-			f->state = I40E_FILTER_NEW;
+		f->state = I40E_FILTER_NEW;
+
 		INIT_HLIST_NODE(&f->hlist);
 
 		key = i40e_addr_to_hkey(macaddr);
 		hash_add(vsi->mac_filter_hash, &f->hlist, key);
 
 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
-		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+		set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
 	}
 
 	/* If we're asked to add a filter that has been marked for removal, it
@@ -1392,46 +1487,10 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
 }
 
 /**
- * __i40e_del_filter - Remove a specific filter from the VSI
- * @vsi: VSI to remove from
- * @f: the filter to remove from the list
- *
- * This function should be called instead of i40e_del_filter only if you know
- * the exact filter you will remove already, such as via i40e_find_filter or
- * i40e_find_mac.
- *
- * NOTE: This function is expected to be called with mac_filter_hash_lock
- * being held.
- * ANOTHER NOTE: This function MUST be called from within the context of
- * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
- * instead of list_for_each_entry().
- **/
-void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
-{
-	if (!f)
-		return;
-
-	/* If the filter was never added to firmware then we can just delete it
-	 * directly and we don't want to set the status to remove or else an
-	 * admin queue command will unnecessarily fire.
-	 */
-	if ((f->state == I40E_FILTER_FAILED) ||
-	    (f->state == I40E_FILTER_NEW)) {
-		hash_del(&f->hlist);
-		kfree(f);
-	} else {
-		f->state = I40E_FILTER_REMOVE;
-	}
-
-	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
-	vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
-}
-
-/**
- * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
+ * i40e_del_filter - Remove a mac/vlan filter from the VSI
  * @vsi: the VSI to be searched
  * @macaddr: the MAC address
- * @vlan: the VLAN
+ * @vlan: the vlan
  *
  * NOTE: This function is expected to be called with mac_filter_hash_lock
  * being held.
@@ -1504,8 +1563,6 @@ int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
 	bool found = false;
 	int bkt;
 
-	WARN(!spin_is_locked(&vsi->mac_filter_hash_lock),
-	     "Missing mac_filter_hash_lock\n");
 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
 		if (ether_addr_equal(macaddr, f->macaddr)) {
 			__i40e_del_filter(vsi, f);
@@ -1543,8 +1600,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
 		return 0;
 	}
 
-	if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
-	    test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
+	if (test_bit(__I40E_DOWN, pf->state) ||
+	    test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
 		return -EADDRNOTAVAIL;
 
 	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
@@ -1553,16 +1610,22 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
 	else
 		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
 
+	/* Copy the address first, so that we avoid a possible race with
+	 * .set_rx_mode(). If we copy after changing the address in the filter
+	 * list, we might open ourselves to a narrow race window where
+	 * .set_rx_mode could delete our dev_addr filter and prevent traffic
+	 * from passing.
+	 */
+	ether_addr_copy(netdev->dev_addr, addr->sa_data);
+
 	spin_lock_bh(&vsi->mac_filter_hash_lock);
 	i40e_del_mac_filter(vsi, netdev->dev_addr);
 	i40e_add_mac_filter(vsi, addr->sa_data);
 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
-	ether_addr_copy(netdev->dev_addr, addr->sa_data);
 	if (vsi->type == I40E_VSI_MAIN) {
 		i40e_status ret;
 
-		ret = i40e_aq_mac_address_write(&vsi->back->hw,
-						I40E_AQC_WRITE_TYPE_LAA_WOL,
+		ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
 						addr->sa_data, NULL);
 		if (ret)
 			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
@@ -1573,77 +1636,250 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
 	/* schedule our worker thread which will take care of
 	 * applying the new filter changes
 	 */
-	i40e_service_event_schedule(vsi->back);
+	i40e_service_event_schedule(pf);
 	return 0;
 }
 
 /**
- * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
- * @vsi: the VSI being setup
- * @ctxt: VSI context structure
- * @enabled_tc: Enabled TCs bitmap
- * @is_add: True if called before Add VSI
- *
- * Setup VSI queue mapping for enabled traffic classes.
+ * i40e_config_rss_aq - Prepare for RSS using AQ commands
+ * @vsi: vsi structure
+ * @seed: RSS hash seed
+ * @lut: Buffer to store the lookup table entries
+ * @lut_size: Size of buffer to store the lookup table entries
  **/
-static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
-				     struct i40e_vsi_context *ctxt,
-				     u8 enabled_tc,
-				     bool is_add)
+static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
+			      u8 *lut, u16 lut_size)
 {
 	struct i40e_pf *pf = vsi->back;
-	u16 sections = 0;
-	u8 netdev_tc = 0;
-	u16 numtc = 0;
-	u16 qcount;
-	u8 offset;
-	u16 qmap;
-	int i;
-	u16 num_tc_qps = 0;
-
-	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
-	offset = 0;
+	struct i40e_hw *hw = &pf->hw;
+	int ret = 0;
 
-	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
-		/* Find numtc from enabled TC bitmap */
-		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-			if (enabled_tc & BIT(i)) /* TC is enabled */
-				numtc++;
-		}
-		if (!numtc) {
-			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
-			numtc = 1;
+	if (seed) {
+		struct i40e_aqc_get_set_rss_key_data *seed_dw =
+			(struct i40e_aqc_get_set_rss_key_data *)seed;
+		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
+		if (ret) {
+			dev_info(&pf->pdev->dev,
+				 "Cannot set RSS key, err %s aq_err %s\n",
+				 i40e_stat_str(hw, ret),
+				 i40e_aq_str(hw, hw->aq.asq_last_status));
+			return ret;
 		}
-	} else {
-		/* At least TC0 is enabled in case of non-DCB case */
-		numtc = 1;
 	}
+	if (lut) {
+		bool pf_lut = (vsi->type == I40E_VSI_MAIN);
 
-	vsi->tc_config.numtc = numtc;
-	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
-	/* Number of queues per enabled TC */
-	qcount = vsi->alloc_queue_pairs;
+		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
+		if (ret) {
+			dev_info(&pf->pdev->dev,
+				 "Cannot set RSS lut, err %s aq_err %s\n",
+				 i40e_stat_str(hw, ret),
+				 i40e_aq_str(hw, hw->aq.asq_last_status));
+			return ret;
+		}
+	}
+	return ret;
+}
 
-	num_tc_qps = qcount / numtc;
-	num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
+/**
+ * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
+ * @vsi: VSI structure
+ **/
+static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
+{
+	struct i40e_pf *pf = vsi->back;
+	u8 seed[I40E_HKEY_ARRAY_SIZE];
+	u8 *lut;
+	int ret;
 
-	/* Setup queue offset/count for all TCs for given VSI */
-	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-		/* See if the given TC is enabled for the given VSI */
-		if (vsi->tc_config.enabled_tc & BIT(i)) {
-			/* TC is enabled */
-			int pow, num_qps;
+	if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
+		return 0;
+	if (!vsi->rss_size)
+		vsi->rss_size = min_t(int, pf->alloc_rss_size,
+				      vsi->num_queue_pairs);
+	if (!vsi->rss_size)
+		return -EINVAL;
+	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
+	if (!lut)
+		return -ENOMEM;
 
-			switch (vsi->type) {
-			case I40E_VSI_MAIN:
-				qcount = min_t(int, pf->alloc_rss_size,
-					       num_tc_qps);
-				break;
-			case I40E_VSI_FDIR:
-			case I40E_VSI_SRIOV:
-			case I40E_VSI_VMDQ2:
-			default:
-				qcount = num_tc_qps;
+	/* Use the user-configured hash key and lookup table if they
+	 * exist, otherwise use the defaults
+	 */
+	if (vsi->rss_lut_user)
+		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
+	else
+		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
+	if (vsi->rss_hkey_user)
+		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
+	else
+		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
+	ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
+	kfree(lut);
+	return ret;
+}
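+
+/* A minimal sketch of the default LUT fill relied on above; the real
+ * i40e_fill_rss_lut() is assumed to spread queue indices round-robin
+ * across the table, roughly:
+ *
+ *	static void example_fill_rss_lut(u8 *lut, u16 lut_size, u16 rss_size)
+ *	{
+ *		u16 i;
+ *
+ *		for (i = 0; i < lut_size; i++)
+ *			lut[i] = i % rss_size;	// 0, 1, ..., rss_size - 1, 0, ...
+ *	}
+ */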
+
+#ifdef __TC_MQPRIO_MODE_MAX
+/**
+ * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
+ * @vsi: the VSI being configured,
+ * @ctxt: VSI context structure
+ * @enabled_tc: number of traffic classes to enable
+ *
+ * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
+ **/
+static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
+					   struct i40e_vsi_context *ctxt,
+					   u8 enabled_tc)
+{
+	u16 qcount = 0, max_qcount, qmap, sections = 0;
+	int i, override_q, pow, num_qps, ret;
+	u8 netdev_tc = 0, offset = 0;
+
+	if (vsi->type != I40E_VSI_MAIN)
+		return -EINVAL;
+	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
+	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
+	vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
+	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
+	num_qps = vsi->mqprio_qopt.qopt.count[0];
+
+	/* find the next higher power-of-2 of num queue pairs */
+	pow = ilog2(num_qps);
+	if (!is_power_of_2(num_qps))
+		pow++;
+	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+		(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
+
+	/* Setup queue offset/count for all TCs for given VSI */
+	max_qcount = vsi->mqprio_qopt.qopt.count[0];
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		/* See if the given TC is enabled for the given VSI */
+		if (vsi->tc_config.enabled_tc & BIT(i)) {
+			offset = vsi->mqprio_qopt.qopt.offset[i];
+			qcount = vsi->mqprio_qopt.qopt.count[i];
+			if (qcount > max_qcount)
+				max_qcount = qcount;
+			vsi->tc_config.tc_info[i].qoffset = offset;
+			vsi->tc_config.tc_info[i].qcount = qcount;
+			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
+		} else {
+			/* TC is not enabled so set the offset to
+			 * default queue and allocate one queue
+			 * for the given TC.
+			 */
+			vsi->tc_config.tc_info[i].qoffset = 0;
+			vsi->tc_config.tc_info[i].qcount = 1;
+			vsi->tc_config.tc_info[i].netdev_tc = 0;
+		}
+	}
+
+	/* Set actual Tx/Rx queue pairs */
+	vsi->num_queue_pairs = offset + qcount;
+
+	/* Setup queue TC[0].qmap for given VSI context */
+	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
+	ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
+	ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
+	ctxt->info.valid_sections |= cpu_to_le16(sections);
+
+	/* Reconfigure RSS for main VSI with max queue count */
+	vsi->rss_size = max_qcount;
+	ret = i40e_vsi_config_rss(vsi);
+	if (ret) {
+		dev_info(&vsi->back->pdev->dev,
+			 "Failed to reconfig rss for num_queues (%u)\n",
+			 max_qcount);
+		return ret;
+	}
+	vsi->reconfig_rss = true;
+	dev_dbg(&vsi->back->pdev->dev,
+		"Reconfigured rss with num_queues (%u)\n", max_qcount);
+
+	/* Find the queue count and starting offset available for the
+	 * channel VSIs
+	 */
+	override_q = vsi->mqprio_qopt.qopt.count[0];
+	if (override_q && override_q < vsi->num_queue_pairs) {
+		vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
+		vsi->next_base_queue = override_q;
+	}
+	return 0;
+}
+#endif
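+
+/* A worked sketch of the sizing rule above: the qmap encodes each TC's
+ * size as a power-of-two exponent, so the queue count is rounded up to
+ * the next power of two:
+ *
+ *	static int example_tc_size_exponent(int num_qps)
+ *	{
+ *		int pow = ilog2(num_qps);
+ *
+ *		if (!is_power_of_2(num_qps))
+ *			pow++;
+ *		return pow;	// smallest pow with (1 << pow) >= num_qps
+ *	}
+ *
+ * e.g. num_qps = 5: ilog2(5) = 2 and 5 is not a power of two, so
+ * pow = 3 and the TC spans 2^3 = 8 queue slots.
+ */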
+
+/**
+ * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
+ * @vsi: the VSI being setup
+ * @ctxt: VSI context structure
+ * @enabled_tc: Enabled TCs bitmap
+ * @is_add: True if called before Add VSI
+ *
+ * Setup VSI queue mapping for enabled traffic classes.
+ **/
+static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
+				     struct i40e_vsi_context *ctxt,
+				     u8 enabled_tc,
+				     bool is_add)
+{
+	struct i40e_pf *pf = vsi->back;
+	u16 sections = 0;
+	u8 netdev_tc = 0;
+	u16 qcount = 0;
+	u16 numtc = 1;
+	u8 offset;
+	u16 qmap;
+	int i;
+	u16 num_tc_qps = 0;
+
+	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
+	offset = 0;
+
+	/* Number of queues per enabled TC */
+	num_tc_qps = vsi->alloc_queue_pairs;
+	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
+		/* Find numtc from enabled TC bitmap */
+		for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+			if (enabled_tc & BIT(i)) /* TC is enabled */
+				numtc++;
+		}
+		if (!numtc) {
+			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
+			numtc = 1;
+		}
+		num_tc_qps = num_tc_qps / numtc;
+		num_tc_qps = min_t(int, num_tc_qps,
+				   i40e_pf_get_max_q_per_tc(pf));
+	}
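+
+	/* e.g. 16 allocated queue pairs with four TCs enabled gives
+	 * numtc = 4 and num_tc_qps = 16 / 4 = 4 queues per TC above,
+	 * further capped by i40e_pf_get_max_q_per_tc() and, below, by
+	 * the number of LAN MSI-X vectors.
+	 */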
+
+	vsi->tc_config.numtc = numtc;
+	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
+
+	/* Do not allow more TC queue pairs than there are MSI-X vectors */
+	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+		num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);
+
+	/* Setup queue offset/count for all TCs for given VSI */
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		/* See if the given TC is enabled for the given VSI */
+		if (vsi->tc_config.enabled_tc & BIT(i)) {
+			int pow, num_qps;
+
+			switch (vsi->type) {
+			case I40E_VSI_MAIN:
+				if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
+				    I40E_FLAG_FD_ATR_ENABLED)) ||
+				    vsi->tc_config.enabled_tc != 1) {
+					qcount = min_t(int, pf->alloc_rss_size,
+						       num_tc_qps);
+					break;
+				}
+				/* fall through */
+			case I40E_VSI_FDIR:
+			case I40E_VSI_SRIOV:
+			case I40E_VSI_VMDQ2:
+			default:
+				qcount = num_tc_qps;
 				WARN_ON(i != 0);
 				break;
 			}
@@ -1681,6 +1917,10 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
 	/* Set actual Tx/Rx queue pairs */
 	vsi->num_queue_pairs = offset;
 	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
+		/* This code helps add more queues to the VSI when we
+		 * have more cores than RSS can support; the extra
+		 * queues are steered by ATR or other filters rather
+		 * than RSS.
+		 */
 		if (vsi->req_queue_pairs > 0)
 			vsi->num_queue_pairs = vsi->req_queue_pairs;
 		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -1739,6 +1979,14 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_vsi *vsi = np->vsi;
 
+	/* Under some circumstances, we might receive a request to delete
+	 * our own device address from our uc list. Because we store the
+	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
+	 * such requests and not delete our device address from this list.
+	 */
+	if (ether_addr_equal(addr, netdev->dev_addr))
+		return 0;
+
 	i40e_del_mac_filter(vsi, addr);
 
 	return 0;
@@ -1763,7 +2011,7 @@ static void i40e_set_rx_mode(struct net_device *netdev)
 	/* check for other flag changes */
 	if (vsi->current_netdev_flags != vsi->netdev->flags) {
 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
-		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+		set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
 	}
 
 	/* schedule our worker thread which will take care of
@@ -1774,7 +2022,7 @@ static void i40e_set_rx_mode(struct net_device *netdev)
 
 /**
  * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
- * @vsi: Pointer to VSI struct
+ * @vsi: Pointer to vsi struct
  * @from: Pointer to list which contains MAC filter entries - changes to
  *        those entries needs to be undone.
  *
@@ -1840,7 +2088,7 @@ struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
  * from firmware
  * @count: Number of filters added
  * @add_list: return data from fw
- * @head: pointer to first filter in current batch
+ * @add_head: pointer to first filter in current batch
  *
  * MAC filter entries from list were slated to be added to device. Returns
  * number of successful filters. Note that 0 does NOT mean success!
@@ -1917,17 +2165,16 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
  * @list: the list of filters to send to firmware
  * @add_head: Position in the add hlist
  * @num_add: the number of filters to add
- * @promisc_change: set to true on exit if promiscuous mode was forced on
  *
  * Send a request to firmware via AdminQ to add a chunk of filters. Will set
- * promisc_changed to true if the firmware has run out of space for more
- * filters.
+ * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of
+ * space for more filters.
  */
 static
 void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
 			  struct i40e_aqc_add_macvlan_element_data *list,
 			  struct i40e_new_mac_filter *add_head,
-			  int num_add, bool *promisc_changed)
+			  int num_add)
 {
 	struct i40e_hw *hw = &vsi->back->hw;
 	int aq_err, fcnt;
@@ -1937,18 +2184,29 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
 	fcnt = i40e_update_filter_state(num_add, list, add_head);
 
 	if (fcnt != num_add) {
-		*promisc_changed = true;
-		set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
-		dev_warn(&vsi->back->pdev->dev,
-			 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
-			 i40e_aq_str(hw, aq_err),
-			 vsi_name);
+		if (vsi->type == I40E_VSI_MAIN) {
+			set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
+			dev_warn(&vsi->back->pdev->dev,
+				 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
+				 i40e_aq_str(hw, aq_err), vsi_name);
+		} else if (vsi->type == I40E_VSI_SRIOV ||
+			   vsi->type == I40E_VSI_VMDQ1 ||
+			   vsi->type == I40E_VSI_VMDQ2) {
+			dev_warn(&vsi->back->pdev->dev,
+				 "Error %s adding RX filters on %s, please set promiscuous mode on manually for %s\n",
+				 i40e_aq_str(hw, aq_err), vsi_name, vsi_name);
+		} else {
+			dev_warn(&vsi->back->pdev->dev,
+				 "Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
+				 i40e_aq_str(hw, aq_err), vsi_name, vsi->type);
+		}
 	}
 }
 
 /**
  * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
  * @vsi: pointer to the VSI
+ * @vsi_name: the VSI name
  * @f: filter data
  *
  * This function sets or clears the promiscuous broadcast flags for VLAN
@@ -1978,11 +2236,13 @@ i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
 							    NULL);
 	}
 
-	if (aq_ret)
+	if (aq_ret) {
+		set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
 		dev_warn(&vsi->back->pdev->dev,
-			 "Error %s setting broadcast promiscuous mode on %s\n",
+			 "Error %s, forcing overflow promiscuous on %s\n",
 			 i40e_aq_str(hw, hw->aq.asq_last_status),
 			 vsi_name);
+	}
 
 	return aq_ret;
 }
@@ -1994,7 +2254,7 @@ i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
  *
  * There are different ways of setting promiscuous mode on a PF depending on
  * what state/environment we're in.  This identifies and sets it appropriately.
- * Returns 0 on success.
+ * Returns I40E_SUCCESS on success.
  **/
 static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
 {
@@ -2054,6 +2314,56 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
 	return aq_ret;
 }
 
+/**
+ * i40e_set_switch_mode - sets up switch mode correctly
+ * @pf: working PF
+ * @l4type: TCP, UDP, or both
+ *
+ * Configures the switch for cloud filters of the given L4 type.
+ **/
+static void i40e_set_switch_mode(struct i40e_pf *pf, u8 l4type)
+{
+	struct i40e_hw *hw;
+	u8 mode;
+	int ret;
+
+	if (!pf)
+		return;
+
+	hw = &pf->hw;
+
+	/* Set Bit 7 to be valid */
+	mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
+
+	/* We only support destination port filters, so don't set the
+	 * source port bit here.
+	 */
+	if (l4type > I40E_AQ_SET_SWITCH_L4_TYPE_BOTH ||
+	    l4type < I40E_AQ_SET_SWITCH_L4_TYPE_TCP) {
+		dev_warn(&pf->pdev->dev,
+			 "invalid L4 type 0x%x, unable to set switch mode\n",
+			 l4type);
+		return;
+	}
+
+	mode |= l4type;
+
+	/* Set cloud filter mode */
+	mode |= I40E_AQ_SET_SWITCH_MODE_L4_PORT;
+
+	dev_dbg(&pf->pdev->dev, "setting switch mode to 0x%x\n", mode);
+	/* Prep mode field for set_switch_config */
+	ret = i40e_aq_set_switch_config(hw, 0, 0, mode, NULL);
+	/* If the driver is reloaded, the AQ call will fail. So don't make a
+	 * big deal about it.
+	 */
+	if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
+		dev_dbg(&pf->pdev->dev,
+			"couldn't set switch config bits, err %s aq_err %s\n",
+			i40e_stat_str(hw, ret),
+			i40e_aq_str(hw, hw->aq.asq_last_status));
+}
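+
+/* A sketch of the mode byte composed above, reusing the same macros
+ * (bit values assumed from i40e_adminq_cmd.h):
+ *
+ *	mode  = I40E_AQ_SET_SWITCH_BIT7_VALID;	 // mark byte as valid
+ *	mode |= l4type;				 // TCP, UDP, or both
+ *	mode |= I40E_AQ_SET_SWITCH_MODE_L4_PORT; // dest-port cloud filters
+ */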
+
 /**
  * i40e_sync_vsi_filters - Update the VSI filter list to the HW
  * @vsi: ptr to the VSI
@@ -2068,9 +2378,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 	struct i40e_mac_filter *f;
 	struct i40e_new_mac_filter *new, *add_head = NULL;
 	struct i40e_hw *hw = &vsi->back->hw;
+	bool old_overflow, new_overflow;
 	unsigned int failed_filters = 0;
 	unsigned int vlan_filters = 0;
-	bool promisc_changed = false;
 	char vsi_name[16] = "PF";
 	int filter_list_len = 0;
 	i40e_status aq_ret = 0;
@@ -2092,6 +2402,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 		usleep_range(1000, 2000);
 	pf = vsi->back;
 
+	old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
+
 	if (vsi->netdev) {
 		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
 		vsi->current_netdev_flags = vsi->netdev->flags;
@@ -2182,7 +2494,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 				cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
 			} else {
 				del_list[num_del].vlan_tag =
-					cpu_to_le16((u16)(f->vlan));
+					CPU_TO_LE16((u16)(f->vlan));
 			}
 
 			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
@@ -2224,12 +2536,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 
 		num_add = 0;
 		hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
-			if (test_bit(__I40E_VSI_OVERFLOW_PROMISC,
-				     vsi->state)) {
-				new->state = I40E_FILTER_FAILED;
-				continue;
-			}
-
 			/* handle broadcast filters by updating the broadcast
 			 * promiscuous flag instead of adding a MAC filter.
 			 */
@@ -2253,27 +2559,26 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 				cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
 			} else {
 				add_list[num_add].vlan_tag =
-					cpu_to_le16((u16)(new->f->vlan));
+					CPU_TO_LE16((u16)(new->f->vlan));
 			}
 			add_list[num_add].queue_number = 0;
 			/* set invalid match method for later detection */
 			add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
 			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
-			add_list[num_add].flags = cpu_to_le16(cmd_flags);
+			add_list[num_add].flags = CPU_TO_LE16(cmd_flags);
 			num_add++;
 
 			/* flush a full buffer */
 			if (num_add == filter_list_len) {
 				i40e_aqc_add_filters(vsi, vsi_name, add_list,
-						     add_head, num_add,
-						     &promisc_changed);
+						     add_head, num_add);
 				memset(add_list, 0, list_size);
 				num_add = 0;
 			}
 		}
 		if (num_add) {
 			i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
-					     num_add, &promisc_changed);
+					     num_add);
 		}
 		/* Now move all of the filters from the temp add list back to
 		 * the VSI's list.
@@ -2302,33 +2607,32 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 	}
 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
 
-	/* If promiscuous mode has changed, we need to calculate a new
-	 * threshold for when we are safe to exit
-	 */
-	if (promisc_changed)
-		vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
-
 	/* Check if we are able to exit overflow promiscuous mode. We can
 	 * safely exit if we didn't just enter, we no longer have any failed
 	 * filters, and we have reduced filters below the threshold value.
 	 */
-	if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) &&
-	    !promisc_changed && !failed_filters &&
-	    (vsi->active_filters < vsi->promisc_threshold)) {
+	if (old_overflow && !failed_filters &&
+	    vsi->active_filters < vsi->promisc_threshold) {
 		dev_info(&pf->pdev->dev,
 			 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
 			 vsi_name);
 		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
-		promisc_changed = true;
 		vsi->promisc_threshold = 0;
 	}
-
 	/* if the VF is not trusted do not do promisc */
 	if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
 		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
 		goto out;
 	}
 
+	new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
+
+	/* If we are entering overflow promiscuous, we need to calculate a new
+	 * threshold for when we are safe to exit
+	 */
+	if (!old_overflow && new_overflow)
+		vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
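+
+	/* e.g. overflowing with 200 active filters sets the threshold
+	 * to (200 * 3) / 4 = 150, so overflow promiscuous is left only
+	 * once no filters have failed and the active count has dropped
+	 * below 150.
+	 */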
+
 	/* check for changes in promiscuous modes */
 	if (changed_flags & IFF_ALLMULTI) {
 		bool cur_multipromisc;
@@ -2346,15 +2650,18 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 				 vsi_name,
 				 i40e_stat_str(hw, aq_ret),
 				 i40e_aq_str(hw, hw->aq.asq_last_status));
+		} else {
+			dev_info(&pf->pdev->dev, "%s is %s allmulti mode.\n",
+				 vsi->netdev->name,
+				 cur_multipromisc ? "entering" : "leaving");
 		}
 	}
 
-	if ((changed_flags & IFF_PROMISC) || promisc_changed) {
+	if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
 		bool cur_promisc;
 
 		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
-			       test_bit(__I40E_VSI_OVERFLOW_PROMISC,
-					vsi->state));
+			       new_overflow);
 		aq_ret = i40e_set_promiscuous(pf, cur_promisc);
 		if (aq_ret) {
 			retval = i40e_aq_rc_to_posix(aq_ret,
@@ -2366,6 +2673,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 				 i40e_stat_str(hw, aq_ret),
 				 i40e_aq_str(hw, hw->aq.asq_last_status));
 		}
+
 	}
 out:
 	/* if something went wrong then set the changed flag so we try again */
@@ -2396,18 +2704,21 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
 {
 	int v;
 
-	if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
+	if (!pf)
+		return;
+
+	if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
 		return;
-	pf->flags &= ~I40E_FLAG_FILTER_SYNC;
 
 	for (v = 0; v < pf->num_alloc_vsi; v++) {
 		if (pf->vsi[v] &&
-		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
+		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) &&
+		    !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) {
 			int ret = i40e_sync_vsi_filters(pf->vsi[v]);
-
 			if (ret) {
 				/* come back and try again later */
-				pf->flags |= I40E_FLAG_FILTER_SYNC;
+				set_bit(__I40E_MACVLAN_SYNC_PENDING,
+					pf->state);
 				break;
 			}
 		}
@@ -2436,26 +2747,52 @@ static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	int max_frame = new_mtu + I40E_PACKET_HDR_PAD;
 	struct i40e_vsi *vsi = np->vsi;
 	struct i40e_pf *pf = vsi->back;
 
-	if (i40e_enabled_xdp_vsi(vsi)) {
-		int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+	/* MTU < 68 is an error and causes problems on some kernels */
+	if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
+		return -EINVAL;
 
-		if (frame_size > i40e_max_xdp_frame_size(vsi))
+	if (i40e_enabled_xdp_vsi(vsi)) {
+		if (max_frame > i40e_max_xdp_frame_size(vsi))
 			return -EINVAL;
 	}
 
+#ifndef HAVE_NDO_FEATURES_CHECK
+
+	/* MTU < 576 causes problems with TSO */
+	if (new_mtu < 576) {
+		netdev->features &= ~NETIF_F_TSO;
+		netdev->features &= ~NETIF_F_TSO6;
+#ifdef HAVE_NDO_SET_FEATURES
+	} else {
+#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+		if (netdev->wanted_features & NETIF_F_TSO)
+			netdev->features |= NETIF_F_TSO;
+		if (netdev->wanted_features & NETIF_F_TSO6)
+			netdev->features |= NETIF_F_TSO6;
+#else
+		if (netdev_extended(netdev)->wanted_features & NETIF_F_TSO)
+			netdev->features |= NETIF_F_TSO;
+		if (netdev_extended(netdev)->wanted_features & NETIF_F_TSO6)
+			netdev->features |= NETIF_F_TSO6;
+#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
+#endif /* HAVE_NDO_SET_FEATURES */
+	}
+#endif /* ! HAVE_NDO_FEATURES_CHECK */
 	netdev_info(netdev, "changing MTU from %d to %d\n",
 		    netdev->mtu, new_mtu);
 	netdev->mtu = new_mtu;
 	if (netif_running(netdev))
 		i40e_vsi_reinit_locked(vsi);
-	pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
-		      I40E_FLAG_CLIENT_L2_CHANGE);
+	set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
+	set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
 	return 0;
 }
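+
+/* Arithmetic check of the bounds above, assuming the usual definitions
+ * I40E_PACKET_HDR_PAD = ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN = 26
+ * and I40E_MAX_RXBUFFER = 9728:
+ *
+ *	new_mtu = 1500  ->  max_frame = 1526  (accepted)
+ *	new_mtu = 9702  ->  max_frame = 9728  (largest accepted MTU)
+ *	new_mtu = 9703  ->  max_frame = 9729  (-EINVAL)
+ */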
 
+#if defined(HAVE_PTP_1588_CLOCK) || defined(HAVE_I40E_INTELCIM_IOCTL)
 /**
  * i40e_ioctl - Access the hwtstamp interface
  * @netdev: network interface device structure
@@ -2464,19 +2801,34 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
  **/
 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 {
+#ifdef HAVE_PTP_1588_CLOCK
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_pf *pf = np->vsi->back;
 
+#endif /* HAVE_PTP_1588_CLOCK */
 	switch (cmd) {
+#ifdef HAVE_PTP_1588_CLOCK
+#ifdef SIOCGHWTSTAMP
 	case SIOCGHWTSTAMP:
 		return i40e_ptp_get_ts_config(pf, ifr);
+#endif
 	case SIOCSHWTSTAMP:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EACCES;
 		return i40e_ptp_set_ts_config(pf, ifr);
+	case SIOCSPINS:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EACCES;
+		return i40e_ptp_set_pins_ioctl(pf, ifr);
+	case SIOCGPINS:
+		return i40e_ptp_get_pins(pf, ifr);
+#endif /* HAVE_PTP_1588_CLOCK */
 	default:
 		return -EOPNOTSUPP;
 	}
 }
 
+#endif
 /**
  * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
  * @vsi: the vsi being adjusted
@@ -2486,6 +2838,10 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
 	struct i40e_vsi_context ctxt;
 	i40e_status ret;
 
+	/* Don't modify stripping options if a port vlan is active */
+	if (vsi->info.pvid)
+		return;
+
 	if ((vsi->info.valid_sections &
 	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
 	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
@@ -2516,6 +2872,10 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
 	struct i40e_vsi_context ctxt;
 	i40e_status ret;
 
+	/* Don't modify stripping options if a port vlan is active */
+	if (vsi->info.pvid)
+		return;
+
 	if ((vsi->info.valid_sections &
 	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
 	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
@@ -2534,25 +2894,31 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
 			 "update vlan stripping failed, err %s aq_err %s\n",
 			 i40e_stat_str(&vsi->back->hw, ret),
 			 i40e_aq_str(&vsi->back->hw,
-				     vsi->back->hw.aq.asq_last_status));
+				      vsi->back->hw.aq.asq_last_status));
 	}
 }
 
+#ifdef HAVE_VLAN_RX_REGISTER
 /**
  * i40e_vlan_rx_register - Setup or shutdown vlan offload
  * @netdev: network interface to be adjusted
- * @features: netdev features to test if VLAN offload is enabled or not
+ * @grp: new vlan group list, NULL if disabling
  **/
-static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
+static void i40e_vlan_rx_register(struct net_device *netdev,
+				  struct vlan_group *grp)
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_vsi *vsi = np->vsi;
+	bool enable;
 
-	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+	vsi->vlgrp = grp;
+	enable = (grp || (vsi->back->flags & I40E_FLAG_DCB_ENABLED));
+	if (enable)
 		i40e_vlan_stripping_enable(vsi);
 	else
 		i40e_vlan_stripping_disable(vsi);
 }
+#endif /* HAVE_VLAN_RX_REGISTER */
 
 /**
  * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
@@ -2562,7 +2928,7 @@ static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
  * This is a helper function for adding a new MAC/VLAN filter with the
  * specified VLAN for each existing MAC address already in the hash table.
  * This function does *not* perform any accounting to update filters based on
- * VLAN mode.
+ * vlan mode.
  *
  * NOTE: this function expects to be called while under the
  * mac_filter_hash_lock
@@ -2589,17 +2955,14 @@ int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
 }
 
 /**
- * i40e_vsi_add_vlan - Add VSI membership for given VLAN
- * @vsi: the VSI being configured
- * @vid: VLAN id to be added
+ * i40e_vsi_add_vlan - Add vsi membership for given vlan
+ * @vsi: the vsi being configured
+ * @vid: vlan id to be added
  **/
 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
 {
 	int err;
 
-	if (vsi->info.pvid)
-		return -EINVAL;
-
 	/* The network stack will attempt to add VID=0, with the intention to
 	 * receive priority tagged packets with a VLAN of 0. Our HW receives
 	 * these packets by default when configured to receive untagged
@@ -2651,9 +3014,9 @@ void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
 }
 
 /**
- * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
- * @vsi: the VSI being configured
- * @vid: VLAN id to be removed
+ * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
+ * @vsi: the vsi being configured
+ * @vid: vlan id to be removed
  **/
 void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
 {
@@ -2673,36 +3036,126 @@ void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
 /**
  * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
  * @netdev: network interface to be adjusted
+ * @proto: unused protocol value
  * @vid: vlan id to be added
  *
  * net_device_ops implementation for adding vlan ids
  **/
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
 				__always_unused __be16 proto, u16 vid)
+#else
+static int i40e_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+#endif
+#else
+static void i40e_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+#endif
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_vsi *vsi = np->vsi;
 	int ret = 0;
 
 	if (vid >= VLAN_N_VID)
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
 		return -EINVAL;
+#else
+		return;
+#endif
 
 	ret = i40e_vsi_add_vlan(vsi, vid);
+#ifndef HAVE_VLAN_RX_REGISTER
 	if (!ret)
 		set_bit(vid, vsi->active_vlans);
+#endif /* !HAVE_VLAN_RX_REGISTER */
+#ifndef HAVE_NETDEV_VLAN_FEATURES
+
+	/* Copy feature flags from netdev to the vlan netdev for this vid.
+	 * This allows things like TSO to bubble down to our vlan device.
+	 * Some vlans, such as VLAN 0 for DCB will not have a v_netdev so
+	 * we will not have a netdev that needs updating.
+	 */
+	if (vsi->vlgrp) {
+		struct vlan_group *vlgrp = vsi->vlgrp;
+		struct net_device *v_netdev = vlan_group_get_device(vlgrp, vid);
+
+		if (v_netdev) {
+			v_netdev->features |= netdev->features;
+#ifdef HAVE_ENCAP_CSUM_OFFLOAD
+			v_netdev->enc_features |= netdev->enc_features;
+#endif
+			vlan_group_set_device(vlgrp, vid, v_netdev);
+		}
+	}
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
 
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
 	return ret;
+#endif
+}
+
+/**
+ * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path
+ * @netdev: network interface to be adjusted
+ * @proto: unused protocol value
+ * @vid: vlan id to be added
+ **/
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+static void i40e_vlan_rx_add_vid_up(struct net_device *netdev,
+				    __always_unused __be16 proto, u16 vid)
+#else
+static void i40e_vlan_rx_add_vid_up(struct net_device *netdev, u16 vid)
+#endif
+{
+#if (!defined(HAVE_NETDEV_VLAN_FEATURES) || !defined(HAVE_VLAN_RX_REGISTER))
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+#endif
+
+	if (vid >= VLAN_N_VID)
+		return;
+#ifndef HAVE_VLAN_RX_REGISTER
+	set_bit(vid, vsi->active_vlans);
+#endif /* !HAVE_VLAN_RX_REGISTER */
+#ifndef HAVE_NETDEV_VLAN_FEATURES
+
+	/* Copy feature flags from netdev to the vlan netdev for this vid.
+	 * This allows things like TSO to bubble down to our vlan device.
+	 * Some vlans, such as VLAN 0 for DCB will not have a v_netdev so
+	 * we will not have a netdev that needs updating.
+	 */
+	if (vsi->vlgrp) {
+		struct vlan_group *vlgrp = vsi->vlgrp;
+		struct net_device *v_netdev = vlan_group_get_device(vlgrp, vid);
+
+		if (v_netdev) {
+			v_netdev->features |= netdev->features;
+#ifdef HAVE_ENCAP_CSUM_OFFLOAD
+			v_netdev->enc_features |= netdev->enc_features;
+#endif
+			vlan_group_set_device(vlgrp, vid, v_netdev);
+		}
+	}
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
 }
 
 /**
  * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
  * @netdev: network interface to be adjusted
+ * @proto: unused protocol value
  * @vid: vlan id to be removed
  *
  * net_device_ops implementation for removing vlan ids
  **/
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
 				 __always_unused __be16 proto, u16 vid)
+#else
+static int i40e_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+#endif
+#else
+static void i40e_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+#endif
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_vsi *vsi = np->vsi;
@@ -2712,10 +3165,14 @@ static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
 	 * already printed from the other function
 	 */
 	i40e_vsi_kill_vlan(vsi, vid);
+#ifndef HAVE_VLAN_RX_REGISTER
 
 	clear_bit(vid, vsi->active_vlans);
+#endif /* !HAVE_VLAN_RX_REGISTER */
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
 
 	return 0;
+#endif
 }
 
 /**
@@ -2729,11 +3186,40 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi)
 	if (!vsi->netdev)
 		return;
 
-	i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
+#ifdef HAVE_VLAN_RX_REGISTER
+	i40e_vlan_rx_register(vsi->netdev, vsi->vlgrp);
+
+	if (vsi->vlgrp) {
+		for (vid = 0; vid < VLAN_N_VID; vid++) {
+			if (!vlan_group_get_device(vsi->vlgrp, vid))
+				continue;
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+			i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
+						vid);
+#else
+			i40e_vlan_rx_add_vid_up(vsi->netdev, vid);
+#endif
+		}
+	}
+#else /* HAVE_VLAN_RX_REGISTER */
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+		i40e_vlan_stripping_enable(vsi);
+#else
+	if (vsi->netdev->features & NETIF_F_HW_VLAN_RX)
+		i40e_vlan_stripping_enable(vsi);
+#endif
+	else
+		i40e_vlan_stripping_disable(vsi);
 
 	for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
-		i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
-				     vid);
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+		i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
+					vid);
+#else
+		i40e_vlan_rx_add_vid_up(vsi->netdev, vid);
+#endif
+#endif
 }
 
 /**
@@ -2760,7 +3246,7 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
 			 "add pvid failed, err %s aq_err %s\n",
 			 i40e_stat_str(&vsi->back->hw, ret),
 			 i40e_aq_str(&vsi->back->hw,
-				     vsi->back->hw.aq.asq_last_status));
+				      vsi->back->hw.aq.asq_last_status));
 		return -ENOENT;
 	}
 
@@ -2775,9 +3261,47 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
  **/
 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
 {
+	vsi->info.pvid = 0;
+
 	i40e_vlan_stripping_disable(vsi);
+}
+
+/**
+ * i40e_get_cloud_filter_type - Get cloud filter type
+ * @flags: set of enabled fields
+ * @type: location to return type
+ *
+ * Given the set of flags indicating which fields are active, look up the type
+ * number for programming the cloud filter in firmware. If the flags are
+ * invalid, return I40E_ERR_CONFIG. @type may be NULL, in which case the
+ * function may be used to verify that the flags would produce a valid type.
+ **/
+int i40e_get_cloud_filter_type(u8 flags, u16 *type)
+{
+	static const u16 table[128] = {
+		[I40E_CLOUD_FILTER_FLAGS_OMAC]  =
+			I40E_AQC_ADD_CLOUD_FILTER_OMAC,
+		[I40E_CLOUD_FILTER_FLAGS_IMAC]  =
+			I40E_AQC_ADD_CLOUD_FILTER_IMAC,
+		[I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN]  =
+			I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
+		[I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
+			I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
+		[I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
+			I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
+		[I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
+			I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
+		[I40E_CLOUD_FILTER_FLAGS_IIP] =
+			I40E_AQC_ADD_CLOUD_FILTER_IIP,
+	};
+
+	if (flags >= ARRAY_SIZE(table) || table[flags] == 0)
+		return I40E_ERR_CONFIG;
+
+	/* Return type if we're given space to do so */
+	if (type)
+		*type = table[flags];
 
-	vsi->info.pvid = 0;
+	return 0;
 }
 
 /**
@@ -2790,7 +3314,7 @@ void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
  *
  * Return 0 on success, negative on failure
  **/
-static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
+int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
 {
 	int i, err = 0;
 
@@ -2802,7 +3326,6 @@ static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
 
 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
 		err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
-
 	return err;
 }
 
@@ -2839,7 +3362,7 @@ static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
  *
  * Return 0 on success, negative on failure
  **/
-static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
+int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
 {
 	int i, err = 0;
 
@@ -2875,22 +3398,53 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
  **/
 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
 {
+#ifndef HAVE_XPS_QOS_SUPPORT
 	struct i40e_vsi *vsi = ring->vsi;
+#endif
+	int cpu;
 
-	if (!ring->q_vector || !ring->netdev)
+	if (!ring->q_vector || !ring->netdev || ring->ch)
 		return;
 
-	if ((vsi->tc_config.numtc <= 1) &&
-	    !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) {
-		netif_set_xps_queue(ring->netdev,
-				    get_cpu_mask(ring->q_vector->v_idx),
-				    ring->queue_index);
-	}
+#ifndef HAVE_XPS_QOS_SUPPORT
+	/* Some older kernels do not support XPS with QoS */
+	if (vsi->tc_config.numtc > 1) {
+#ifndef HAVE_NETDEV_TC_RESETS_XPS
+		/* Additionally, some kernels do not properly clear the XPS
+		 * mapping when the number of traffic classes is changed. In
+		 * order to support these kernels we work around this by
+		 * setting the XPS mapping to the empty cpu set.
+		 */
+		cpumask_var_t mask;
 
-	/* schedule our worker thread which will take care of
-	 * applying the new filter changes
-	 */
-	i40e_service_event_schedule(vsi->back);
+		/* Only clear the settings if we initialized XPS */
+		if (!test_and_clear_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
+			return;
+
+		if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+			return;
+
+		netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
+		free_cpumask_var(mask);
+#endif /* !HAVE_NETDEV_TC_RESETS_XPS */
+		return;
+	}
+
+#endif /* !HAVE_XPS_QOS_SUPPORT */
+	/* We only initialize XPS once, so as not to overwrite user settings */
+	if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
+		return;
+
+	cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
+#ifndef HAVE_NETIF_SET_XPS_QUEUE_CONST_MASK
+	/* In kernels before 3.12 the second parameter has no const
+	 * qualifier, which generates a warning on those older kernels.
+	 */
+	netif_set_xps_queue(ring->netdev, (struct cpumask *)get_cpu_mask(cpu),
+			    ring->queue_index);
+#else /* !HAVE_NETIF_SET_XPS_QUEUE_CONST_MASK */
+	netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu), ring->queue_index);
+#endif /* !HAVE_NETIF_SET_XPS_QUEUE_CONST_MASK */
 }
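+
+/* Note, sketched: with node == -1, cpumask_local_spread() selects the
+ * (i % num_online_cpus())-th online CPU, so with 8 CPUs online v_idx
+ * 0..7 map one-to-one and v_idx 8 wraps back to CPU 0; each Tx ring's
+ * XPS mask therefore names exactly one CPU.
+ */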
 
 /**
@@ -2915,7 +3469,6 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
 	} else {
 		ring->atr_sample_rate = 0;
 	}
-
 	/* configure XPS */
 	i40e_config_xps_tx_ring(ring);
 
@@ -2927,7 +3480,9 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
 	tx_ctx.qlen = ring->count;
 	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
 					       I40E_FLAG_FD_ATR_ENABLED));
+#ifdef HAVE_PTP_1588_CLOCK
 	tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
+#endif /* HAVE_PTP_1588_CLOCK */
 	/* FDIR VSI tx ring can still use RS bit and writebacks */
 	if (vsi->type != I40E_VSI_FDIR)
 		tx_ctx.head_wb_ena = 1;
@@ -2944,7 +3499,14 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
 	 * initialization. This has to be done regardless of
 	 * DCB as by default everything is mapped to TC0.
 	 */
-	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
+	if (ring->ch)
+		tx_ctx.rdylist =
+			le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
+	else
+		tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
 	tx_ctx.rdylist_act = 0;
 
 	/* clear the context in the HMC */
@@ -2966,12 +3528,23 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
 	}
 
 	/* Now associate this queue with this PCI function */
-	if (vsi->type == I40E_VSI_VMDQ2) {
-		qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
-		qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
-			   I40E_QTX_CTL_VFVM_INDX_MASK;
+	if (ring->ch) {
+		if (ring->ch->type == I40E_VSI_VMDQ2)
+			qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
+		else
+			return -EINVAL;
+
+		qtx_ctl |= (ring->ch->vsi_number <<
+			    I40E_QTX_CTL_VFVM_INDX_SHIFT) &
+			    I40E_QTX_CTL_VFVM_INDX_MASK;
 	} else {
-		qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
+		if (vsi->type == I40E_VSI_VMDQ2) {
+			qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
+			qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
+				    I40E_QTX_CTL_VFVM_INDX_MASK;
+		} else {
+			qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
+		}
 	}
 
 	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
@@ -3000,7 +3573,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 	struct i40e_hmc_obj_rxq rx_ctx;
 	i40e_status err = 0;
 
-	ring->state = 0;
+	bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
 
 	/* clear the context structure first */
 	memset(&rx_ctx, 0, sizeof(rx_ctx));
@@ -3014,7 +3587,12 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 	rx_ctx.qlen = ring->count;
 
 	/* use 32 byte descriptors */
+#ifdef I40E_32BYTE_RX
 	rx_ctx.dsize = 1;
+#else
+	/* use 16 byte descriptors */
+	rx_ctx.dsize = 0;
+#endif
 
 	/* descriptor type is always zero
 	 * rx_ctx.dtype = 0;
@@ -3022,10 +3600,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 	rx_ctx.hsplit_0 = 0;
 
 	rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
-	if (hw->revision_id == 0)
-		rx_ctx.lrxqthresh = 0;
-	else
-		rx_ctx.lrxqthresh = 2;
+	rx_ctx.lrxqthresh = 1;
 	rx_ctx.crcstrip = 1;
 	rx_ctx.l2tsel = 1;
 	/* this controls whether VLAN is stripped from inner headers */
@@ -3099,6 +3674,17 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
 {
 	int err = 0;
 	u16 i;
+#ifdef CONFIG_I40E_DISABLE_PACKET_SPLIT
+	u16 max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+
+	if (!vsi->netdev)
+		max_frame = I40E_RXBUFFER_2048;
+	else if (vsi->netdev->mtu + I40E_PACKET_HDR_PAD > max_frame)
+		max_frame = vsi->netdev->mtu + I40E_PACKET_HDR_PAD;
+
+	vsi->max_frame = max_frame;
+	vsi->rx_buf_len = max_frame;
+#else /* CONFIG_I40E_DISABLE_PACKET_SPLIT */
 
 	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
 		vsi->max_frame = I40E_MAX_RXBUFFER;
@@ -3114,6 +3700,7 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
 		vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
 						       I40E_RXBUFFER_2048;
 	}
+#endif /* CONFIG_I40E_DISABLE_PACKET_SPLIT */
 
 	/* set up individual rings */
 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
@@ -3140,6 +3727,7 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
 			rx_ring->dcb_tc = 0;
 			tx_ring->dcb_tc = 0;
 		}
+		return;
 	}
 
 	for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
@@ -3165,6 +3753,7 @@ static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
 {
 	if (vsi->netdev)
 		i40e_set_rx_mode(vsi->netdev);
 }
 
 /**
@@ -3183,7 +3772,7 @@ static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
 	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
 		return;
 
-	/* Reset FDir counters as we're replaying all existing filters */
+	/* reset FDIR counters as we're replaying all existing filters */
 	pf->fd_tcp4_filter_cnt = 0;
 	pf->fd_udp4_filter_cnt = 0;
 	pf->fd_sctp4_filter_cnt = 0;
@@ -3195,6 +3784,209 @@ static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
 	}
 }
 
+/**
+ * i40e_cloud_filter_restore - Restore the switch's cloud filters
+ * @pf: pointer to the PF structure
+ *
+ * This function replays the cloud filter hlist into the hw switch
+ **/
+static void i40e_cloud_filter_restore(struct i40e_pf *pf)
+{
+	struct i40e_cloud_filter *filter;
+	struct hlist_node *node;
+
+	hlist_for_each_entry_safe(filter, node,
+				  &pf->cloud_filter_list, cloud_node) {
+		i40e_add_del_cloud_filter_ex(pf, filter, true);
+	}
+}
+
+/**
+ * i40e_set_cld_element - sets cloud filter element data
+ * @filter: cloud filter rule
+ * @cld: ptr to cloud filter element data
+ *
+ * This is a helper function to copy data into the cloud filter element.
+ **/
+static inline void
+i40e_set_cld_element(struct i40e_cloud_filter *filter,
+		     struct i40e_aqc_cloud_filters_element_data *cld)
+{
+	int i, j;
+	u32 ipa;
+
+	memset(cld, 0, sizeof(*cld));
+	ether_addr_copy(cld->outer_mac, filter->dst_mac);
+	ether_addr_copy(cld->inner_mac, filter->src_mac);
+
+	if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6) {
+		/* copy the parameters that are not specific to IP
+		 * protocols from the filter to the cloud filter element
+		 */
+		ether_addr_copy(cld->outer_mac, filter->outer_mac);
+		ether_addr_copy(cld->inner_mac, filter->inner_mac);
+		cld->inner_vlan = cpu_to_le16(ntohs(filter->inner_vlan));
+		cld->tenant_id = cpu_to_le32(filter->tenant_id);
+		return;
+	}
+
+	if (filter->n_proto == ETH_P_IPV6) {
+#define IPV6_MAX_INDEX	(ARRAY_SIZE(filter->dst_ipv6) - 1)
+		for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6);
+		     i++, j += 2) {
+			ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
+			memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa));
+		}
+	} else {
+		ipa = be32_to_cpu(filter->dst_ipv4);
+		memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
+	}
+
+	cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));
+
+	/* tenant_id is not supported by FW for now; once the support is
+	 * enabled, fill cld->tenant_id with cpu_to_le32(filter->tenant_id)
+	 */
+	if (filter->tenant_id)
+		return;
+}
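+
+/* On the IPv6 copy above: filter->dst_ipv6[] holds the address as four
+ * big-endian words, most significant first, while the AQ element wants
+ * least-significant-first words in CPU byte order; hence the reversed
+ * index dst_ipv6[IPV6_MAX_INDEX - i] combined with be32_to_cpu()
+ * before each 4-byte memcpy().
+ */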
+
+/**
+ * i40e_add_del_cloud_filter_ex - Add/del cloud filter using a big buffer
+ * @pf: working PF
+ * @filter: cloud filter rule
+ * @add: if true, add, if false, delete
+ *
+ * Add or delete a cloud filter for a specific flow spec using a big buffer.
+ * Returns 0 if the filter was successfully added or deleted.
+ **/
+int i40e_add_del_cloud_filter_ex(struct i40e_pf *pf,
+				 struct i40e_cloud_filter *filter,
+				 bool add)
+{
+	struct i40e_aqc_cloud_filters_element_bb cld_filter;
+	int ret;
+
+	if (!i40e_is_l4mode_enabled()) {
+		struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+
+		return i40e_add_del_cloud_filter(vsi, filter, add);
+	}
+
+	/* Make sure a port is specified, otherwise bail out; a channel-
+	 * specific cloud filter needs its 'L4 port' to be non-zero
+	 */
+	if (!filter->dst_port)
+		return -EINVAL;
+
+	memset(&cld_filter, 0, sizeof(cld_filter));
+
+	/* Switch is in Mode 1, so this is an L4 port filter */
+	cld_filter.element.flags =
+		cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
+
+	/* Now copy L4 port in Byte 6..7 in general fields */
+	cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
+		be16_to_cpu(filter->dst_port);
+
+	if (add)
+		ret = i40e_aq_add_cloud_filters_bb(&pf->hw,
+						   filter->seid,
+						   &cld_filter, 1);
+	else
+		ret = i40e_aq_rem_cloud_filters_bb(&pf->hw,
+						   filter->seid,
+						   &cld_filter, 1);
+
+	if (ret)
+		dev_err(&pf->pdev->dev,
+			"failed to %s cloud filter, err %d aq_err %d\n",
+			add ? "add" : "delete",
+			ret, pf->hw.aq.asq_last_status);
+	else
+		dev_info(&pf->pdev->dev,
+			 "%s cloud filter for VSI: %d, L4 port: %d\n",
+			 add ? "add" : "delete",
+			 filter->seid, be16_to_cpu(filter->dst_port));
+
+	return ret;
+}
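+
+/* A minimal usage sketch (filter setup elided), assuming only the
+ * signature shown above:
+ *
+ *	static int example_toggle_cloud_filter(struct i40e_pf *pf,
+ *					       struct i40e_cloud_filter *f)
+ *	{
+ *		int ret = i40e_add_del_cloud_filter_ex(pf, f, true);
+ *
+ *		if (ret)
+ *			return ret;
+ *		// ... use the filter ...
+ *		return i40e_add_del_cloud_filter_ex(pf, f, false);
+ *	}
+ */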
+
+/**
+ * i40e_add_del_cloud_filter - Add/del cloud filter
+ * @vsi: pointer to VSI
+ * @filter: cloud filter rule
+ * @add: if true, add, if false, delete
+ *
+ * Add or delete a cloud filter for a specific flow spec.
+ * Returns 0 if the filter was successfully added.
+ **/
+int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
+			      struct i40e_cloud_filter *filter, bool add)
+{
+	struct i40e_aqc_cloud_filters_element_data cld_filter;
+	struct i40e_pf *pf = vsi->back;
+	int ret;
+	static const u16 flag_table[128] = {
+		[I40E_CLOUD_FILTER_FLAGS_OMAC]  =
+			I40E_AQC_ADD_CLOUD_FILTER_OMAC,
+		[I40E_CLOUD_FILTER_FLAGS_IMAC]  =
+			I40E_AQC_ADD_CLOUD_FILTER_IMAC,
+		[I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN]  =
+			I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
+		[I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
+			I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
+		[I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
+			I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
+		[I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
+			I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
+		[I40E_CLOUD_FILTER_FLAGS_IIP] =
+			I40E_AQC_ADD_CLOUD_FILTER_IIP,
+	};
+
+	if (filter->flags >= ARRAY_SIZE(flag_table))
+		return I40E_ERR_CONFIG;
+
+	/* copy element needed to add cloud filter from filter */
+	i40e_set_cld_element(filter, &cld_filter);
+
+	if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
+		cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
+					     I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
+
+	/* copy queue number from filter to pass to cloud filter engine
+	 * with flags for targeting traffic to specific queue
+	 */
+	if (filter->flags != I40E_CLOUD_FILTER_FLAGS_OMAC) {
+		cld_filter.flags |=
+			cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE);
+		cld_filter.queue_number = cpu_to_le16(filter->queue_id);
+	}
+
+	if (filter->n_proto == ETH_P_IPV6)
+		cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
+						I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
+	else
+		cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
+						I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
+
+	if (add)
+		ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
+						&cld_filter, 1);
+	else
+		ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
+						&cld_filter, 1);
+	if (ret)
+		dev_dbg(&pf->pdev->dev,
+			"Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
+			add ? "add" : "delete", filter->dst_port, ret,
+			pf->hw.aq.asq_last_status);
+	else
+		dev_info(&pf->pdev->dev,
+			 "%s cloud filter for VSI: %d\n",
+			 add ? "Added" : "Deleted", filter->seid);
+	return ret;
+}
+
 /**
  * i40e_vsi_configure - Set up the VSI for action
  * @vsi: the VSI being configured
@@ -3235,55 +4027,46 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
 	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
 		struct i40e_q_vector *q_vector = vsi->q_vectors[i];
 
-		q_vector->itr_countdown = ITR_COUNTDOWN_START;
-		q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[i]->rx_itr_setting);
-		q_vector->rx.latency_range = I40E_LOW_LATENCY;
+		q_vector->rx.next_update = jiffies + 1;
+		q_vector->rx.target_itr =
+			ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
 		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
-		     q_vector->rx.itr);
-		q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[i]->tx_itr_setting);
-		q_vector->tx.latency_range = I40E_LOW_LATENCY;
+		     q_vector->rx.target_itr >> 1);
+		q_vector->rx.current_itr = q_vector->rx.target_itr;
+
+		q_vector->tx.next_update = jiffies + 1;
+		q_vector->tx.target_itr =
+			ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
 		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
-		     q_vector->tx.itr);
+		     q_vector->tx.target_itr >> 1);
+		q_vector->tx.current_itr = q_vector->tx.target_itr;
+
 		wr32(hw, I40E_PFINT_RATEN(vector - 1),
 		     i40e_intrl_usec_to_reg(vsi->int_rate_limit));
 
-		/* Linked list for the queuepairs assigned to this vector */
+		/* start of the linked list of queues assigned to this vector */
 		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
 		for (q = 0; q < q_vector->num_ringpairs; q++) {
 			u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
 			u32 val;
 
-			val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
-			      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
-			      (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
-			      (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
-			      (I40E_QUEUE_TYPE_TX <<
-			       I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
-
+			/* RX queue in linked list with next queue set to TX */
+			val = I40E_QINT_RQCTL_VAL(nextqp, vector, TX);
 			wr32(hw, I40E_QINT_RQCTL(qp), val);
 
 			if (has_xdp) {
-				val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
-				      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
-				      (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
-				      (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
-				      (I40E_QUEUE_TYPE_TX <<
-				       I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
-
+				/* TX queue with next queue set to TX */
+				val = I40E_QINT_TQCTL_VAL(qp, vector, TX);
 				wr32(hw, I40E_QINT_TQCTL(nextqp), val);
 			}
 
-			val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
-			      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
-			      (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
-			      ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
-			      (I40E_QUEUE_TYPE_RX <<
-			       I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+			/* TX queue with next RX or end of linked list */
+			val = I40E_QINT_TQCTL_VAL((qp + 1), vector, RX);
 
 			/* Terminate the linked list */
 			if (q == (q_vector->num_ringpairs - 1))
-				val |= (I40E_QUEUE_END_OF_LIST <<
-					I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
+				val |= (I40E_QUEUE_END_OF_LIST
+					   << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
 
 			wr32(hw, I40E_QINT_TQCTL(qp), val);
 			qp++;
@@ -3295,7 +4078,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
 
 /**
  * i40e_enable_misc_int_causes - enable the non-queue interrupts
- * @hw: ptr to the hardware info
+ * @pf: pointer to private device data structure
  **/
 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
 {
@@ -3314,12 +4097,13 @@ static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
 	      I40E_PFINT_ICR0_ENA_HMC_ERR_MASK       |
 	      I40E_PFINT_ICR0_ENA_VFLR_MASK          |
 	      I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
-
 	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
 		val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
+#ifdef HAVE_PTP_1588_CLOCK
 
 	if (pf->flags & I40E_FLAG_PTP)
 		val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+#endif /* HAVE_PTP_1588_CLOCK */
 
 	wr32(hw, I40E_PFINT_ICR0_ENA, val);
 
@@ -3341,44 +4125,36 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
 	struct i40e_q_vector *q_vector = vsi->q_vectors[0];
 	struct i40e_pf *pf = vsi->back;
 	struct i40e_hw *hw = &pf->hw;
-	u32 val;
 
 	/* set the ITR configuration */
-	q_vector->itr_countdown = ITR_COUNTDOWN_START;
-	q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[0]->rx_itr_setting);
-	q_vector->rx.latency_range = I40E_LOW_LATENCY;
-	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
-	q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[0]->tx_itr_setting);
-	q_vector->tx.latency_range = I40E_LOW_LATENCY;
-	wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
+	q_vector->rx.next_update = jiffies + 1;
+	q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
+	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1);
+	q_vector->rx.current_itr = q_vector->rx.target_itr;
+	q_vector->tx.next_update = jiffies + 1;
+	q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
+	wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1);
+	q_vector->tx.current_itr = q_vector->tx.target_itr;
 
 	i40e_enable_misc_int_causes(pf);
 
 	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
 	wr32(hw, I40E_PFINT_LNKLST0, 0);
 
-	/* Associate the queue pair to the vector and enable the queue int */
-	val = I40E_QINT_RQCTL_CAUSE_ENA_MASK		       |
-	      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)  |
-	      (nextqp	   << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
-	      (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
-
-	wr32(hw, I40E_QINT_RQCTL(0), val);
-
-	if (i40e_enabled_xdp_vsi(vsi)) {
-		val = I40E_QINT_TQCTL_CAUSE_ENA_MASK		     |
-		      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
-		      (I40E_QUEUE_TYPE_TX
-		       << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+	/* Associate the queue pair to the vector and enable the queue
+	 * interrupt: the RX queue heads the linked list with the next
+	 * queue set to TX
+	 */
+	wr32(hw, I40E_QINT_RQCTL(0), I40E_QINT_RQCTL_VAL(nextqp, 0, TX));
 
-	       wr32(hw, I40E_QINT_TQCTL(nextqp), val);
+	if (nextqp) {
+		/* TX queue in linked list with next queue set to TX */
+		wr32(hw, I40E_QINT_TQCTL(nextqp),
+		     I40E_QINT_TQCTL_VAL(nextqp, 0, TX));
 	}
 
-	val = I40E_QINT_TQCTL_CAUSE_ENA_MASK		      |
-	      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
-	      (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
-
-	wr32(hw, I40E_QINT_TQCTL(0), val);
+	/* last TX queue so the next RX queue doesn't matter */
+	wr32(hw, I40E_QINT_TQCTL(0),
+	     I40E_QINT_TQCTL_VAL(I40E_QUEUE_END_OF_LIST, 0, RX));
 	i40e_flush(hw);
 }
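+
+/* Sketch of the helper macros used above; I40E_QINT_RQCTL_VAL() and
+ * I40E_QINT_TQCTL_VAL() are assumed to expand to the open-coded values
+ * they replace in this patch, roughly (line continuations elided):
+ *
+ *	#define I40E_QINT_RQCTL_VAL(qp, vector, nextq_type)
+ *		(I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+ *		 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+ *		 ((vector) << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
+ *		 ((qp) << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ *		 (I40E_QUEUE_TYPE_##nextq_type
+ *		  << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT))
+ *
+ * matching the expressions removed from i40e_vsi_configure_msix().
+ */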
 
@@ -3398,15 +4174,14 @@ void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
 /**
  * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
  * @pf: board private structure
- * @clearpba: true when all pending interrupt events should be cleared
  **/
-void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba)
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
 {
 	struct i40e_hw *hw = &pf->hw;
 	u32 val;
 
 	val = I40E_PFINT_DYN_CTL0_INTENA_MASK   |
-	      (clearpba ? I40E_PFINT_DYN_CTL0_CLEARPBA_MASK : 0) |
+	      I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
 	      (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
 
 	wr32(hw, I40E_PFINT_DYN_CTL0, val);
@@ -3430,6 +4205,7 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
+#ifdef HAVE_IRQ_AFFINITY_NOTIFY
 /**
  * i40e_irq_affinity_notify - Callback for affinity changes
  * @notify: context as to what irq was changed
@@ -3456,6 +4232,7 @@ static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
  * receive notifications.
  **/
 static void i40e_irq_affinity_release(struct kref *ref) {}
+#endif /* HAVE_IRQ_AFFINITY_NOTIFY */
 
 /**
  * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
@@ -3464,7 +4241,7 @@ static void i40e_irq_affinity_release(struct kref *ref) {}
  *
  * Allocates MSI-X vectors and requests interrupts from the kernel.
  **/
-static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
+int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
 {
 	int q_vectors = vsi->num_q_vectors;
 	struct i40e_pf *pf = vsi->back;
@@ -3473,6 +4250,9 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
 	int tx_int_idx = 0;
 	int vector, err;
 	int irq_num;
+#ifdef HAVE_IRQ_AFFINITY_HINT
+	int cpu;
+#endif
 
 	for (vector = 0; vector < q_vectors; vector++) {
 		struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
@@ -3504,14 +4284,22 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
 			goto free_queue_irqs;
 		}
 
+#ifdef HAVE_IRQ_AFFINITY_NOTIFY
 		/* register for affinity change notifications */
 		q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
 		q_vector->affinity_notify.release = i40e_irq_affinity_release;
 		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
-		/* get_cpu_mask returns a static constant mask with
-		 * a permanent lifetime so it's ok to use here.
+#endif
+#ifdef HAVE_IRQ_AFFINITY_HINT
+		/* Spread affinity hints out across online CPUs.
+		 *
+		 * get_cpu_mask returns a static constant mask with
+		 * a permanent lifetime so it's ok to pass to
+		 * irq_set_affinity_hint without making a copy.
 		 */
-		irq_set_affinity_hint(irq_num, get_cpu_mask(q_vector->v_idx));
+		cpu = cpumask_local_spread(q_vector->v_idx, -1);
+		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
+#endif /* HAVE_IRQ_AFFINITY_HINT */
 	}
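
/* Illustration only: cpumask_local_spread(i, node) returns the i-th online
 * CPU, preferring CPUs local to 'node' (-1 means no node preference), and
 * wraps around once i exceeds the online CPU count, so the loop above spreads
 * the vectors round-robin across CPUs. A condensed sketch, assuming
 * q_vectors interrupts starting at pf->msix_entries[base]:
 *
 *	for (v = 0; v < q_vectors; v++) {
 *		int cpu = cpumask_local_spread(v, -1);
 *
 *		irq_set_affinity_hint(pf->msix_entries[base + v].vector,
 *				      get_cpu_mask(cpu));
 *	}
 */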
 
 	vsi->irqs_ready = true;
@@ -3521,8 +4309,12 @@ free_queue_irqs:
 	while (vector) {
 		vector--;
 		irq_num = pf->msix_entries[base + vector].vector;
+#ifdef HAVE_IRQ_AFFINITY_NOTIFY
 		irq_set_affinity_notifier(irq_num, NULL);
+#endif
+#ifdef HAVE_IRQ_AFFINITY_HINT
 		irq_set_affinity_hint(irq_num, NULL);
+#endif
 		free_irq(irq_num, &vsi->q_vectors[vector]);
 	}
 	return err;
@@ -3587,7 +4379,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
 		for (i = 0; i < vsi->num_q_vectors; i++)
 			i40e_irq_dynamic_enable(vsi, i);
 	} else {
-		i40e_irq_dynamic_enable_icr0(pf, true);
+		i40e_irq_dynamic_enable_icr0(pf);
 	}
 
 	i40e_flush(&pf->hw);
@@ -3595,14 +4387,20 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
 }
 
 /**
- * i40e_stop_misc_vector - Stop the vector that handles non-queue events
+ * i40e_free_misc_vector - Free the vector that handles non-queue events
  * @pf: board private structure
  **/
-static void i40e_stop_misc_vector(struct i40e_pf *pf)
+static void i40e_free_misc_vector(struct i40e_pf *pf)
 {
 	/* Disable ICR 0 */
 	wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
 	i40e_flush(&pf->hw);
+
+	if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
+		synchronize_irq(pf->msix_entries[0].vector);
+		free_irq(pf->msix_entries[0].vector, pf);
+		clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
+	}
 }
 
 /**
@@ -3633,7 +4431,6 @@ static irqreturn_t i40e_intr(int irq, void *data)
 	if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
 	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
 		pf->sw_int_count++;
-
 	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
 	    (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
 		ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
@@ -3697,15 +4494,20 @@ static irqreturn_t i40e_intr(int irq, void *data)
 			 rd32(hw, I40E_PFHMC_ERRORDATA));
 	}
 
+#ifdef HAVE_PTP_1588_CLOCK
 	if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
 		u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
 
-		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
-			icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_EVENT0_MASK)
+			schedule_work(&pf->ptp_extts0_work);
+
+		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK)
 			i40e_ptp_tx_hwtstamp(pf);
-		}
+
+		icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
 	}
 
+#endif /* HAVE_PTP_1588_CLOCK */
 	/* If a critical error is pending we have no choice but to reset the
 	 * device.
 	 * Report and mask out any remaining unexpected interrupts.
@@ -3728,9 +4530,10 @@ static irqreturn_t i40e_intr(int irq, void *data)
 enable_intr:
 	/* re-enable interrupt causes */
 	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
-	if (!test_bit(__I40E_DOWN, pf->state)) {
+	if (!test_bit(__I40E_DOWN, pf->state) ||
+	    test_bit(__I40E_RECOVERY_MODE, pf->state)) {
 		i40e_service_event_schedule(pf);
-		i40e_irq_dynamic_enable_icr0(pf, false);
+		i40e_irq_dynamic_enable_icr0(pf);
 	}
 
 	return ret;
@@ -3762,7 +4565,7 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
 			break;
 
 		/* prevent any other reads prior to eop_desc */
-		smp_rmb();
+		read_barrier_depends();
 
 		/* if the descriptor isn't done, no work yet to do */
 		if (!(eop_desc->cmd_type_offset_bsz &
@@ -3903,6 +4706,7 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
 		num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
 
 		q_vector->num_ringpairs = num_ringpairs;
+		q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
 
 		q_vector->rx.count = 0;
 		q_vector->tx.count = 0;
@@ -4053,8 +4857,8 @@ static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
  * @is_xdp: true if the queue is used for XDP
  * @enable: start or stop the queue
  **/
-static int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
-				  bool is_xdp, bool enable)
+int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
+			   bool is_xdp, bool enable)
 {
 	int ret;
 
@@ -4096,10 +4900,10 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
 		ret = i40e_control_wait_tx_q(vsi->seid, pf,
 					     pf_q + vsi->alloc_queue_pairs,
 					     true /*is xdp*/, enable);
+
 		if (ret)
 			break;
 	}
-
 	return ret;
 }
 
@@ -4138,9 +4942,9 @@ static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
  * @pf_q: the PF queue to configure
  * @enable: start or stop the queue
  *
- * This function enables or disables a single queue. Note that any delay
- * required after the operation is expected to be handled by the caller of
- * this function.
+ * This function enables or disables a single queue. Note that
+ * any delay required after the operation is expected to be
+ * handled by the caller of this function.
  **/
 static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
 {
@@ -4169,6 +4973,30 @@ static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
 	wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
 }
 
+/**
+ * i40e_control_wait_rx_q
+ * @pf: the PF structure
+ * @pf_q: queue being configured
+ * @enable: start or stop the rings
+ *
+ * This function enables or disables a single queue along with waiting
+ * for the change to finish. The caller of this function should handle
+ * the delays needed in the case of disabling queues.
+ **/
+int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
+{
+	i40e_control_rx_q(pf, pf_q, enable);
+
+	/* wait for the change to finish */
+	return i40e_pf_rxq_wait(pf, pf_q, enable);
+}
+
 /**
  * i40e_vsi_control_rx - Start or stop a VSI's rings
  * @vsi: the VSI being configured
@@ -4181,10 +5009,7 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
 
 	pf_q = vsi->base_queue;
 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
-		i40e_control_rx_q(pf, pf_q, enable);
-
-		/* wait for the change to finish */
-		ret = i40e_pf_rxq_wait(pf, pf_q, enable);
+		ret = i40e_control_wait_rx_q(pf, pf_q, enable);
 		if (ret) {
 			dev_info(&pf->pdev->dev,
 				 "VSI seid %d Rx ring %d %sable timeout\n",
@@ -4193,9 +5018,7 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
 		}
 	}
 
-	/* Due to HW errata, on Rx disable only, the register can indicate done
-	 * before it really is. Needs 50ms to be sure
-	 */
+	/* HW needs up to 50ms to finish RX queue disable */
 	if (!enable)
 		mdelay(50);
 
@@ -4291,10 +5114,14 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
 			    !vsi->q_vectors[i]->num_ringpairs)
 				continue;
 
+#ifdef HAVE_IRQ_AFFINITY_NOTIFY
 			/* clear the affinity notifier in the IRQ descriptor */
 			irq_set_affinity_notifier(irq_num, NULL);
+#endif
+#ifdef HAVE_IRQ_AFFINITY_HINT
 			/* remove our suggested affinity mask for this IRQ */
 			irq_set_affinity_hint(irq_num, NULL);
+#endif
 			synchronize_irq(irq_num);
 			free_irq(irq_num, vsi->q_vectors[i]);
 
@@ -4457,11 +5284,7 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
 {
 	int i;
 
-	i40e_stop_misc_vector(pf);
-	if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
-		synchronize_irq(pf->msix_entries[0].vector);
-		free_irq(pf->msix_entries[0].vector, pf);
-	}
+	i40e_free_misc_vector(pf);
 
 	i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
 		      I40E_IWARP_IRQ_PILE_ID);
@@ -4487,7 +5310,7 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi)
 	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
 		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
 
-		if (q_vector->rx.ring || q_vector->tx.ring)
+		if (q_vector->tx.ring || q_vector->rx.ring)
 			napi_enable(&q_vector->napi);
 	}
 }
@@ -4506,7 +5329,7 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
 	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
 		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
 
-		if (q_vector->rx.ring || q_vector->tx.ring)
+		if (q_vector->tx.ring || q_vector->rx.ring)
 			napi_disable(&q_vector->napi);
 	}
 }
@@ -4524,23 +5347,27 @@ static void i40e_vsi_close(struct i40e_vsi *vsi)
 	i40e_vsi_free_tx_resources(vsi);
 	i40e_vsi_free_rx_resources(vsi);
 	vsi->current_netdev_flags = 0;
-	pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
+	set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
 	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
-		pf->flags |=  I40E_FLAG_CLIENT_RESET;
+		set_bit(__I40E_CLIENT_RESET, pf->state);
 }
 
 /**
  * i40e_quiesce_vsi - Pause a given VSI
  * @vsi: the VSI being paused
  **/
-static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
+void i40e_quiesce_vsi(struct i40e_vsi *vsi)
 {
 	if (test_bit(__I40E_VSI_DOWN, vsi->state))
 		return;
 
 	set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
 	if (vsi->netdev && netif_running(vsi->netdev))
+#ifdef HAVE_NET_DEVICE_OPS
 		vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+#else /* HAVE_NET_DEVICE_OPS */
+		vsi->netdev->stop(vsi->netdev);
+#endif /* HAVE_NET_DEVICE_OPS */
 	else
 		i40e_vsi_close(vsi);
 }
@@ -4549,13 +5376,17 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
  * i40e_unquiesce_vsi - Resume a given VSI
  * @vsi: the VSI being resumed
  **/
-static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
+void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
 {
 	if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
 		return;
 
 	if (vsi->netdev && netif_running(vsi->netdev))
+#ifdef HAVE_NET_DEVICE_OPS
 		vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
+#else /* HAVE_NET_DEVICE_OPS */
+		vsi->netdev->open(vsi->netdev);
+#endif /* HAVE_NET_DEVICE_OPS */
 	else
 		i40e_vsi_open(vsi);   /* this clears the DOWN bit */
 }
@@ -4564,7 +5395,7 @@ static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
  * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
  * @pf: the PF
  **/
-static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
+void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
 {
 	int v;
 
@@ -4578,7 +5409,7 @@ static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
  * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
  * @pf: the PF
  **/
-static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
+void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
 {
 	int v;
 
@@ -4593,7 +5424,7 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
  * @vsi: the VSI being configured
  *
  * Wait until all queues on a given VSI have been disabled.
- **/
+**/
 int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
 {
 	struct i40e_pf *pf = vsi->back;
@@ -4616,6 +5447,7 @@ int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
 		/* Check and wait for the XDP Tx queue */
 		ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
 				       false);
+
 		if (ret) {
 			dev_info(&pf->pdev->dev,
 				 "VSI seid %d XDP Tx ring %d disable timeout\n",
@@ -4636,7 +5468,7 @@ wait_rx:
 	return 0;
 }
 
-#ifdef CONFIG_I40E_DCB
+#ifdef CONFIG_DCB
 /**
  * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
  * @pf: the PF
@@ -4658,106 +5490,7 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
 
 	return ret;
 }
-
-#endif
-
-/**
- * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue
- * @q_idx: TX queue number
- * @vsi: Pointer to VSI struct
- *
- * This function checks specified queue for given VSI. Detects hung condition.
- * We proactively detect hung TX queues by checking if interrupts are disabled
- * but there are pending descriptors.  If it appears hung, attempt to recover
- * by triggering a SW interrupt.
- **/
-static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
-{
-	struct i40e_ring *tx_ring = NULL;
-	struct i40e_pf	*pf;
-	u32 val, tx_pending;
-	int i;
-
-	pf = vsi->back;
-
-	/* now that we have an index, find the tx_ring struct */
-	for (i = 0; i < vsi->num_queue_pairs; i++) {
-		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
-			if (q_idx == vsi->tx_rings[i]->queue_index) {
-				tx_ring = vsi->tx_rings[i];
-				break;
-			}
-		}
-	}
-
-	if (!tx_ring)
-		return;
-
-	/* Read interrupt register */
-	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
-		val = rd32(&pf->hw,
-			   I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
-					       tx_ring->vsi->base_vector - 1));
-	else
-		val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
-
-	tx_pending = i40e_get_tx_pending(tx_ring);
-
-	/* Interrupts are disabled and TX pending is non-zero,
-	 * trigger the SW interrupt (don't wait). Worst case
-	 * there will be one extra interrupt which may result
-	 * into not cleaning any queues because queues are cleaned.
-	 */
-	if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
-		i40e_force_wb(vsi, tx_ring->q_vector);
-}
-
-/**
- * i40e_detect_recover_hung - Function to detect and recover hung_queues
- * @pf:  pointer to PF struct
- *
- * LAN VSI has netdev and netdev has TX queues. This function is to check
- * each of those TX queues if they are hung, trigger recovery by issuing
- * SW interrupt.
- **/
-static void i40e_detect_recover_hung(struct i40e_pf *pf)
-{
-	struct net_device *netdev;
-	struct i40e_vsi *vsi;
-	unsigned int i;
-
-	/* Only for LAN VSI */
-	vsi = pf->vsi[pf->lan_vsi];
-
-	if (!vsi)
-		return;
-
-	/* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
-	if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
-	    test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
-		return;
-
-	/* Make sure type is MAIN VSI */
-	if (vsi->type != I40E_VSI_MAIN)
-		return;
-
-	netdev = vsi->netdev;
-	if (!netdev)
-		return;
-
-	/* Bail out if netif_carrier is not OK */
-	if (!netif_carrier_ok(netdev))
-		return;
-
-	/* Go thru' TX queues for netdev */
-	for (i = 0; i < netdev->num_tx_queues; i++) {
-		struct netdev_queue *q;
-
-		q = netdev_get_tx_queue(netdev, i);
-		if (q)
-			i40e_detect_recover_hung_queue(i, vsi);
-	}
-}
+#endif /* CONFIG_DCB */
 
 /**
  * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
@@ -4849,20 +5582,45 @@ static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
 	return enabled_tc;
 }
 
+#ifdef __TC_MQPRIO_MODE_MAX
+/**
+ * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
+ * @pf: PF being queried
+ *
+ * Query the current MQPRIO configuration and return the number of
+ * traffic classes enabled.
+ **/
+static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
+{
+	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+	u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
+	u8 enabled_tc = 1, i;
+
+	for (i = 1; i < num_tc; i++)
+		enabled_tc |= BIT(i);
+	return enabled_tc;
+}
+#endif
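
/* Example of the bitmap built above: with mqprio num_tc = 3 the loop yields
 * enabled_tc = 1 | BIT(1) | BIT(2) = 0x7, i.e. TC0, TC1 and TC2 enabled.
 */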
+
 /**
  * i40e_pf_get_num_tc - Get enabled traffic classes for PF
  * @pf: PF being queried
  *
  * Return number of traffic classes enabled for the given PF
  **/
-static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
+u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
 {
 	struct i40e_hw *hw = &pf->hw;
-	u8 i, enabled_tc = 1;
+	u8 i, enabled_tc;
 	u8 num_tc = 0;
 	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
 
-	/* If DCB is not enabled then always in single TC */
+#ifdef __TC_MQPRIO_MODE_MAX
+	if (pf->flags & I40E_FLAG_TC_MQPRIO)
+		return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
+#endif
+
+	/* If neither MQPRIO nor DCB is enabled, then always use single TC */
 	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
 		return 1;
 
@@ -4874,7 +5632,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
 	if (pf->hw.func_caps.iscsi)
 		enabled_tc =  i40e_get_iscsi_tc_map(pf);
 	else
-		return 1; /* Only TC0 */
+		return 1;/* Only TC0 */
 
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
 		if (enabled_tc & BIT(i))
@@ -4891,7 +5649,14 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
  **/
 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
 {
-	/* If DCB is not enabled for this PF then just return default TC */
+#ifdef __TC_MQPRIO_MODE_MAX
+	if (pf->flags & I40E_FLAG_TC_MQPRIO)
+		return i40e_mqprio_get_enabled_tc(pf);
+#endif
+
+	/* If neither MQPRIO nor DCB is enabled for this PF then just return
+	 * default TC
+	 */
 	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
 		return I40E_DEFAULT_TRAFFIC_CLASS;
 
@@ -4951,14 +5716,14 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
 		/* Still continuing */
 	}
 
-	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
+	vsi->bw_limit = LE16_TO_CPU(bw_config.port_bw_limit);
 	vsi->bw_max_quanta = bw_config.max_bw;
-	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
-		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
+	tc_bw_max = LE16_TO_CPU(bw_ets_config.tc_bw_max[0]) |
+		    (LE16_TO_CPU(bw_ets_config.tc_bw_max[1]) << 16);
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
 		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
 		vsi->bw_ets_limit_credits[i] =
-					le16_to_cpu(bw_ets_config.credits[i]);
+					LE16_TO_CPU(bw_ets_config.credits[i]);
 		/* 3 bits out of 4 for each TC */
 		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
 	}
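
/* Worked example for the masking above: with tc_bw_max = 0x00532100, TC2's
 * max quanta is (0x00532100 >> (2 * 4)) & 0x7 = 0x1; only the low 3 bits of
 * each 4-bit field are kept.
 */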
@@ -4970,7 +5735,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
  * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
  * @vsi: the VSI being configured
  * @enabled_tc: TC bitmap
- * @bw_credits: BW shared credits per TC
+ * @bw_share: BW shared credits per TC
  *
  * Returns 0 on success, negative value on failure
  **/
@@ -4978,19 +5743,32 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
 				       u8 *bw_share)
 {
 	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
+	struct i40e_pf *pf = vsi->back;
 	i40e_status ret;
 	int i;
 
+#ifdef __TC_MQPRIO_MODE_MAX
+	/* There is no need to reset BW when mqprio mode is on. */
+	if (pf->flags & I40E_FLAG_TC_MQPRIO)
+		return 0;
+	if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
+		ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
+		if (ret)
+			dev_info(&pf->pdev->dev,
+				 "Failed to reset tx rate for vsi->seid %u\n",
+				 vsi->seid);
+		return ret;
+	}
+#endif
 	bw_data.tc_valid_bits = enabled_tc;
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
 		bw_data.tc_bw_credits[i] = bw_share[i];
 
-	ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
-				       NULL);
+	ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
 	if (ret) {
-		dev_info(&vsi->back->pdev->dev,
+		dev_info(&pf->pdev->dev,
 			 "AQ command Config VSI BW allocation per TC failed = %d\n",
-			 vsi->back->hw.aq.asq_last_status);
+			 pf->hw.aq.asq_last_status);
 		return -EINVAL;
 	}
 
@@ -5043,6 +5821,9 @@ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
 					vsi->tc_config.tc_info[i].qoffset);
 	}
 
+	if (pf->flags & I40E_FLAG_TC_MQPRIO)
+		return;
+
 	/* Assign UP2TC map for the VSI */
 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
 		/* Get the actual TC# for the UP */
@@ -5085,16 +5866,23 @@ static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
  * It is expected that the VSI queues have been quisced before calling
  * this function.
  **/
-static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
+int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
 {
 	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
 	struct i40e_vsi_context ctxt;
 	int ret = 0;
 	int i;
-
+#ifdef __TC_MQPRIO_MODE_MAX
 	/* Check if enabled_tc is same as existing or new TCs */
+	if (vsi->tc_config.enabled_tc == enabled_tc &&
+	    vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
+		return ret;
+#else
 	if (vsi->tc_config.enabled_tc == enabled_tc)
 		return ret;
+#endif
 
 	/* Enable ETS TCs with equal BW Share for now across all VSIs */
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
@@ -5104,19 +5892,74 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
 
 	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
 	if (ret) {
-		dev_info(&vsi->back->pdev->dev,
+		struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
+
+		dev_info(&pf->pdev->dev,
 			 "Failed configuring TC map %d for VSI %d\n",
 			 enabled_tc, vsi->seid);
-		goto out;
-	}
+		ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
+						  &bw_config, NULL);
+		if (ret) {
+			dev_info(&pf->pdev->dev,
+				 "Failed querying vsi bw info, err %s aq_err %s\n",
+				 i40e_stat_str(hw, ret),
+				 i40e_aq_str(hw, hw->aq.asq_last_status));
+			goto out;
+		}
+		if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
+			u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;
 
-	/* Update Queue Pairs Mapping for currently enabled UPs */
+			if (!valid_tc)
+				valid_tc = bw_config.tc_valid_bits;
+			/* Always enable TC0, no matter what */
+			valid_tc |= 1;
+			dev_info(&pf->pdev->dev,
+				 "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
+				 enabled_tc, bw_config.tc_valid_bits, valid_tc);
+			enabled_tc = valid_tc;
+		}
+
+		ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
+		if (ret) {
+			dev_err(&pf->pdev->dev,
+				"Unable to  configure TC map %d for VSI %d\n",
+				enabled_tc, vsi->seid);
+			goto out;
+		}
+	}
+
+	/* Update Queue Pairs Mapping for currently enabled UPs */
 	ctxt.seid = vsi->seid;
 	ctxt.pf_num = vsi->back->hw.pf_id;
 	ctxt.vf_num = 0;
 	ctxt.uplink_seid = vsi->uplink_seid;
 	ctxt.info = vsi->info;
+
+#ifdef __TC_MQPRIO_MODE_MAX
+	if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
+		ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
+		if (ret)
+			goto out;
+	} else {
+		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
+	}
+	/* On destroying the qdisc, reset vsi->rss_size, as number of enabled
+	 * queues changed.
+	 */
+	if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
+		vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
+				      vsi->num_queue_pairs);
+		ret = i40e_vsi_config_rss(vsi);
+		if (ret) {
+			dev_info(&vsi->back->pdev->dev,
+				 "Failed to reconfig rss for num_queues\n");
+			return ret;
+		}
+		vsi->reconfig_rss = false;
+	}
+#else
 	i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
+#endif
 
 	if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
 		ctxt.info.valid_sections |=
@@ -5124,14 +5967,15 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
 		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
 	}
 
-	/* Update the VSI after updating the VSI queue-mapping information */
-	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+	/* Update the VSI after updating the VSI queue-mapping
+	 * information
+	 */
+	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
 	if (ret) {
-		dev_info(&vsi->back->pdev->dev,
+		dev_info(&pf->pdev->dev,
 			 "Update vsi tc config failed, err %s aq_err %s\n",
-			 i40e_stat_str(&vsi->back->hw, ret),
-			 i40e_aq_str(&vsi->back->hw,
-				     vsi->back->hw.aq.asq_last_status));
+			 i40e_stat_str(hw, ret),
+			 i40e_aq_str(hw, hw->aq.asq_last_status));
 		goto out;
 	}
 	/* update the local VSI info with updated queue map */
@@ -5141,11 +5985,10 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
 	/* Update current VSI BW information */
 	ret = i40e_vsi_get_bw_info(vsi);
 	if (ret) {
-		dev_info(&vsi->back->pdev->dev,
+		dev_info(&pf->pdev->dev,
 			 "Failed updating vsi bw info, err %s aq_err %s\n",
-			 i40e_stat_str(&vsi->back->hw, ret),
-			 i40e_aq_str(&vsi->back->hw,
-				     vsi->back->hw.aq.asq_last_status));
+			 i40e_stat_str(hw, ret),
+			 i40e_aq_str(hw, hw->aq.asq_last_status));
 		goto out;
 	}
 
@@ -5205,7 +6048,7 @@ out:
 	return ret;
 }
 
-#ifdef CONFIG_I40E_DCB
+#ifdef CONFIG_DCB
 /**
  * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
  * @pf: PF struct
@@ -5256,8 +6099,10 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
 		} else {
 			/* Re-configure VSI vectors based on updated TC map */
 			i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
+#ifdef HAVE_DCBNL_IEEE
 			if (pf->vsi[v]->netdev)
 				i40e_dcbnl_set_all(pf->vsi[v]);
+#endif /* HAVE_DCBNL_IEEE */
 		}
 	}
 }
@@ -5300,12 +6145,17 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
 	struct i40e_hw *hw = &pf->hw;
 	int err = 0;
 
-	/* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
-	if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT)
+	/* Do not enable DCB for SW1 and SW2 images even if the FW is capable
+	 * Also do not enable DCBx if FW LLDP agent is disabled
+	 */
+	if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) ||
+	    (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)) {
+		dev_info(&pf->pdev->dev, "DCB is not supported or FW LLDP is disabled\n");
+		err = I40E_NOT_SUPPORTED;
 		goto out;
+	}
 
-	/* Get the initial DCB configuration */
-	err = i40e_init_dcb(hw);
+	err = i40e_init_dcb(hw, true);
 	if (!err) {
 		/* Device/Function is not DCBX capable */
 		if ((!hw->func_caps.dcb) ||
@@ -5319,7 +6169,7 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
 
 			pf->flags |= I40E_FLAG_DCB_CAPABLE;
 			/* Enable DCB tagging only when more than one TC
 			 * or explicitly disable if only one TC
 			 */
 			if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
 				pf->flags |= I40E_FLAG_DCB_ENABLED;
@@ -5328,6 +6178,9 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
 			dev_dbg(&pf->pdev->dev,
 				"DCBX offload is supported for this PF.\n");
 		}
+	} else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
+		dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
+		pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
 	} else {
 		dev_info(&pf->pdev->dev,
 			 "Query for DCB configuration failed, err %s aq_err %s\n",
@@ -5338,28 +6191,36 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
 out:
 	return err;
 }
-#endif /* CONFIG_I40E_DCB */
+
+#endif /* CONFIG_DCB */
 #define SPEED_SIZE 14
 #define FC_SIZE 8
 /**
  * i40e_print_link_message - print link up or down
  * @vsi: the VSI for which link needs a message
+ * @isup: true if link is up, false otherwise
  */
 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
 {
 	enum i40e_aq_link_speed new_speed;
-	char *speed = "Unknown";
+	struct i40e_pf *pf = vsi->back;
+	char *speed = "Unknown ";
 	char *fc = "Unknown";
 	char *fec = "";
 	char *req_fec = "";
 	char *an = "";
 
-	new_speed = vsi->back->hw.phy.link_info.link_speed;
+	if (isup)
+		new_speed = pf->hw.phy.link_info.link_speed;
+	else
+		new_speed = I40E_LINK_SPEED_UNKNOWN;
 
 	if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
 		return;
+
 	vsi->current_isup = isup;
 	vsi->current_speed = new_speed;
+
 	if (!isup) {
 		netdev_info(vsi->netdev, "NIC Link is Down\n");
 		return;
@@ -5368,13 +6229,13 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
 	/* Warn user if link speed on NPAR enabled partition is not at
 	 * least 10GB
 	 */
-	if (vsi->back->hw.func_caps.npar_enable &&
-	    (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
-	     vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
+	if (pf->hw.func_caps.npar_enable &&
+	    (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
+	     pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
 		netdev_warn(vsi->netdev,
 			    "The partition detected link speed that is less than 10Gbps\n");
 
-	switch (vsi->back->hw.phy.link_info.link_speed) {
+	switch (pf->hw.phy.link_info.link_speed) {
 	case I40E_LINK_SPEED_40GB:
 		speed = "40 G";
 		break;
@@ -5387,6 +6248,12 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
 	case I40E_LINK_SPEED_10GB:
 		speed = "10 G";
 		break;
+	case I40E_LINK_SPEED_5GB:
+		speed = "5 G";
+		break;
+	case I40E_LINK_SPEED_2_5GB:
+		speed = "2.5 G";
+		break;
 	case I40E_LINK_SPEED_1GB:
 		speed = "1000 M";
 		break;
@@ -5397,7 +6264,7 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
 		break;
 	}
 
-	switch (vsi->back->hw.fc.current_mode) {
+	switch (pf->hw.fc.current_mode) {
 	case I40E_FC_FULL:
 		fc = "RX/TX";
 		break;
@@ -5412,36 +6279,60 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
 		break;
 	}
 
-	if (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
-		req_fec = ", Requested FEC: None";
-		fec = ", FEC: None";
-		an = ", Autoneg: False";
+	if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
+		req_fec = "None";
+		fec = "None";
+		an = "False";
 
-		if (vsi->back->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
-			an = ", Autoneg: True";
+		if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
+			an = "True";
 
-		if (vsi->back->hw.phy.link_info.fec_info &
+		if (pf->hw.phy.link_info.fec_info &
 		    I40E_AQ_CONFIG_FEC_KR_ENA)
-			fec = ", FEC: CL74 FC-FEC/BASE-R";
-		else if (vsi->back->hw.phy.link_info.fec_info &
+			fec = "CL74 FC-FEC/BASE-R";
+		else if (pf->hw.phy.link_info.fec_info &
 			 I40E_AQ_CONFIG_FEC_RS_ENA)
-			fec = ", FEC: CL108 RS-FEC";
+			fec = "CL108 RS-FEC";
 
 		/* 'CL108 RS-FEC' should be displayed when RS is requested, or
 		 * both RS and FC are requested
 		 */
-		if (vsi->back->hw.phy.link_info.req_fec_info &
+		if (pf->hw.phy.link_info.req_fec_info &
 		    (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
-			if (vsi->back->hw.phy.link_info.req_fec_info &
+			if (pf->hw.phy.link_info.req_fec_info &
 			    I40E_AQ_REQUEST_FEC_RS)
-				req_fec = ", Requested FEC: CL108 RS-FEC";
+				req_fec = "CL108 RS-FEC";
 			else
-				req_fec = ", Requested FEC: CL74 FC-FEC/BASE-R";
+				req_fec = "CL74 FC-FEC/BASE-R";
 		}
+		netdev_info(vsi->netdev,
+			    "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
+			    speed, req_fec, fec, an, fc);
+	} else {
+		struct ethtool_eee edata;
+
+		edata.supported = 0;
+		edata.eee_enabled = false;
+#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+		if (get_ethtool_ops_ext(vsi->netdev)->get_eee)
+			get_ethtool_ops_ext(vsi->netdev)
+				->get_eee(vsi->netdev, &edata);
+#else
+		if (vsi->netdev->ethtool_ops->get_eee)
+			vsi->netdev->ethtool_ops->get_eee(vsi->netdev, &edata);
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
+
+		if (edata.supported)
+			netdev_info(vsi->netdev,
+				    "NIC Link is Up, %sbps Full Duplex, Flow Control: %s, EEE: %s\n",
+				    speed, fc,
+				    edata.eee_enabled ? "Enabled" : "Disabled");
+		else
+			netdev_info(vsi->netdev,
+				    "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
+				    speed, fc);
 	}
 
-	netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s%s, Flow Control: %s\n",
-		    speed, req_fec, fec, an, fc);
 }
 
 /**
@@ -5472,29 +6363,20 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
 		i40e_print_link_message(vsi, true);
 		netif_tx_start_all_queues(vsi->netdev);
 		netif_carrier_on(vsi->netdev);
-	} else if (vsi->netdev) {
-		i40e_print_link_message(vsi, false);
-		/* need to check for qualified module here*/
-		if ((pf->hw.phy.link_info.link_info &
-			I40E_AQ_MEDIA_AVAILABLE) &&
-		    (!(pf->hw.phy.link_info.an_info &
-			I40E_AQ_QUALIFIED_MODULE)))
-			netdev_err(vsi->netdev,
-				   "the driver failed to link because an unqualified module was detected.");
-	}
-
-	/* replay FDIR SB filters */
+	}
+
+	/* replay flow filters */
 	if (vsi->type == I40E_VSI_FDIR) {
 		/* reset fd counters */
-		pf->fd_add_err = 0;
-		pf->fd_atr_cnt = 0;
+		pf->fd_add_err = pf->fd_atr_cnt = 0;
 		i40e_fdir_filter_restore(vsi);
+		i40e_cloud_filter_restore(pf);
 	}
 
 	/* On the next run of the service_task, notify any clients of the new
 	 * opened netdev
 	 */
-	pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
+	set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
 	i40e_service_event_schedule(pf);
 
 	return 0;
@@ -5515,7 +6397,6 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
 	while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
 		usleep_range(1000, 2000);
 	i40e_down(vsi);
-
 	i40e_up(vsi);
 	clear_bit(__I40E_CONFIG_BUSY, pf->state);
 }
@@ -5528,6 +6409,11 @@ int i40e_up(struct i40e_vsi *vsi)
 {
 	int err;
 
+	if ((vsi->type == I40E_VSI_MAIN &&
+	     vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) ||
+	    (vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN))
+		i40e_force_link_state(vsi->back, true);
+
 	err = i40e_vsi_configure(vsi);
 	if (!err)
 		err = i40e_up_complete(vsi);
@@ -5535,6 +6421,111 @@ int i40e_up(struct i40e_vsi *vsi)
 	return err;
 }
 
+/**
+ * i40e_force_link_state - Force the link status
+ * @pf: board private structure
+ * @is_up: whether the link state should be forced up or down
+ **/
+static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
+{
+	struct i40e_aq_get_phy_abilities_resp abilities;
+	struct i40e_aq_set_phy_config config = {0};
+	struct i40e_hw *hw = &pf->hw;
+	bool non_zero_phy_type;
+	i40e_status err;
+	u64 mask;
+	u8 speed;
+
+	non_zero_phy_type = is_up;
+	/* Card might've been put in an unstable state by other drivers
+	 * and applications, which causes incorrect speed values to be
+	 * set on startup. In order to clear speed registers, we call
+	 * get_phy_capabilities twice, once to get initial state of
+	 * available speeds, and once to get current phy config.
+	 */
+	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
+					   NULL);
+	if (err) {
+		dev_err(&pf->pdev->dev,
+			"failed to get phy cap., ret =  %s last_status =  %s\n",
+			i40e_stat_str(hw, err),
+			i40e_aq_str(hw, hw->aq.asq_last_status));
+		return err;
+	}
+	speed = abilities.link_speed;
+
+	/* Get the current phy config */
+	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
+					   NULL);
+	if (err) {
+		dev_err(&pf->pdev->dev,
+			"failed to get phy cap., ret =  %s last_status =  %s\n",
+			i40e_stat_str(hw, err),
+			i40e_aq_str(hw, hw->aq.asq_last_status));
+		return err;
+	}
+
+	/* If the link needs to go up, but it was not forced down and its
+	 * speed values are OK, there is no need for a flap. If
+	 * non_zero_phy_type was set, we still need to force the link up.
+	 */
+	if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN)
+		non_zero_phy_type = true;
+	else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
+		return I40E_SUCCESS;
+
+	/* To force link we need to set bits for all supported PHY types,
+	 * but there are now more than 32, so we need to split the bitmap
+	 * across two fields.
+	 */
+	mask = I40E_PHY_TYPES_BITMASK;
+	config.phy_type =
+		non_zero_phy_type ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
+	config.phy_type_ext =
+		non_zero_phy_type ? (u8)((mask >> 32) & 0xff) : 0;
+	/* Copy the old settings, except for phy_type */
+	config.abilities = abilities.abilities;
+	if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN) {
+		if (is_up)
+			config.abilities |= I40E_AQ_PHY_ENABLE_LINK;
+		else
+			config.abilities &= ~(I40E_AQ_PHY_ENABLE_LINK);
+	}
+	if (abilities.link_speed != 0)
+		config.link_speed = abilities.link_speed;
+	else
+		config.link_speed = speed;
+	config.eee_capability = abilities.eee_capability;
+	config.eeer = abilities.eeer_val;
+	config.low_power_ctrl = abilities.d3_lpan;
+	config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
+			    I40E_AQ_PHY_FEC_CONFIG_MASK;
+	err = i40e_aq_set_phy_config(hw, &config, NULL);
+
+	if (err) {
+		dev_err(&pf->pdev->dev,
+			"set phy config ret =  %s last_status =  %s\n",
+			i40e_stat_str(&pf->hw, err),
+			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+		return err;
+	}
+
+	/* Update the link info */
+	err = i40e_update_link_info(hw);
+	if (err) {
+		/* Wait a little bit (on 40G cards it sometimes takes a really
+		 * long time for link to come back from the atomic reset)
+		 * and try once more
+		 */
+		msleep(1000);
+		i40e_update_link_info(hw);
+	}
+
+	i40e_aq_set_link_restart_an(hw, is_up, NULL);
+
+	return I40E_SUCCESS;
+}
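
/* Example of the mask split above: a wider-than-32-bit I40E_PHY_TYPES_BITMASK
 * value such as 0x0180000001 (hypothetical, for illustration only) becomes
 * config.phy_type = cpu_to_le32(0x80000001) and config.phy_type_ext = 0x01.
 */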
+
 /**
  * i40e_down - Shutdown the connection processing
  * @vsi: the VSI being stopped
@@ -5552,6 +6543,10 @@ void i40e_down(struct i40e_vsi *vsi)
 	}
 	i40e_vsi_disable_irq(vsi);
 	i40e_vsi_stop_rings(vsi);
+	if ((vsi->type == I40E_VSI_MAIN &&
+	     vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) ||
+	    (vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN))
+		i40e_force_link_state(vsi->back, false);
 	i40e_napi_disable_all(vsi);
 
 	for (i = 0; i < vsi->num_queue_pairs; i++) {
@@ -5564,3795 +6559,6555 @@ void i40e_down(struct i40e_vsi *vsi)
 }
 
 /**
- * i40e_setup_tc - configure multiple traffic classes
- * @netdev: net device to configure
- * @tc: number of traffic classes to enable
+ * i40e_get_link_speed - Returns link speed for the interface
+ * @vsi: VSI to be configured
+ *
  **/
-static int i40e_setup_tc(struct net_device *netdev, u8 tc)
+int i40e_get_link_speed(struct i40e_vsi *vsi)
 {
-	struct i40e_netdev_priv *np = netdev_priv(netdev);
-	struct i40e_vsi *vsi = np->vsi;
 	struct i40e_pf *pf = vsi->back;
-	u8 enabled_tc = 0;
-	int ret = -EINVAL;
-	int i;
 
-	/* Check if DCB enabled to continue */
-	if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
-		netdev_info(netdev, "DCB is not enabled for adapter\n");
-		goto exit;
+	switch (pf->hw.phy.link_info.link_speed) {
+	case I40E_LINK_SPEED_40GB:
+		return 40000;
+	case I40E_LINK_SPEED_25GB:
+		return 25000;
+	case I40E_LINK_SPEED_20GB:
+		return 20000;
+	case I40E_LINK_SPEED_10GB:
+		return 10000;
+	case I40E_LINK_SPEED_1GB:
+		return 1000;
+	default:
+		return -EINVAL;
 	}
+}
 
-	/* Check if MFP enabled */
-	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
-		netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
-		goto exit;
-	}
+/**
+ * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
+ * @vsi: VSI to be configured
+ * @seid: seid of the channel/VSI
+ * @max_tx_rate: max TX rate to be configured as BW limit
+ *
+ * Helper function to set BW limit for a given VSI
+ **/
+int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
+{
+	struct i40e_pf *pf = vsi->back;
+	int speed = 0;
+	int ret = 0;
 
-	/* Check whether tc count is within enabled limit */
-	if (tc > i40e_pf_get_num_tc(pf)) {
-		netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
-		goto exit;
+	speed = i40e_get_link_speed(vsi);
+	if (max_tx_rate > speed) {
+		dev_err(&pf->pdev->dev,
+			"Invalid max tx rate %llu specified for VSI seid %d.",
+			max_tx_rate, seid);
+		return -EINVAL;
+	}
+	if (max_tx_rate && max_tx_rate < 50) {
+		dev_warn(&pf->pdev->dev,
+			 "Setting max tx rate to minimum usable value of 50Mbps.\n");
+		max_tx_rate = 50;
 	}
 
-	/* Generate TC map for number of tc requested */
-	for (i = 0; i < tc; i++)
-		enabled_tc |= BIT(i);
+	/* Tx rate credits are in values of 50Mbps, 0 is disabled */
+	ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid,
+					  max_tx_rate / I40E_BW_CREDIT_DIVISOR,
+					  I40E_MAX_BW_INACTIVE_ACCUM, NULL);
+	if (ret)
+		dev_err(&pf->pdev->dev,
+			"Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
+			max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
+			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+	return ret;
+}
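
/* Worked example, assuming I40E_BW_CREDIT_DIVISOR is 50 to match the 50Mbps
 * credit comment above: max_tx_rate = 500 (Mbps) is programmed as
 * 500 / 50 = 10 credits, and any non-zero rate below 50Mbps is first rounded
 * up to the 50Mbps minimum.
 */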
 
-	/* Requesting same TC configuration as already enabled */
-	if (enabled_tc == vsi->tc_config.enabled_tc)
-		return 0;
+#ifdef __TC_MQPRIO_MODE_MAX
+/**
+ * i40e_validate_and_set_switch_mode - sets up switch mode correctly
+ * @vsi: ptr to VSI which has PF backing
+ *
+ * Sets up the switch mode correctly if it needs to be changed, and
+ * restricts the change to the allowed modes.
+ **/
+static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
+{
+	u8 mode;
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	int ret;
 
-	/* Quiesce VSI queues */
-	i40e_quiesce_vsi(vsi);
+	ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
+	if (ret)
+		return -EINVAL;
 
-	/* Configure VSI for enabled TCs */
-	ret = i40e_vsi_config_tc(vsi, enabled_tc);
-	if (ret) {
-		netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
-			    vsi->seid);
-		goto exit;
+	if (hw->dev_caps.switch_mode) {
+		/* if switch mode is set, support mode2 (non-tunneled for
+		 * cloud filter) for now
+		 */
+		u32 switch_mode = hw->dev_caps.switch_mode &
+				  I40E_SWITCH_MODE_MASK;
+		if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
+			if (switch_mode == I40E_CLOUD_FILTER_MODE2)
+				return 0;
+			dev_err(&pf->pdev->dev,
+				"Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
+				hw->dev_caps.switch_mode);
+			return -EINVAL;
+		}
 	}
 
-	/* Unquiesce VSI */
-	i40e_unquiesce_vsi(vsi);
-
-exit:
-	return ret;
-}
+	/* Set Bit 7 to be valid */
+	mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
 
-static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
-			   void *type_data)
-{
-	struct tc_mqprio_qopt *mqprio = type_data;
+	/* Set L4type for TCP support */
+	mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
 
-	if (type != TC_SETUP_MQPRIO)
-		return -EOPNOTSUPP;
+	/* Set cloud filter mode */
+	mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
 
-	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+	/* Prep mode field for set_switch_config */
+	ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
+					pf->last_sw_conf_valid_flags,
+					mode, NULL);
+	if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
+		dev_err(&pf->pdev->dev,
+			"couldn't set switch config bits, err %s aq_err %s\n",
+			i40e_stat_str(hw, ret),
+			i40e_aq_str(hw,
+				    hw->aq.asq_last_status));
 
-	return i40e_setup_tc(netdev, mqprio->num_tc);
+	return ret;
 }
 
 /**
- * i40e_open - Called when a network interface is made active
- * @netdev: network interface device structure
- *
- * The open entry point is called when a network interface is made
- * active by the system (IFF_UP).  At this point all resources needed
- * for transmit and receive operations are allocated, the interrupt
- * handler is registered with the OS, the netdev watchdog subtask is
- * enabled, and the stack is notified that the interface is ready.
+ * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
+ * @vsi: pointer to VSI
+ * @filter: cloud filter rule
+ * @add: if true, add, if false, delete
  *
- * Returns 0 on success, negative value on failure
+ * Add or delete a cloud filter for a specific flow spec using big buffer.
+ * Returns 0 if the filter was successfully added.
  **/
-int i40e_open(struct net_device *netdev)
+int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
+				      struct i40e_cloud_filter *filter,
+				      bool add)
 {
-	struct i40e_netdev_priv *np = netdev_priv(netdev);
-	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_aqc_cloud_filters_element_bb cld_filter;
 	struct i40e_pf *pf = vsi->back;
-	int err;
+	int ret;
 
-	/* disallow open during test or if eeprom is broken */
-	if (test_bit(__I40E_TESTING, pf->state) ||
-	    test_bit(__I40E_BAD_EEPROM, pf->state))
-		return -EBUSY;
+	if (i40e_is_l4mode_enabled()) {
+		dev_err(&pf->pdev->dev,
+			"Not expected to run in L4 cloud filter mode\n");
+		return -EINVAL;
+	}
 
-	netif_carrier_off(netdev);
+	/* Both (src/dst) valid mac_addr are not supported */
+	if ((is_valid_ether_addr(filter->dst_mac) &&
+	     is_valid_ether_addr(filter->src_mac)) ||
+	    (is_multicast_ether_addr(filter->dst_mac) &&
+	     is_multicast_ether_addr(filter->src_mac)))
+		return -EOPNOTSUPP;
 
-	err = i40e_vsi_open(vsi);
-	if (err)
-		return err;
+	/* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
+	 * ports are not supported via big buffer now.
+	 */
+	if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
+		return -EOPNOTSUPP;
 
-	/* configure global TSO hardware offload settings */
-	wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
-						       TCP_FLAG_FIN) >> 16);
-	wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
-						       TCP_FLAG_FIN |
-						       TCP_FLAG_CWR) >> 16);
-	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
+	/* adding filter using src_port/src_ip is not supported at this stage */
+	if (filter->src_port || filter->src_ipv4 ||
+	    !ipv6_addr_any(&filter->ip.v6.src_ip6))
+		return -EOPNOTSUPP;
 
-	udp_tunnel_get_rx_info(netdev);
+	/* copy element needed to add cloud filter from filter */
+	i40e_set_cld_element(filter, &cld_filter.element);
 
-	return 0;
+	if (is_valid_ether_addr(filter->dst_mac) ||
+	    is_valid_ether_addr(filter->src_mac) ||
+	    is_multicast_ether_addr(filter->dst_mac) ||
+	    is_multicast_ether_addr(filter->src_mac)) {
+		/* MAC + IP : unsupported mode */
+		if (filter->dst_ipv4)
+			return -EOPNOTSUPP;
+
+		/* since we validated that L4 port must be valid before
+		 * we get here, start with respective "flags" value
+		 * and update if vlan is present or not
+		 */
+		cld_filter.element.flags =
+			cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);
+
+		if (filter->vlan_id) {
+			cld_filter.element.flags =
+			cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
+		}
+
+	} else if (filter->dst_ipv4 ||
+		   !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
+		cld_filter.element.flags =
+				cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
+		if (filter->n_proto == ETH_P_IPV6)
+			cld_filter.element.flags |=
+				cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
+		else
+			cld_filter.element.flags |=
+				cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
+	} else {
+		dev_err(&pf->pdev->dev,
+			"either mac or ip has to be valid for cloud filter\n");
+		return -EINVAL;
+	}
+
+	/* Now copy L4 port in Byte 6..7 in general fields */
+	cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
+						be16_to_cpu(filter->dst_port);
+
+	if (add) {
+		/* Validate current device switch mode, change if necessary */
+		ret = i40e_validate_and_set_switch_mode(vsi);
+		if (ret) {
+			dev_err(&pf->pdev->dev,
+				"failed to set switch mode, ret %d\n",
+				ret);
+			return ret;
+		}
+
+		ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
+						   &cld_filter, 1);
+	} else {
+		ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
+						   &cld_filter, 1);
+	}
+
+	if (ret)
+		dev_dbg(&pf->pdev->dev,
+			"Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
+			add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
+	else
+		dev_info(&pf->pdev->dev,
+			 "%s cloud filter for VSI: %d, L4 port: %d\n",
+			 add ? "add" : "delete", filter->seid,
+			 ntohs(filter->dst_port));
+	return ret;
 }
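
/* Summary of the flag selection above:
 *   valid/multicast MAC + L4 port, no dst IP -> ..._CLOUD_FILTER_MAC_PORT
 *   valid/multicast MAC + VLAN + L4 port     -> ..._CLOUD_FILTER_MAC_VLAN_PORT
 *   dst IPv4/IPv6 + L4 port, no MAC          -> ..._CLOUD_FILTER_IP_PORT,
 *                                                plus an IPV4/IPV6 flag
 * Every other combination is rejected before the filter is sent to firmware.
 */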
 
 /**
- * i40e_vsi_open -
- * @vsi: the VSI to open
- *
- * Finish initialization of the VSI.
- *
- * Returns 0 on success, negative value on failure
+ * i40e_remove_queue_channels - Remove queue channels for the TCs
+ * @vsi: VSI to be configured
  *
- * Note: expects to be called while under rtnl_lock()
+ * Remove queue channels for the TCs
  **/
-int i40e_vsi_open(struct i40e_vsi *vsi)
+static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
 {
+	enum i40e_admin_queue_err last_aq_status;
+	struct i40e_cloud_filter *cfilter;
+	struct i40e_channel *ch, *ch_tmp;
 	struct i40e_pf *pf = vsi->back;
-	char int_name[I40E_INT_NAME_STR_LEN];
-	int err;
+	struct hlist_node *node;
+	int ret, i;
 
-	/* allocate descriptors */
-	err = i40e_vsi_setup_tx_resources(vsi);
-	if (err)
-		goto err_setup_tx;
-	err = i40e_vsi_setup_rx_resources(vsi);
-	if (err)
-		goto err_setup_rx;
+	/* Reset rss size that was stored when reconfiguring rss for
+	 * channel VSIs with non-power-of-2 queue count.
+	 */
+	vsi->current_rss_size = 0;
 
-	err = i40e_vsi_configure(vsi);
-	if (err)
-		goto err_setup_rx;
+	/* perform cleanup for channels if they exist */
+	if (list_empty(&vsi->ch_list))
+		return;
 
-	if (vsi->netdev) {
-		snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
-			 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
-		err = i40e_vsi_request_irq(vsi, int_name);
-		if (err)
-			goto err_setup_rx;
+	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
+		struct i40e_vsi *p_vsi;
 
-		/* Notify the stack of the actual queue counts. */
-		err = netif_set_real_num_tx_queues(vsi->netdev,
-						   vsi->num_queue_pairs);
-		if (err)
-			goto err_set_queues;
+		list_del(&ch->list);
+		p_vsi = ch->parent_vsi;
+		if (!p_vsi || !ch->initialized) {
+			kfree(ch);
+			continue;
+		}
+		/* Reset queue contexts */
+		for (i = 0; i < ch->num_queue_pairs; i++) {
+			struct i40e_ring *tx_ring, *rx_ring;
+			u16 pf_q;
 
-		err = netif_set_real_num_rx_queues(vsi->netdev,
-						   vsi->num_queue_pairs);
-		if (err)
-			goto err_set_queues;
+			pf_q = ch->base_queue + i;
+			tx_ring = vsi->tx_rings[pf_q];
+			tx_ring->ch = NULL;
 
-	} else if (vsi->type == I40E_VSI_FDIR) {
-		snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
-			 dev_driver_string(&pf->pdev->dev),
-			 dev_name(&pf->pdev->dev));
-		err = i40e_vsi_request_irq(vsi, int_name);
+			rx_ring = vsi->rx_rings[pf_q];
+			rx_ring->ch = NULL;
+		}
 
-	} else {
-		err = -EINVAL;
-		goto err_setup_rx;
-	}
+		/* Reset BW configured for this VSI via mqprio */
+		ret = i40e_set_bw_limit(vsi, ch->seid, 0);
+		if (ret)
+			dev_info(&vsi->back->pdev->dev,
+				 "Failed to reset tx rate for ch->seid %u\n",
+				 ch->seid);
 
-	err = i40e_up_complete(vsi);
-	if (err)
-		goto err_up_complete;
+		/* delete cloud filters associated with this channel */
+		hlist_for_each_entry_safe(cfilter, node,
+					  &pf->cloud_filter_list, cloud_node) {
+			if (cfilter->seid != ch->seid)
+				continue;
 
-	return 0;
+			hash_del(&cfilter->cloud_node);
+			if (cfilter->dst_port)
+				ret = i40e_add_del_cloud_filter_big_buf(vsi,
+									cfilter,
+									false);
+			else
+				ret = i40e_add_del_cloud_filter(vsi, cfilter,
+								false);
+			last_aq_status = pf->hw.aq.asq_last_status;
+			if (ret)
+				dev_info(&pf->pdev->dev,
+					 "Failed to delete cloud filter, err %s aq_err %s\n",
+					 i40e_stat_str(&pf->hw, ret),
+					 i40e_aq_str(&pf->hw, last_aq_status));
+			kfree(cfilter);
+		}
 
-err_up_complete:
-	i40e_down(vsi);
-err_set_queues:
-	i40e_vsi_free_irq(vsi);
-err_setup_rx:
-	i40e_vsi_free_rx_resources(vsi);
-err_setup_tx:
-	i40e_vsi_free_tx_resources(vsi);
-	if (vsi == pf->vsi[pf->lan_vsi])
-		i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true);
+
+		/* delete VSI from FW */
+		ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
+					     NULL);
+		if (ret)
+			dev_err(&vsi->back->pdev->dev,
+				"unable to remove channel (%d) for parent VSI(%d)\n",
+				ch->seid, p_vsi->seid);
+		kfree(ch);
+	}
+	INIT_LIST_HEAD(&vsi->ch_list);
 }
 
 /**
- * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
- * @pf: Pointer to PF
+ * i40e_get_max_queues_for_channel
+ * @vsi: ptr to VSI to which channels are associated with
  *
- * This function destroys the hlist where all the Flow Director
- * filters were saved.
+ * Helper function which returns max value among the queue counts set on the
+ * channels/TCs created.
  **/
-static void i40e_fdir_filter_exit(struct i40e_pf *pf)
+static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
 {
-	struct i40e_fdir_filter *filter;
-	struct i40e_flex_pit *pit_entry, *tmp;
-	struct hlist_node *node2;
+	struct i40e_channel *ch, *ch_tmp;
+	int max = 0;
 
-	hlist_for_each_entry_safe(filter, node2,
-				  &pf->fdir_filter_list, fdir_node) {
-		hlist_del(&filter->fdir_node);
-		kfree(filter);
+	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
+		if (!ch->initialized)
+			continue;
+		if (ch->num_queue_pairs > max)
+			max = ch->num_queue_pairs;
 	}
 
-	list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
-		list_del(&pit_entry->list);
-		kfree(pit_entry);
+	return max;
+}
+
+/**
+ * i40e_validate_num_queues - validate num_queues w.r.t channel
+ * @pf: ptr to PF device
+ * @num_queues: number of queues
+ * @vsi: the parent VSI
+ * @reconfig_rss: indicates should the RSS be reconfigured or not
+ *
+ * This function validates number of queues in the context of new channel
+ * which is being established and determines if RSS should be reconfigured
+ * or not for parent VSI.
+ **/
+static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
+				    struct i40e_vsi *vsi, bool *reconfig_rss)
+{
+	int max_ch_queues;
+
+	if (!reconfig_rss)
+		return -EINVAL;
+
+	*reconfig_rss = false;
+	if (vsi->current_rss_size) {
+		if (num_queues > vsi->current_rss_size) {
+			dev_dbg(&pf->pdev->dev,
+				"Error: num_queues (%d) > vsi's current_size(%d)\n",
+				num_queues, vsi->current_rss_size);
+			return -EINVAL;
+		} else if ((num_queues < vsi->current_rss_size) &&
+			   (!is_power_of_2(num_queues))) {
+			dev_dbg(&pf->pdev->dev,
+				"Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
+				num_queues, vsi->current_rss_size);
+			return -EINVAL;
+		}
 	}
-	INIT_LIST_HEAD(&pf->l3_flex_pit_list);
 
-	list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
-		list_del(&pit_entry->list);
-		kfree(pit_entry);
+	if (!is_power_of_2(num_queues)) {
+		/* Find the max num_queues configured for channel if channel
+		 * exist.
+		 * if channel exist, then enforce 'num_queues' to be more than
+		 * max ever queues configured for channel.
+		 */
+		max_ch_queues = i40e_get_max_queues_for_channel(vsi);
+		if (num_queues < max_ch_queues) {
+			dev_dbg(&pf->pdev->dev,
+				"Error: num_queues (%d) < max queues configured for channel(%d)\n",
+				num_queues, max_ch_queues);
+			return -EINVAL;
+		}
+		*reconfig_rss = true;
 	}
-	INIT_LIST_HEAD(&pf->l4_flex_pit_list);
 
-	pf->fdir_pf_active_filters = 0;
-	pf->fd_tcp4_filter_cnt = 0;
-	pf->fd_udp4_filter_cnt = 0;
-	pf->fd_sctp4_filter_cnt = 0;
-	pf->fd_ip4_filter_cnt = 0;
+	return 0;
+}
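
/* Example of the validation above: with no channels configured yet,
 * num_queues = 6 (not a power of two) returns 0 with *reconfig_rss = true,
 * so the caller reprograms the RSS LUT; with current_rss_size = 4,
 * num_queues = 8 fails with -EINVAL because it exceeds the current RSS size.
 */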
 
-	/* Reprogram the default input set for TCP/IPv4 */
-	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
-				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
-				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+/**
+ * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
+ * @vsi: the VSI being setup
+ * @rss_size: size of RSS, accordingly LUT gets reprogrammed
+ *
+ * This function reconfigures RSS by reprogramming LUTs using 'rss_size'
+ **/
+static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
+{
+	struct i40e_pf *pf = vsi->back;
+	u8 seed[I40E_HKEY_ARRAY_SIZE];
+	struct i40e_hw *hw = &pf->hw;
+	int local_rss_size;
+	u8 *lut;
+	int ret;
 
-	/* Reprogram the default input set for UDP/IPv4 */
-	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
-				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
-				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+	if (!vsi->rss_size)
+		return -EINVAL;
 
-	/* Reprogram the default input set for SCTP/IPv4 */
-	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
-				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
-				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+	if (rss_size > vsi->rss_size)
+		return -EINVAL;
 
-	/* Reprogram the default input set for Other/IPv4 */
-	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
-				I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
+	local_rss_size = min_t(int, vsi->rss_size, rss_size);
+	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
+	if (!lut)
+		return -ENOMEM;
+
+	/* Ignore the user-configured LUT if there is one */
+	i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);
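+	/* i40e_fill_rss_lut() distributes entries round-robin, so with
+	 * local_rss_size = 4 the table cycles 0,1,2,3,0,1,...
+	 */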
+
+	/* Use user configured hash key if there is one, otherwise
+	 * use default.
+	 */
+	if (vsi->rss_hkey_user)
+		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
+	else
+		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
+
+	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
+	if (ret) {
+		dev_info(&pf->pdev->dev,
+			 "Cannot set RSS lut, err %s aq_err %s\n",
+			 i40e_stat_str(hw, ret),
+			 i40e_aq_str(hw, hw->aq.asq_last_status));
+		kfree(lut);
+		return ret;
+	}
+	kfree(lut);
+
+	/* Do the update w.r.t. storing rss_size */
+	if (!vsi->orig_rss_size)
+		vsi->orig_rss_size = vsi->rss_size;
+	vsi->current_rss_size = local_rss_size;
+
+	return ret;
 }
 
 /**
- * i40e_close - Disables a network interface
- * @netdev: network interface device structure
- *
- * The close entry point is called when an interface is de-activated
- * by the OS.  The hardware is still under the driver's control, but
- * this netdev interface is disabled.
+ * i40e_channel_setup_queue_map - Setup a channel queue map
+ * @pf: ptr to PF device
+ * @ctxt: VSI context structure
+ * @ch: ptr to channel structure
  *
- * Returns 0, this is not allowed to fail
+ * Setup queue map for a specific channel
  **/
-int i40e_close(struct net_device *netdev)
+static int i40e_channel_setup_queue_map(struct i40e_pf *pf,
+					struct i40e_vsi_context *ctxt,
+					struct i40e_channel *ch)
 {
-	struct i40e_netdev_priv *np = netdev_priv(netdev);
-	struct i40e_vsi *vsi = np->vsi;
+	u16 qmap, sections = 0;
+	u8 offset = 0;
+	int pow;
 
-	i40e_vsi_close(vsi);
+	if (ch->num_queue_pairs > pf->num_lan_msix) {
+		dev_err(&pf->pdev->dev,
+			"Requested number of queues exceeds available MSI-X vectors. Refusing to set queue map\n");
+		return -EINVAL;
+	}
+
+	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
+	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
+
+	/* find the next higher power-of-2 of num queue pairs */
+	pow = ilog2(ch->num_queue_pairs);
+	if (!is_power_of_2(ch->num_queue_pairs))
+		pow++;
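+	/* e.g. 3 queue pairs: ilog2(3) = 1 and 3 is not a power of 2, so
+	 * pow = 2 and the queue map covers 2^2 = 4 queues
+	 */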
+
+	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+		(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
+
+	/* Setup queue TC[0].qmap for given VSI context */
+	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
+
+	ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */
+	ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
+	ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
+	ctxt->info.valid_sections |= cpu_to_le16(sections);
 
 	return 0;
 }
 
 /**
- * i40e_do_reset - Start a PF or Core Reset sequence
- * @pf: board private structure
- * @reset_flags: which reset is requested
- * @lock_acquired: indicates whether or not the lock has been acquired
- * before this function was called.
+ * i40e_add_channel - add a channel by adding VSI
+ * @pf: ptr to PF device
+ * @uplink_seid: underlying HW switching element (VEB) ID
+ * @ch: ptr to channel structure
  *
- * The essential difference in resets is that the PF Reset
- * doesn't clear the packet buffers, doesn't reset the PE
- * firmware, and doesn't bother the other PFs on the chip.
+ * Add a channel (VSI) using add_vsi and queue_map
  **/
-void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
+static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
+			    struct i40e_channel *ch)
 {
-	u32 val;
-
-	WARN_ON(in_interrupt());
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_vsi_context ctxt;
+	u8 enabled_tc = 0x1; /* TC0 enabled */
+	int ret;
 
+	if (ch->type != I40E_VSI_VMDQ2) {
+		dev_info(&pf->pdev->dev,
+			 "add new vsi failed, ch->type %d\n", ch->type);
+		return -EINVAL;
+	}
 
-	/* do the biggest reset indicated */
-	if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
+	memset(&ctxt, 0, sizeof(ctxt));
+	ctxt.pf_num = hw->pf_id;
+	ctxt.vf_num = 0;
+	ctxt.uplink_seid = uplink_seid;
+	ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
+	if (ch->type == I40E_VSI_VMDQ2)
+		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
 
-		/* Request a Global Reset
-		 *
-		 * This will start the chip's countdown to the actual full
-		 * chip reset event, and a warning interrupt to be sent
-		 * to all PFs, including the requestor.  Our handler
-		 * for the warning interrupt will deal with the shutdown
-		 * and recovery of the switch setup.
-		 */
-		dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
-		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
-		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
-		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
+	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
+		ctxt.info.valid_sections |=
+		     cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+		ctxt.info.switch_id =
+		   cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+	}
 
-	} else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
+	/* Set queue map for a given VSI context */
+	ret = i40e_channel_setup_queue_map(pf, &ctxt, ch);
+	if (ret)
+		return ret;
 
-		/* Request a Core Reset
-		 *
-		 * Same as Global Reset, except does *not* include the MAC/PHY
-		 */
-		dev_dbg(&pf->pdev->dev, "CoreR requested\n");
-		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
-		val |= I40E_GLGEN_RTRIG_CORER_MASK;
-		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
-		i40e_flush(&pf->hw);
+	/* Now time to create VSI */
+	ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
+	if (ret) {
+		dev_info(&pf->pdev->dev,
+			 "add new vsi failed, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw,
+				     pf->hw.aq.asq_last_status));
+		return -ENOENT;
+	}
 
-	} else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {
+	/* Success, update channel */
+	ch->enabled_tc = enabled_tc;
+	ch->seid = ctxt.seid;
+	ch->vsi_number = ctxt.vsi_number;
+	ch->stat_counter_idx = le16_to_cpu(ctxt.info.stat_counter_idx);
 
-		/* Request a PF Reset
-		 *
-		 * Resets only the PF-specific registers
-		 *
-		 * This goes directly to the tear-down and rebuild of
-		 * the switch, since we need to do all the recovery as
-		 * for the Core Reset.
-		 */
-		dev_dbg(&pf->pdev->dev, "PFR requested\n");
-		i40e_handle_reset_warning(pf, lock_acquired);
+	/* copy just the sections touched not the entire info
+	 * since not all sections are valid as returned by
+	 * update vsi params
+	 */
+	ch->info.mapping_flags = ctxt.info.mapping_flags;
+	memcpy(&ch->info.queue_mapping,
+	       &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
+	memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
+	       sizeof(ctxt.info.tc_mapping));
 
-	} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
-		int v;
+	return 0;
+}
 
-		/* Find the VSI(s) that requested a re-init */
-		dev_info(&pf->pdev->dev,
-			 "VSI reinit requested\n");
-		for (v = 0; v < pf->num_alloc_vsi; v++) {
-			struct i40e_vsi *vsi = pf->vsi[v];
-
-			if (vsi != NULL &&
-			    test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
-					       vsi->state))
-				i40e_vsi_reinit_locked(pf->vsi[v]);
-		}
-	} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
-		int v;
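+/**
+ * i40e_channel_config_bw - configure BW for the channel's VSI
+ * @vsi: the parent VSI
+ * @ch: ptr to channel structure
+ * @bw_share: per traffic class BW credits (relative share)
+ *
+ * Configure per-TC bandwidth credits for the channel VSI via the
+ * admin queue.
+ **/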
+static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
+				  u8 *bw_share)
+{
+	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
+	i40e_status ret;
+	int i;
 
-		/* Find the VSI(s) that needs to be brought down */
-		dev_info(&pf->pdev->dev, "VSI down requested\n");
-		for (v = 0; v < pf->num_alloc_vsi; v++) {
-			struct i40e_vsi *vsi = pf->vsi[v];
+	bw_data.tc_valid_bits = ch->enabled_tc;
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+		bw_data.tc_bw_credits[i] = bw_share[i];
 
-			if (vsi != NULL &&
-			    test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
-					       vsi->state)) {
-				set_bit(__I40E_VSI_DOWN, vsi->state);
-				i40e_down(vsi);
-			}
-		}
-	} else {
-		dev_info(&pf->pdev->dev,
-			 "bad reset request 0x%08x\n", reset_flags);
+	ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
+				       &bw_data, NULL);
+	if (ret) {
+		dev_info(&vsi->back->pdev->dev,
+			 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
+			 vsi->back->hw.aq.asq_last_status, ch->seid);
+		return -EINVAL;
 	}
+
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+		ch->info.qs_handle[i] = bw_data.qs_handles[i];
+
+	return 0;
 }
 
-#ifdef CONFIG_I40E_DCB
 /**
- * i40e_dcb_need_reconfig - Check if DCB needs reconfig
- * @pf: board private structure
- * @old_cfg: current DCB config
- * @new_cfg: new DCB config
+ * i40e_channel_config_tx_ring - config TX ring associated with new channel
+ * @pf: ptr to PF device
+ * @vsi: the VSI being setup
+ * @ch: ptr to channel structure
+ *
+ * Configure TX rings associated with the channel (VSI), since its queues
+ * are taken from the parent VSI.
  **/
-bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
-			    struct i40e_dcbx_config *old_cfg,
-			    struct i40e_dcbx_config *new_cfg)
+static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
+				       struct i40e_vsi *vsi,
+				       struct i40e_channel *ch)
 {
-	bool need_reconfig = false;
-
-	/* Check if ETS configuration has changed */
-	if (memcmp(&new_cfg->etscfg,
-		   &old_cfg->etscfg,
-		   sizeof(new_cfg->etscfg))) {
-		/* If Priority Table has changed reconfig is needed */
-		if (memcmp(&new_cfg->etscfg.prioritytable,
-			   &old_cfg->etscfg.prioritytable,
-			   sizeof(new_cfg->etscfg.prioritytable))) {
-			need_reconfig = true;
-			dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
-		}
-
-		if (memcmp(&new_cfg->etscfg.tcbwtable,
-			   &old_cfg->etscfg.tcbwtable,
-			   sizeof(new_cfg->etscfg.tcbwtable)))
-			dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
+	i40e_status ret;
+	int i;
+	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
 
-		if (memcmp(&new_cfg->etscfg.tsatable,
-			   &old_cfg->etscfg.tsatable,
-			   sizeof(new_cfg->etscfg.tsatable)))
-			dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
+	/* Enable ETS TCs with equal BW Share for now across all VSIs */
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		if (ch->enabled_tc & BIT(i))
+			bw_share[i] = 1;
 	}
 
-	/* Check if PFC configuration has changed */
-	if (memcmp(&new_cfg->pfc,
-		   &old_cfg->pfc,
-		   sizeof(new_cfg->pfc))) {
-		need_reconfig = true;
-		dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
+	/* configure BW for new VSI */
+	ret = i40e_channel_config_bw(vsi, ch, bw_share);
+	if (ret) {
+		dev_info(&vsi->back->pdev->dev,
+			 "Failed configuring TC map %d for channel (seid %u)\n",
+			 ch->enabled_tc, ch->seid);
+		return ret;
 	}
 
-	/* Check if APP Table has changed */
-	if (memcmp(&new_cfg->app,
-		   &old_cfg->app,
-		   sizeof(new_cfg->app))) {
-		need_reconfig = true;
-		dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
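+	/* Re-own the parent VSI's rings: e.g. a channel with base_queue = 8
+	 * and 4 queue pairs takes over PF queues 8..11
+	 */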
+	for (i = 0; i < ch->num_queue_pairs; i++) {
+		struct i40e_ring *tx_ring, *rx_ring;
+		u16 pf_q;
+
+		pf_q = ch->base_queue + i;
+
+		/* Get to TX ring ptr of main VSI, for re-setup TX queue
+		 * context
+		 */
+		tx_ring = vsi->tx_rings[pf_q];
+		tx_ring->ch = ch;
+
+		/* Get the RX ring ptr */
+		rx_ring = vsi->rx_rings[pf_q];
+		rx_ring->ch = ch;
 	}
 
-	dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
-	return need_reconfig;
+	return 0;
 }
 
 /**
- * i40e_handle_lldp_event - Handle LLDP Change MIB event
- * @pf: board private structure
- * @e: event info posted on ARQ
+ * i40e_setup_hw_channel - setup new channel
+ * @pf: ptr to PF device
+ * @vsi: the VSI being setup
+ * @ch: ptr to channel structure
+ * @uplink_seid: underlying HW switching element (VEB) ID
+ * @type: type of channel to be created (VMDq2/VF)
+ *
+ * Sets up a new channel (VSI) of the specified type (VMDq2/VF)
+ * and configures its TX rings accordingly
  **/
-static int i40e_handle_lldp_event(struct i40e_pf *pf,
-				  struct i40e_arq_event_info *e)
+static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
+					struct i40e_vsi *vsi,
+					struct i40e_channel *ch,
+					u16 uplink_seid, u8 type)
 {
-	struct i40e_aqc_lldp_get_mib *mib =
-		(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
-	struct i40e_hw *hw = &pf->hw;
-	struct i40e_dcbx_config tmp_dcbx_cfg;
-	bool need_reconfig = false;
-	int ret = 0;
-	u8 type;
+	int ret;
 
-	/* Not DCB capable or capability disabled */
-	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+	ch->initialized = false;
+	ch->base_queue = vsi->next_base_queue;
+	ch->type = type;
+
+	/* Proceed with creation of channel (VMDq2) VSI */
+	ret = i40e_add_channel(pf, uplink_seid, ch);
+	if (ret) {
+		dev_info(&pf->pdev->dev,
+			 "failed to add_channel using uplink_seid %u\n",
+			 uplink_seid);
 		return ret;
+	}
 
-	/* Ignore if event is not for Nearest Bridge */
-	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
-		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
-	dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
-	if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
+	/* Mark the successful creation of channel */
+	ch->initialized = true;
+
+	/* Reconfigure TX queues using QTX_CTL register */
+	ret = i40e_channel_config_tx_ring(pf, vsi, ch);
+	if (ret) {
+		dev_info(&pf->pdev->dev,
+			 "failed to configure TX rings for channel %u\n",
+			 ch->seid);
 		return ret;
+	}
 
-	/* Check MIB Type and return if event for Remote MIB update */
-	type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
+	/* update 'next_base_queue' */
+	vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
 	dev_dbg(&pf->pdev->dev,
-		"LLDP event mib type %s\n", type ? "remote" : "local");
-	if (type == I40E_AQ_LLDP_MIB_REMOTE) {
-		/* Update the remote cached instance and return */
-		ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
-				I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
-				&hw->remote_dcbx_config);
-		goto exit;
+		"Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, vsi->next_base_queue %d\n",
+		ch->seid, ch->vsi_number, ch->stat_counter_idx,
+		ch->num_queue_pairs,
+		vsi->next_base_queue);
+	return ret;
+}
+
+/**
+ * i40e_setup_channel - setup new channel using uplink element
+ * @pf: ptr to PF device
+ * @vsi: the VSI being setup
+ * @ch: ptr to channel structure
+ *
+ * Sets up a new channel (VSI) of the derived type (VMDq2) using the
+ * uplink switching element (uplink_seid) of the main VSI
+ **/
+static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
+			       struct i40e_channel *ch)
+{
+	u8 vsi_type;
+	u16 seid;
+	int ret;
+
+	if (vsi->type == I40E_VSI_MAIN) {
+		vsi_type = I40E_VSI_VMDQ2;
+	} else {
+		dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
+			vsi->type);
+		return false;
 	}
 
-	/* Store the old configuration */
-	tmp_dcbx_cfg = hw->local_dcbx_config;
+	/* underlying switching element */
+	seid = pf->vsi[pf->lan_vsi]->uplink_seid;
 
-	/* Reset the old DCBx configuration data */
-	memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
-	/* Get updated DCBX data from firmware */
-	ret = i40e_get_dcb_config(&pf->hw);
+	/* create channel (VSI), configure TX rings */
+	ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
 	if (ret) {
-		dev_info(&pf->pdev->dev,
-			 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
-			 i40e_stat_str(&pf->hw, ret),
-			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
-		goto exit;
+		dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
+		return false;
 	}
 
-	/* No change detected in DCBX configs */
-	if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
-		    sizeof(tmp_dcbx_cfg))) {
-		dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
-		goto exit;
+	return ch->initialized;
+}
+
+/**
+ * i40e_create_queue_channel - function to create channel
+ * @vsi: VSI to be configured
+ * @ch: ptr to channel (it contains channel specific params)
+ *
+ * This function creates a channel (VSI) using the num_queues specified by
+ * the user and reconfigures RSS if needed.
+ **/
+int i40e_create_queue_channel(struct i40e_vsi *vsi,
+			      struct i40e_channel *ch)
+{
+	struct i40e_pf *pf = vsi->back;
+	bool reconfig_rss;
+	int err;
+
+	if (!ch)
+		return -EINVAL;
+
+	if (!ch->num_queue_pairs) {
+		dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
+			ch->num_queue_pairs);
+		return -EINVAL;
 	}
 
-	need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
-					       &hw->local_dcbx_config);
+	/* validate user requested num_queues for channel */
+	err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
+				       &reconfig_rss);
+	if (err) {
+		dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
+			 ch->num_queue_pairs);
+		return -EINVAL;
+	}
 
-	i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
+	/* By default we are in VEPA mode; if this is the first VF/VMDq
+	 * VSI to be added, switch to VEB mode.
+	 */
+	if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+		pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
 
-	if (!need_reconfig)
-		goto exit;
+		if (vsi->type == I40E_VSI_MAIN) {
+			if (pf->flags & I40E_FLAG_TC_MQPRIO)
+				i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
+			else
+				i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
+		}
+	/* From now on, for the main VSI the number of queues will be the
+	 * value of TC0's queue count.
+	 */
+	}
 
-	/* Enable DCB tagging only when more than one TC */
-	if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
-		pf->flags |= I40E_FLAG_DCB_ENABLED;
-	else
-		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+	/* By this time, vsi->cnt_q_avail should be non-zero and
+	 * at least num_queues.
+	 */
+	if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
+		dev_dbg(&pf->pdev->dev,
+			"Error: cnt_q_avail (%u) less than num_queues %d\n",
+			vsi->cnt_q_avail, ch->num_queue_pairs);
+		return -EINVAL;
+	}
 
-	set_bit(__I40E_PORT_SUSPENDED, pf->state);
-	/* Reconfiguration needed quiesce all VSIs */
-	i40e_pf_quiesce_all_vsi(pf);
+	/* reconfig_rss only if vsi type is MAIN_VSI */
+	if (reconfig_rss && vsi->type == I40E_VSI_MAIN) {
+		err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
+		if (err) {
+			dev_info(&pf->pdev->dev,
+				 "Error: unable to reconfig rss for num_queues (%u)\n",
+				 ch->num_queue_pairs);
+			return -EINVAL;
+		}
+	}
 
-	/* Changes in configuration update VEB/VSI */
-	i40e_dcb_reconfigure(pf);
+	if (!i40e_setup_channel(pf, vsi, ch)) {
+		dev_info(&pf->pdev->dev, "Failed to setup channel\n");
+		return -EINVAL;
+	}
 
-	ret = i40e_resume_port_tx(pf);
+	dev_info(&pf->pdev->dev,
+		 "Setup channel (id:%u) utilizing num_queues %d\n",
+		 ch->seid, ch->num_queue_pairs);
 
-	clear_bit(__I40E_PORT_SUSPENDED, pf->state);
-	/* In case of error no point in resuming VSIs */
-	if (ret)
-		goto exit;
+	/* configure VSI for BW limit */
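+	/* the HW programs BW in 50 Mbps credits, so e.g. a 1000 Mbps limit
+	 * becomes 20 credits (I40E_BW_CREDIT_DIVISOR)
+	 */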
+	if (ch->max_tx_rate) {
+		if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
+			return -EINVAL;
 
-	/* Wait for the PF's queues to be disabled */
-	ret = i40e_pf_wait_queues_disabled(pf);
-	if (ret) {
-		/* Schedule PF reset to recover */
-		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
-		i40e_service_event_schedule(pf);
-	} else {
-		i40e_pf_unquiesce_all_vsi(pf);
-	pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
-		      I40E_FLAG_CLIENT_L2_CHANGE);
+		dev_dbg(&pf->pdev->dev,
+			"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
+			ch->max_tx_rate,
+			ch->max_tx_rate / I40E_BW_CREDIT_DIVISOR, ch->seid);
 	}
 
-exit:
-	return ret;
+	/* in case of VF, this will be main SRIOV VSI */
+	ch->parent_vsi = vsi;
+
+	/* and update main_vsi's count for queue_available to use */
+	vsi->cnt_q_avail -= ch->num_queue_pairs;
+
+	return 0;
 }
-#endif /* CONFIG_I40E_DCB */
 
 /**
- * i40e_do_reset_safe - Protected reset path for userland calls.
- * @pf: board private structure
- * @reset_flags: which reset is requested
+ * i40e_configure_queue_channels - Add queue channel for the given TCs
+ * @vsi: VSI to be configured
  *
+ * Configures queue channel mapping to the given TCs
  **/
-void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
+static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
 {
-	rtnl_lock();
-	i40e_do_reset(pf, reset_flags, true);
-	rtnl_unlock();
+	struct i40e_channel *ch;
+	int ret = 0, i;
+
+	/* Create app vsi with the TCs. Main VSI with TC0 is already set up */
+	vsi->tc_seid_map[0] = vsi->seid;
+	for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		if (vsi->tc_config.enabled_tc & BIT(i)) {
+			ch = kzalloc(sizeof(*ch), GFP_KERNEL);
+			if (!ch) {
+				ret = -ENOMEM;
+				goto err_free;
+			}
+
+			INIT_LIST_HEAD(&ch->list);
+			ch->num_queue_pairs =
+				vsi->tc_config.tc_info[i].qcount;
+			ch->base_queue =
+				vsi->tc_config.tc_info[i].qoffset;
+
+			/* Bandwidth limit through tc interface is in bytes/s,
+			 * change to Mbit/s
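+			 * (e.g. 125000000 B/s * 8 / 10^6 = 1000 Mbit/s)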
+			 */
+			ch->max_tx_rate =
+				vsi->mqprio_qopt.max_rate[i] / (1000000 / 8);
+
+			list_add_tail(&ch->list, &vsi->ch_list);
+
+			ret = i40e_create_queue_channel(vsi, ch);
+			if (ret) {
+				dev_err(&vsi->back->pdev->dev,
+					"Failed creating queue channel with TC%d: queues %d\n",
+					i, ch->num_queue_pairs);
+				goto err_free;
+			}
+			vsi->tc_seid_map[i] = ch->seid;
+		}
+	}
+	return ret;
+
+err_free:
+	i40e_remove_queue_channels(vsi);
+	return ret;
 }
 
 /**
- * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
- * @pf: board private structure
- * @e: event info posted on ARQ
- *
- * Handler for LAN Queue Overflow Event generated by the firmware for PF
- * and VF queues
+ * i40e_validate_mqprio_qopt - validate queue mapping info
+ * @vsi: the VSI being configured
+ * @mqprio_qopt: queue parameters
  **/
-static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
-					   struct i40e_arq_event_info *e)
+static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
+				     struct tc_mqprio_qopt_offload *mqprio_qopt)
 {
-	struct i40e_aqc_lan_overflow *data =
-		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
-	u32 queue = le32_to_cpu(data->prtdcb_rupto);
-	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
-	struct i40e_hw *hw = &pf->hw;
-	struct i40e_vf *vf;
-	u16 vf_id;
+	u64 sum_max_rate = 0;
+	int i;
 
-	dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
-		queue, qtx_ctl);
+	if (mqprio_qopt->qopt.offset[0] != 0 ||
+	    mqprio_qopt->qopt.num_tc < 1 ||
+	    mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
+		return -EINVAL;
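+	/* per-TC offsets must be contiguous: e.g. num_tc = 2 with
+	 * count = {4, 4} requires offset = {0, 4} and at least 8 queue
+	 * pairs on the VSI
+	 */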
+	for (i = 0; ; i++) {
+		if (!mqprio_qopt->qopt.count[i])
+			return -EINVAL;
+		if (mqprio_qopt->min_rate[i]) {
+			dev_err(&vsi->back->pdev->dev,
+				"Invalid min tx rate (greater than 0) specified\n");
+			return -EINVAL;
+		}
+		sum_max_rate += (mqprio_qopt->max_rate[i] / (1000000 / 8));
 
-	/* Queue belongs to VF, find the VF and issue VF reset */
-	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
-	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
-		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
-			 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
-		vf_id -= hw->func_caps.vf_base_id;
-		vf = &pf->vf[vf_id];
-		i40e_vc_notify_vf_reset(vf);
-		/* Allow VF to process pending reset notification */
-		msleep(20);
-		i40e_reset_vf(vf, false);
+		if (i >= mqprio_qopt->qopt.num_tc - 1)
+			break;
+		if (mqprio_qopt->qopt.offset[i + 1] !=
+		    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
+			return -EINVAL;
+	}
+	if (vsi->num_queue_pairs <
+	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
+		dev_err(&vsi->back->pdev->dev,
+			"Failed to create traffic channel, insufficient number of queues.\n");
+		return -EINVAL;
+	}
+	if (sum_max_rate > i40e_get_link_speed(vsi)) {
+		dev_err(&vsi->back->pdev->dev,
+			"Invalid max tx rate specified\n");
+		return -EINVAL;
 	}
+	return 0;
 }
 
 /**
- * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
- * @pf: board private structure
+ * i40e_vsi_set_default_tc_config - set default values for tc configuration
+ * @vsi: the VSI being configured
  **/
-u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
+static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
 {
-	u32 val, fcnt_prog;
+	u16 qcount;
+	int i;
 
-	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
-	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
-	return fcnt_prog;
+	/* Only TC0 is enabled */
+	vsi->tc_config.numtc = 1;
+	vsi->tc_config.enabled_tc = 1;
+	qcount = min_t(int, vsi->alloc_queue_pairs,
+		       i40e_pf_get_max_q_per_tc(vsi->back));
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		/* For a TC that is not enabled, set the offset to the default
+		 * queue and allocate one queue for that TC.
+		 */
+		vsi->tc_config.tc_info[i].qoffset = 0;
+		if (i == 0)
+			vsi->tc_config.tc_info[i].qcount = qcount;
+		else
+			vsi->tc_config.tc_info[i].qcount = 1;
+		vsi->tc_config.tc_info[i].netdev_tc = 0;
+	}
 }
 
 /**
- * i40e_get_current_fd_count - Get total FD filters programmed for this PF
- * @pf: board private structure
+ * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
+ * @vsi: PF main vsi
+ * @seid: seid of main or channel VSIs
+ *
+ * Rebuilds cloud filters associated with main VSI and channel VSIs if they
+ * existed before reset
  **/
-u32 i40e_get_current_fd_count(struct i40e_pf *pf)
+static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
 {
-	u32 val, fcnt_prog;
+	struct i40e_cloud_filter *cfilter;
+	struct i40e_pf *pf = vsi->back;
+	struct hlist_node *node;
+	i40e_status ret;
 
-	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
-	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
-		    ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
-		      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
-	return fcnt_prog;
+	/* Add cloud filters back if they exist */
+	hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
+				  cloud_node) {
+		if (cfilter->seid != seid)
+			continue;
+
+		if (cfilter->dst_port)
+			ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
+								true);
+		else
+			ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
+
+		if (ret) {
+			dev_dbg(&pf->pdev->dev,
+				"Failed to rebuild cloud filter, err %s aq_err %s\n",
+				i40e_stat_str(&pf->hw, ret),
+				i40e_aq_str(&pf->hw,
+					    pf->hw.aq.asq_last_status));
+			return ret;
+		}
+	}
+	return 0;
 }
 
 /**
- * i40e_get_global_fd_count - Get total FD filters programmed on device
- * @pf: board private structure
+ * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
+ * @vsi: PF main vsi
+ *
+ * Rebuilds channel VSIs if they existed before reset
  **/
-u32 i40e_get_global_fd_count(struct i40e_pf *pf)
+static int i40e_rebuild_channels(struct i40e_vsi *vsi)
 {
-	u32 val, fcnt_prog;
+	struct i40e_channel *ch, *ch_tmp;
+	i40e_status ret;
 
-	val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
-	fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
-		    ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
-		     I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
-	return fcnt_prog;
+	if (list_empty(&vsi->ch_list))
+		return 0;
+
+	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
+		if (!ch->initialized)
+			break;
+		/* Proceed with creation of channel (VMDq2) VSI */
+		ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
+		if (ret) {
+			dev_info(&vsi->back->pdev->dev,
+				 "failed to rebuild channels using uplink_seid %u\n",
+				 vsi->uplink_seid);
+			return ret;
+		}
+		if (ch->max_tx_rate) {
+			if (i40e_set_bw_limit(vsi, ch->seid,
+					      ch->max_tx_rate))
+				return -EINVAL;
+
+			dev_dbg(&vsi->back->pdev->dev,
+				"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
+				ch->max_tx_rate,
+				ch->max_tx_rate / I40E_BW_CREDIT_DIVISOR,
+				ch->seid);
+		}
+		ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
+		if (ret) {
+			dev_dbg(&vsi->back->pdev->dev,
+				"Failed to rebuild cloud filters for channel VSI %u\n",
+				ch->seid);
+			return ret;
+		}
+	}
+	return 0;
 }
+#endif /* __TC_MQPRIO_MODE_MAX */
 
+#ifdef HAVE_SETUP_TC
 /**
- * i40e_fdir_check_and_reenable - Function to reenabe FD ATR or SB if disabled
- * @pf: board private structure
+ * i40e_setup_tc - configure multiple traffic classes
+ * @netdev: net device to configure
+ * @type_data: tc offload data
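+ *
+ * In channel mode this is typically driven from userspace with, e.g.:
+ *   tc qdisc add dev <netdev> root mqprio num_tc 2 \
+ *     map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1 mode channel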
  **/
-void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
+#ifdef __TC_MQPRIO_MODE_MAX
+static int i40e_setup_tc(struct net_device *netdev, void *type_data)
+#else
+static int i40e_setup_tc(struct net_device *netdev, u8 tc)
+#endif
 {
-	struct i40e_fdir_filter *filter;
-	u32 fcnt_prog, fcnt_avail;
-	struct hlist_node *node;
+#ifdef __TC_MQPRIO_MODE_MAX
+	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
+#endif
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	u8 enabled_tc = 0, num_tc;
+#ifdef __TC_MQPRIO_MODE_MAX
+	bool need_reset = false;
+	int old_queue_pairs;
+#endif
+	int ret = -EINVAL;
+#ifdef __TC_MQPRIO_MODE_MAX
+	u16 mode;
+	u8 hw;
+#endif
+	int i;
 
-	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
-		return;
+#ifdef __TC_MQPRIO_MODE_MAX
+	old_queue_pairs = vsi->num_queue_pairs;
+#endif
+	/* Check if MFP enabled */
+	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+		netdev_info(netdev,
+			    "Configuring TC not supported in MFP mode\n");
+		goto exit;
+	}
 
-	/* Check if we have enough room to re-enable FDir SB capability. */
-	fcnt_prog = i40e_get_global_fd_count(pf);
-	fcnt_avail = pf->fdir_pf_filter_count;
-	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
-	    (pf->fd_add_err == 0) ||
-	    (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
-		if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) {
-			pf->flags &= ~I40E_FLAG_FD_SB_AUTO_DISABLED;
-			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
-			    (I40E_DEBUG_FD & pf->hw.debug_mask))
-				dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
-		}
+#ifndef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV
+	/* Check whether tc count is within enabled limit */
+	if (tc > i40e_pf_get_num_tc(pf)) {
+		netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
+		goto exit;
+	}
+#endif
+
+#ifdef __TC_MQPRIO_MODE_MAX
+	num_tc = mqprio_qopt->qopt.num_tc;
+	hw = mqprio_qopt->qopt.hw;
+	mode = mqprio_qopt->mode;
+	if (!hw) {
+		pf->flags &= ~I40E_FLAG_TC_MQPRIO;
+		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
+		goto config_tc;
 	}
 
-	/* We should wait for even more space before re-enabling ATR.
-	 * Additionally, we cannot enable ATR as long as we still have TCP SB
-	 * rules active.
-	 */
-	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
-	    (pf->fd_tcp4_filter_cnt == 0)) {
-		if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
-			pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
-			if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
-			    (I40E_DEBUG_FD & pf->hw.debug_mask))
-				dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
+	switch (mode) {
+	case TC_MQPRIO_MODE_DCB:
+		pf->flags &= ~I40E_FLAG_TC_MQPRIO;
+
+		/* Check if DCB enabled to continue */
+		if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
+			netdev_info(netdev,
+				    "DCB is not enabled for adapter\n");
+			return ret;
+		}
+
+		/* Check whether tc count is within enabled limit */
+		if (num_tc > i40e_pf_get_num_tc(pf)) {
+			netdev_info(netdev,
+				    "TC count greater than enabled on link for adapter\n");
+			return ret;
+		}
+		break;
+	case TC_MQPRIO_MODE_CHANNEL:
+		if (pf->flags & I40E_FLAG_DCB_ENABLED) {
+			netdev_info(netdev,
+				    "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
+			return ret;
 		}
+		if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
+			return ret;
+		ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
+		if (ret)
+			return ret;
+		memcpy(&vsi->mqprio_qopt, mqprio_qopt,
+		       sizeof(*mqprio_qopt));
+		pf->flags |= I40E_FLAG_TC_MQPRIO;
+		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+		break;
+	default:
+		return -EINVAL;
 	}
 
-	/* if hw had a problem adding a filter, delete it */
-	if (pf->fd_inv > 0) {
-		hlist_for_each_entry_safe(filter, node,
-					  &pf->fdir_filter_list, fdir_node) {
-			if (filter->fd_id == pf->fd_inv) {
-				hlist_del(&filter->fdir_node);
-				kfree(filter);
-				pf->fdir_pf_active_filters--;
+config_tc:
+#else
+	/* Check if DCB enabled to continue */
+	if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
+		netdev_info(netdev, "DCB is not enabled for adapter\n");
+		goto exit;
+	}
+	num_tc = tc;
+#endif
+	/* Generate TC map for number of tc requested */
+	for (i = 0; i < num_tc; i++)
+		enabled_tc |= BIT(i);
+
+	/* Requesting same TC configuration as already enabled */
+#ifdef __TC_MQPRIO_MODE_MAX
+	if (enabled_tc == vsi->tc_config.enabled_tc &&
+	    mode != TC_MQPRIO_MODE_CHANNEL)
+		return 0;
+#else
+	if (enabled_tc == vsi->tc_config.enabled_tc)
+		return 0;
+#endif
+
+	/* Quiesce VSI queues */
+	i40e_quiesce_vsi(vsi);
+
+#ifdef __TC_MQPRIO_MODE_MAX
+	if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
+		i40e_remove_queue_channels(vsi);
+#endif
+
+	/* Configure VSI for enabled TCs */
+	ret = i40e_vsi_config_tc(vsi, enabled_tc);
+	if (ret) {
+		netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
+			    vsi->seid);
+#ifdef __TC_MQPRIO_MODE_MAX
+		need_reset = true;
+#endif
+		goto exit;
+#ifdef __TC_MQPRIO_MODE_MAX
+	} else if (enabled_tc &&
+		   (!is_power_of_2(vsi->tc_config.tc_info[0].qcount))) {
+		netdev_info(netdev,
+			    "Failed to create channel. Override queues (%u) not power of 2\n",
+			    vsi->tc_config.tc_info[0].qcount);
+		pf->flags &= ~I40E_FLAG_TC_MQPRIO;
+		vsi->num_queue_pairs = old_queue_pairs;
+		ret = -EINVAL;
+		need_reset = true;
+		goto exit;
+#endif
+	}
+
+	dev_info(&vsi->back->pdev->dev,
+		 "Setup channel (id:%u) utilizing num_queues %d\n",
+		 vsi->seid, vsi->tc_config.tc_info[0].qcount);
+
+#ifdef __TC_MQPRIO_MODE_MAX
+	if (pf->flags & I40E_FLAG_TC_MQPRIO) {
+		if (vsi->mqprio_qopt.max_rate[0]) {
+			u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0] /
+								(1000000 / 8);
+			ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
+			if (!ret) {
+				dev_dbg(&vsi->back->pdev->dev,
+					"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
+					max_tx_rate,
+					max_tx_rate / I40E_BW_CREDIT_DIVISOR,
+					vsi->seid);
+			} else {
+				need_reset = true;
+				goto exit;
 			}
 		}
+		ret = i40e_configure_queue_channels(vsi);
+		if (ret) {
+			vsi->num_queue_pairs = old_queue_pairs;
+			netdev_info(netdev,
+				    "Failed configuring queue channels\n");
+			need_reset = true;
+			goto exit;
+		}
+	}
+
+#endif
+
+exit:
+#ifdef __TC_MQPRIO_MODE_MAX
+	/* Reset the configuration data to defaults, only TC0 is enabled */
+	if (need_reset) {
+		i40e_vsi_set_default_tc_config(vsi);
+		need_reset = false;
 	}
+#endif
+	/* Unquiesce VSI */
+	i40e_unquiesce_vsi(vsi);
+	return ret;
 }
 
-#define I40E_MIN_FD_FLUSH_INTERVAL 10
-#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
+#ifdef __TC_MQPRIO_MODE_MAX
 /**
- * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
- * @pf: board private structure
+ * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
+ * @vsi: Pointer to VSI
+ * @f: Pointer to struct flow_cls_offload
+ * @filter: Pointer to cloud filter structure
+ *
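+ * Such a filter is typically installed from userspace with, e.g.:
+ *   tc filter add dev <netdev> protocol ip parent ffff: prio 1 flower \
+ *     dst_ip 192.168.1.1/32 ip_proto tcp dst_port 80 skip_sw hw_tc 1
+ *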
  **/
-static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
+static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
+				 struct flow_cls_offload *f,
+				 struct i40e_cloud_filter *filter)
 {
-	unsigned long min_flush_time;
-	int flush_wait_retry = 50;
-	bool disable_atr = false;
-	int fd_room;
-	int reg;
+	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+	struct flow_dissector *dissector = rule->match.dissector;
+	u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
+	struct i40e_pf *pf = vsi->back;
+	u8 field_flags = 0;
+
+	if (dissector->used_keys &
+	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
+	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS
+	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
+#endif
+	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+#ifdef HAVE_TC_FLOWER_ENC
+	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
+#endif
+	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
+		dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
+			dissector->used_keys);
+		return -EOPNOTSUPP;
+	}
 
-	if (!time_after(jiffies, pf->fd_flush_timestamp +
-				 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
-		return;
+#ifdef HAVE_TC_FLOWER_ENC
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+		struct flow_match_enc_keyid match;
 
-	/* If the flush is happening too quick and we have mostly SB rules we
-	 * should not re-enable ATR for some time.
-	 */
-	min_flush_time = pf->fd_flush_timestamp +
-			 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
-	fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
+		flow_rule_match_enc_keyid(rule, &match);
+		if (match.mask->keyid != 0)
+			field_flags |= I40E_CLOUD_FIELD_TEN_ID;
 
-	if (!(time_after(jiffies, min_flush_time)) &&
-	    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
-		if (I40E_DEBUG_FD & pf->hw.debug_mask)
-			dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
-		disable_atr = true;
+		filter->tenant_id = be32_to_cpu(match.key->keyid);
 	}
+#endif /* HAVE_TC_FLOWER_ENC */
 
-	pf->fd_flush_timestamp = jiffies;
-	pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
-	/* flush all filters */
-	wr32(&pf->hw, I40E_PFQF_CTL_1,
-	     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
-	i40e_flush(&pf->hw);
-	pf->fd_flush_cnt++;
-	pf->fd_add_err = 0;
-	do {
-		/* Check FD flush status every 5-6msec */
-		usleep_range(5000, 6000);
-		reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
-		if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
-			break;
-	} while (flush_wait_retry--);
-	if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
-		dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
-	} else {
-		/* replay sideband filters */
-		i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
-		if (!disable_atr && !pf->fd_tcp4_filter_cnt)
-			pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
-		clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
-		if (I40E_DEBUG_FD & pf->hw.debug_mask)
-			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+		struct flow_match_basic match;
+
+		flow_rule_match_basic(rule, &match);
+		n_proto_key = ntohs(match.key->n_proto);
+		n_proto_mask = ntohs(match.mask->n_proto);
+
+		if (n_proto_key == ETH_P_ALL) {
+			n_proto_key = 0;
+			n_proto_mask = 0;
+		}
+		filter->n_proto = n_proto_key & n_proto_mask;
+		filter->ip_proto = match.key->ip_proto;
 	}
-}
 
-/**
- * i40e_get_current_atr_count - Get the count of total FD ATR filters programmed
- * @pf: board private structure
- **/
-u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
-{
-	return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
-}
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+		struct flow_match_eth_addrs match;
 
-/* We can see up to 256 filter programming desc in transit if the filters are
- * being applied really fast; before we see the first
- * filter miss error on Rx queue 0. Accumulating enough error messages before
- * reacting will make sure we don't cause flush too often.
- */
-#define I40E_MAX_FD_PROGRAM_ERROR 256
+		flow_rule_match_eth_addrs(rule, &match);
 
-/**
- * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
- * @pf: board private structure
- **/
-static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
-{
+		/* use is_broadcast and is_zero to check for all-0xff or all-zero masks */
+		if (!is_zero_ether_addr(match.mask->dst)) {
+			if (is_broadcast_ether_addr(match.mask->dst)) {
+				field_flags |= I40E_CLOUD_FIELD_OMAC;
+			} else {
+				dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
+					match.mask->dst);
+				return I40E_ERR_CONFIG;
+			}
+		}
 
-	/* if interface is down do nothing */
-	if (test_bit(__I40E_DOWN, pf->state))
-		return;
+		if (!is_zero_ether_addr(match.mask->src)) {
+			if (is_broadcast_ether_addr(match.mask->src)) {
+				field_flags |= I40E_CLOUD_FIELD_IMAC;
+			} else {
+				dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
+					match.mask->src);
+				return I40E_ERR_CONFIG;
+			}
+		}
+		ether_addr_copy(filter->dst_mac, match.key->dst);
+		ether_addr_copy(filter->src_mac, match.key->src);
+	}
 
-	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
-		i40e_fdir_flush_and_replay(pf);
+#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+		struct flow_match_vlan match;
 
-	i40e_fdir_check_and_reenable(pf);
+		flow_rule_match_vlan(rule, &match);
+		if (match.mask->vlan_id) {
+			if (match.mask->vlan_id == VLAN_VID_MASK) {
+				field_flags |= I40E_CLOUD_FIELD_IVLAN;
 
-}
+			} else {
+				dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
+					match.mask->vlan_id);
+				return I40E_ERR_CONFIG;
+			}
+		}
 
-/**
- * i40e_vsi_link_event - notify VSI of a link event
- * @vsi: vsi to be notified
- * @link_up: link up or down
- **/
-static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
-{
-	if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
-		return;
+		filter->vlan_id = cpu_to_be16(match.key->vlan_id);
+	}
+#endif /* !HAVE_TC_FLOWER_VLAN_IN_TAGS */
 
-	switch (vsi->type) {
-	case I40E_VSI_MAIN:
-		if (!vsi->netdev || !vsi->netdev_registered)
-			break;
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+		struct flow_match_control match;
 
-		if (link_up) {
-			netif_carrier_on(vsi->netdev);
-			netif_tx_wake_all_queues(vsi->netdev);
-		} else {
-			netif_carrier_off(vsi->netdev);
-			netif_tx_stop_all_queues(vsi->netdev);
+		flow_rule_match_control(rule, &match);
+		addr_type = match.key->addr_type;
+	}
+
+	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+		struct flow_match_ipv4_addrs match;
+
+		flow_rule_match_ipv4_addrs(rule, &match);
+		if (match.mask->dst) {
+			if (match.mask->dst == cpu_to_be32(0xffffffff)) {
+				field_flags |= I40E_CLOUD_FIELD_IIP;
+			} else {
+				dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
+					&match.mask->dst);
+				return I40E_ERR_CONFIG;
+			}
 		}
-		break;
 
-	case I40E_VSI_SRIOV:
-	case I40E_VSI_VMDQ2:
-	case I40E_VSI_CTRL:
-	case I40E_VSI_IWARP:
-	case I40E_VSI_MIRROR:
-	default:
-		/* there is no notification for other VSIs */
-		break;
+		if (match.mask->src) {
+			if (match.mask->src == cpu_to_be32(0xffffffff)) {
+				field_flags |= I40E_CLOUD_FIELD_IIP;
+			} else {
+				dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
+					&match.mask->src);
+				return I40E_ERR_CONFIG;
+			}
+		}
+
+		if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
+			dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
+			return I40E_ERR_CONFIG;
+		}
+		filter->dst_ipv4 = match.key->dst;
+		filter->src_ipv4 = match.key->src;
+	}
+
+	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+		struct flow_match_ipv6_addrs match;
+
+		flow_rule_match_ipv6_addrs(rule, &match);
+
+		/* src and dest IPV6 address should not be LOOPBACK
+		 * (0:0:0:0:0:0:0:1), which can be represented as ::1
+		 */
+		if (ipv6_addr_loopback(&match.key->dst) ||
+		    ipv6_addr_loopback(&match.key->src)) {
+			dev_err(&pf->pdev->dev,
+				"Bad ipv6, addr is LOOPBACK\n");
+			return I40E_ERR_CONFIG;
+		}
+		if (!ipv6_addr_any(&match.mask->dst) ||
+		    !ipv6_addr_any(&match.mask->src))
+			field_flags |= I40E_CLOUD_FIELD_IIP;
+
+		memcpy(&filter->src_ipv6, &match.key->src.s6_addr32,
+		       sizeof(filter->src_ipv6));
+		memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32,
+		       sizeof(filter->dst_ipv6));
+	}
+
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+		struct flow_match_ports match;
+
+		flow_rule_match_ports(rule, &match);
+		if (match.mask->src) {
+			if (match.mask->src == cpu_to_be16(0xffff)) {
+				field_flags |= I40E_CLOUD_FIELD_IIP;
+			} else {
+				dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
+					be16_to_cpu(match.mask->src));
+				return I40E_ERR_CONFIG;
+			}
+		}
+
+		if (match.mask->dst) {
+			if (match.mask->dst == cpu_to_be16(0xffff)) {
+				field_flags |= I40E_CLOUD_FIELD_IIP;
+			} else {
+				dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
+					be16_to_cpu(match.mask->dst));
+				return I40E_ERR_CONFIG;
+			}
+		}
+
+		filter->dst_port = match.key->dst;
+		filter->src_port = match.key->src;
+
+		switch (filter->ip_proto) {
+		case IPPROTO_TCP:
+		case IPPROTO_UDP:
+			break;
+		default:
+			dev_err(&pf->pdev->dev,
+				"Only UDP and TCP transports are supported\n");
+			return -EINVAL;
+		}
 	}
+	filter->flags = field_flags;
+	return 0;
 }
 
 /**
- * i40e_veb_link_event - notify elements on the veb of a link event
- * @veb: veb to be notified
- * @link_up: link up or down
+ * i40e_handle_tclass - Forward to a traffic class on the device
+ * @vsi: Pointer to VSI
+ * @tc: traffic class index on the device
+ * @filter: Pointer to cloud filter structure
+ *
  **/
-static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
+static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
+			      struct i40e_cloud_filter *filter)
 {
-	struct i40e_pf *pf;
-	int i;
-
-	if (!veb || !veb->pf)
-		return;
-	pf = veb->pf;
-
-	/* depth first... */
-	for (i = 0; i < I40E_MAX_VEB; i++)
-		if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
-			i40e_veb_link_event(pf->veb[i], link_up);
+	struct i40e_channel *ch, *ch_tmp;
 
-	/* ... now the local VSIs */
-	for (i = 0; i < pf->num_alloc_vsi; i++)
-		if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
-			i40e_vsi_link_event(pf->vsi[i], link_up);
+	/* direct to a traffic class on the same device */
+	if (tc == 0) {
+		filter->seid = vsi->seid;
+		return 0;
+	} else if (vsi->tc_config.enabled_tc & BIT(tc)) {
+		if (!filter->dst_port) {
+			dev_err(&vsi->back->pdev->dev,
+				"Specify destination port to direct to traffic class that is not default\n");
+			return -EINVAL;
+		}
+		if (list_empty(&vsi->ch_list))
+			return -EINVAL;
+		list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
+					 list) {
+			if (ch->seid == vsi->tc_seid_map[tc])
+				filter->seid = ch->seid;
+		}
+		return 0;
+	}
+	dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
+	return -EINVAL;
 }
 
 /**
- * i40e_link_event - Update netif_carrier status
- * @pf: board private structure
+ * i40e_configure_clsflower - Configure tc flower filters
+ * @vsi: Pointer to VSI
+ * @cls_flower: Pointer to struct flow_cls_offload
+ *
  **/
-static void i40e_link_event(struct i40e_pf *pf)
+static int i40e_configure_clsflower(struct i40e_vsi *vsi,
+				    struct flow_cls_offload *cls_flower)
 {
-	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
-	u8 new_link_speed, old_link_speed;
-	i40e_status status;
-	bool new_link, old_link;
-
-	/* save off old link status information */
-	pf->hw.phy.link_info_old = pf->hw.phy.link_info;
+	int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
+	struct i40e_cloud_filter *filter = NULL;
+	struct i40e_pf *pf = vsi->back;
+	int err = 0;
 
-	/* set this to force the get_link_status call to refresh state */
-	pf->hw.phy.get_link_info = true;
+	if (tc < 0) {
+		dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
+		return -EINVAL;
+	}
 
-	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
+	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+	    test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
+		return -EBUSY;
 
-	status = i40e_get_link_status(&pf->hw, &new_link);
+	if (pf->fdir_pf_active_filters ||
+	    (!hlist_empty(&pf->fdir_filter_list))) {
+		dev_err(&vsi->back->pdev->dev,
+			"Flow Director Sideband filters exist; turn ntuple off to configure cloud filters\n");
+		return -EOPNOTSUPP;
+	}
 
-	/* On success, disable temp link polling */
-	if (status == I40E_SUCCESS) {
-		if (pf->flags & I40E_FLAG_TEMP_LINK_POLLING)
-			pf->flags &= ~I40E_FLAG_TEMP_LINK_POLLING;
-	} else {
-		/* Enable link polling temporarily until i40e_get_link_status
-		 * returns I40E_SUCCESS
-		 */
-		pf->flags |= I40E_FLAG_TEMP_LINK_POLLING;
-		dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
-			status);
-		return;
+	if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
+		dev_err(&vsi->back->pdev->dev,
+			"Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
+		vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+		vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
 	}
 
-	old_link_speed = pf->hw.phy.link_info_old.link_speed;
-	new_link_speed = pf->hw.phy.link_info.link_speed;
+	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+	if (!filter)
+		return -ENOMEM;
 
-	if (new_link == old_link &&
-	    new_link_speed == old_link_speed &&
-	    (test_bit(__I40E_VSI_DOWN, vsi->state) ||
-	     new_link == netif_carrier_ok(vsi->netdev)))
-		return;
+	filter->cookie = cls_flower->cookie;
 
-	if (!test_bit(__I40E_VSI_DOWN, vsi->state))
-		i40e_print_link_message(vsi, new_link);
+	err = i40e_parse_cls_flower(vsi, cls_flower, filter);
+	if (err < 0)
+		goto err;
 
-	/* Notify the base of the switch tree connected to
-	 * the link.  Floating VEBs are not notified.
-	 */
-	if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
-		i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
+	err = i40e_handle_tclass(vsi, tc, filter);
+	if (err < 0)
+		goto err;
+
+	/* Add cloud filter */
+	if (filter->dst_port)
+		err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
 	else
-		i40e_vsi_link_event(vsi, new_link);
+		err = i40e_add_del_cloud_filter(vsi, filter, true);
 
-	if (pf->vf)
-		i40e_vc_notify_link_state(pf);
+	if (err) {
+		dev_err(&pf->pdev->dev,
+			"Failed to add cloud filter, err %s\n",
+			i40e_stat_str(&pf->hw, err));
+		goto err;
+	}
 
-	if (pf->flags & I40E_FLAG_PTP)
-		i40e_ptp_set_increment(pf);
+	/* add filter to the ordered list */
+	INIT_HLIST_NODE(&filter->cloud_node);
+
+	hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
+
+	pf->num_cloud_filters++;
+
+	return err;
+err:
+	kfree(filter);
+	return err;
 }
 
 /**
- * i40e_watchdog_subtask - periodic checks not using event driven response
- * @pf: board private structure
+ * i40e_find_cloud_filter - Find the cloud filter in the list
+ * @vsi: Pointer to VSI
+ * @cookie: filter specific cookie
+ *
  **/
-static void i40e_watchdog_subtask(struct i40e_pf *pf)
+static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
+							unsigned long *cookie)
 {
-	int i;
+	struct i40e_cloud_filter *filter = NULL;
+	struct hlist_node *node2;
 
-	/* if interface is down do nothing */
-	if (test_bit(__I40E_DOWN, pf->state) ||
-	    test_bit(__I40E_CONFIG_BUSY, pf->state))
-		return;
+	hlist_for_each_entry_safe(filter, node2,
+				  &vsi->back->cloud_filter_list, cloud_node)
+		if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
+			return filter;
+	return NULL;
+}
 
-	/* make sure we don't do these things too often */
-	if (time_before(jiffies, (pf->service_timer_previous +
-				  pf->service_timer_period)))
-		return;
-	pf->service_timer_previous = jiffies;
+/**
+ * i40e_delete_clsflower - Remove tc flower filters
+ * @vsi: Pointer to VSI
+ * @cls_flower: Pointer to struct flow_cls_offload
+ *
+ **/
+static int i40e_delete_clsflower(struct i40e_vsi *vsi,
+				 struct flow_cls_offload *cls_flower)
+{
+	struct i40e_cloud_filter *filter = NULL;
+	struct i40e_pf *pf = vsi->back;
+	int err = 0;
 
-	if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
-	    (pf->flags & I40E_FLAG_TEMP_LINK_POLLING))
-		i40e_link_event(pf);
+	filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);
 
-	/* Update the stats for active netdevs so the network stack
-	 * can look at updated numbers whenever it cares to
-	 */
-	for (i = 0; i < pf->num_alloc_vsi; i++)
-		if (pf->vsi[i] && pf->vsi[i]->netdev)
-			i40e_update_stats(pf->vsi[i]);
+	if (!filter)
+		return -EINVAL;
 
-	if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
-		/* Update the stats for the active switching components */
-		for (i = 0; i < I40E_MAX_VEB; i++)
-			if (pf->veb[i])
-				i40e_update_veb_stats(pf->veb[i]);
+	hash_del(&filter->cloud_node);
+
+	if (filter->dst_port)
+		err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
+	else
+		err = i40e_add_del_cloud_filter(vsi, filter, false);
+
+	kfree(filter);
+	if (err) {
+		dev_err(&pf->pdev->dev,
+			"Failed to delete cloud filter, err %s\n",
+			i40e_stat_str(&pf->hw, err));
+		return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
 	}
 
-	i40e_ptp_rx_hang(pf);
-	i40e_ptp_tx_hang(pf);
+	pf->num_cloud_filters--;
+	if (!pf->num_cloud_filters)
+		if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
+		    !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
+			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
+			pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
+			pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
+		}
+	return 0;
 }
 
 /**
- * i40e_reset_subtask - Set up for resetting the device and driver
- * @pf: board private structure
+ * i40e_setup_tc_cls_flower - flower classifier offloads
+ * @np: net device to configure
+ * @cls_flower: pointer to cls flower offload structure
  **/
-static void i40e_reset_subtask(struct i40e_pf *pf)
+static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
+				    struct flow_cls_offload *cls_flower)
 {
-	u32 reset_flags = 0;
+	struct i40e_vsi *vsi = np->vsi;
 
-	if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
-		reset_flags |= BIT(__I40E_REINIT_REQUESTED);
-		clear_bit(__I40E_REINIT_REQUESTED, pf->state);
-	}
-	if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
-		reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
-		clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
-	}
-	if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
-		reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
-		clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
-	}
-	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
-		reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
-		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
-	}
-	if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
-		reset_flags |= BIT(__I40E_DOWN_REQUESTED);
-		clear_bit(__I40E_DOWN_REQUESTED, pf->state);
-	}
+	if (cls_flower->common.chain_index)
+		return -EOPNOTSUPP;
 
-	/* If there's a recovery already waiting, it takes
-	 * precedence before starting a new reset sequence.
-	 */
-	if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
-		i40e_prep_for_reset(pf, false);
-		i40e_reset(pf);
-		i40e_rebuild(pf, false, false);
+	switch (cls_flower->command) {
+	case FLOW_CLS_REPLACE:
+		return i40e_configure_clsflower(vsi, cls_flower);
+	case FLOW_CLS_DESTROY:
+		return i40e_delete_clsflower(vsi, cls_flower);
+	case FLOW_CLS_STATS:
+		return -EOPNOTSUPP;
+	default:
+		return -EOPNOTSUPP;
 	}
+}
 
-	/* If we're already down or resetting, just bail */
-	if (reset_flags &&
-	    !test_bit(__I40E_DOWN, pf->state) &&
-	    !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
-		i40e_do_reset(pf, reset_flags, false);
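+/**
+ * i40e_setup_tc_block_cb - flow block callback for cls_flower offloads
+ * @type: tc setup type
+ * @type_data: tc offload data
+ * @cb_priv: netdev private data registered with the flow block
+ **/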
+static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+				  void *cb_priv)
+{
+	struct i40e_netdev_priv *np = cb_priv;
+
+	switch (type) {
+	case TC_SETUP_CLSFLOWER:
+		return i40e_setup_tc_cls_flower(np, type_data);
+
+	default:
+		return -EOPNOTSUPP;
 	}
 }
 
-/**
- * i40e_handle_link_event - Handle link event
- * @pf: board private structure
- * @e: event info posted on ARQ
- **/
-static void i40e_handle_link_event(struct i40e_pf *pf,
-				   struct i40e_arq_event_info *e)
+static LIST_HEAD(i40e_block_cb_list);
+#endif
+
+#ifdef NETIF_F_HW_TC
+#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV
+static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+			   void *type_data)
+#elif defined(HAVE_NDO_SETUP_TC_CHAIN_INDEX)
+static int __i40e_setup_tc(struct net_device *netdev, u32 handle,
+			   u32 chain_index, __be16 proto,
+			   struct tc_to_netdev *tc)
+#else
+static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
+			   struct tc_to_netdev *tc)
+#endif
 {
-	struct i40e_aqc_get_link_status *status =
-		(struct i40e_aqc_get_link_status *)&e->desc.params.raw;
+#ifdef __TC_MQPRIO_MODE_MAX
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+#endif
+#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV
+#ifndef __TC_MQPRIO_MODE_MAX
+	struct tc_mqprio_qopt *mqprio = type_data;
+#endif
+#else
+#ifdef TC_MQPRIO_HW_OFFLOAD_MAX
+	struct tc_mqprio_qopt *mqprio = tc->mqprio;
+#endif /* TC_MQPRIO_HW_OFFLOAD_MAX*/
+	unsigned int type = tc->type;
+#endif /* HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV */
+#ifdef __TC_MQPRIO_MODE_MAX
+	switch (type) {
+	case TC_SETUP_QDISC_MQPRIO:
+		return i40e_setup_tc(netdev, type_data);
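+	/* register the block callback that delivers flower rules to this PF */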
+	case TC_SETUP_BLOCK:
+		return flow_block_cb_setup_simple(type_data,
+						  &i40e_block_cb_list,
+						  i40e_setup_tc_block_cb,
+						  np, np, true);
+	default:
+		return -EOPNOTSUPP;
+	}
+#else
+	if (type != TC_SETUP_QDISC_MQPRIO)
+		return -EINVAL;
 
-	/* Do a new status request to re-enable LSE reporting
-	 * and load new status information into the hw struct
-	 * This completely ignores any state information
-	 * in the ARQ event info, instead choosing to always
-	 * issue the AQ update link status command.
-	 */
-	i40e_link_event(pf);
+#ifdef TC_MQPRIO_HW_OFFLOAD_MAX
+	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
 
-	/* check for unqualified module, if link is down */
-	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
-	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
-	    (!(status->link_info & I40E_AQ_LINK_UP)))
-		dev_err(&pf->pdev->dev,
-			"The driver failed to link because an unqualified module was detected.\n");
+	return i40e_setup_tc(netdev, mqprio->num_tc);
+#else
+	return i40e_setup_tc(netdev, tc->tc);
+#endif /* TC_MQPRIO_HW_OFFLOAD_MAX */
+#endif /* __TC_MQPRIO_MODE_MAX */
 }
+#endif /* NETIF_F_HW_TC */
+#endif /* HAVE_SETUP_TC */
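+
+/* Illustrative only: hardware-offloaded mqprio would typically be requested
+ * from userspace along these lines (hypothetical interface and queue layout):
+ *   tc qdisc add dev eth0 root mqprio num_tc 2 \
+ *      map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1
+ */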
 
 /**
- * i40e_clean_adminq_subtask - Clean the AdminQ rings
- * @pf: board private structure
+ * i40e_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP).  At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the netdev watchdog subtask is
+ * enabled, and the stack is notified that the interface is ready.
+ *
+ * Returns 0 on success, negative value on failure
  **/
-static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
+int i40e_open(struct net_device *netdev)
 {
-	struct i40e_arq_event_info event;
-	struct i40e_hw *hw = &pf->hw;
-	u16 pending, i = 0;
-	i40e_status ret;
-	u16 opcode;
-	u32 oldval;
-	u32 val;
-
-	/* Do not run clean AQ when PF reset fails */
-	if (test_bit(__I40E_RESET_FAILED, pf->state))
-		return;
-
-	/* check for error indications */
-	val = rd32(&pf->hw, pf->hw.aq.arq.len);
-	oldval = val;
-	if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
-		if (hw->debug_mask & I40E_DEBUG_AQ)
-			dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
-		val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
-	}
-	if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
-		if (hw->debug_mask & I40E_DEBUG_AQ)
-			dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
-		val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
-		pf->arq_overflows++;
-	}
-	if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
-		if (hw->debug_mask & I40E_DEBUG_AQ)
-			dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
-		val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
-	}
-	if (oldval != val)
-		wr32(&pf->hw, pf->hw.aq.arq.len, val);
-
-	val = rd32(&pf->hw, pf->hw.aq.asq.len);
-	oldval = val;
-	if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
-		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
-			dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
-		val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
-	}
-	if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
-		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
-			dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
-		val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
-	}
-	if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
-		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
-			dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
-		val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
-	}
-	if (oldval != val)
-		wr32(&pf->hw, pf->hw.aq.asq.len, val);
-
-	event.buf_len = I40E_MAX_AQ_BUF_SIZE;
-	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
-	if (!event.msg_buf)
-		return;
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	int err;
 
-	do {
-		ret = i40e_clean_arq_element(hw, &event, &pending);
-		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
-			break;
-		else if (ret) {
-			dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
-			break;
-		}
+	/* disallow open during test or if eeprom is broken */
+	if (test_bit(__I40E_TESTING, pf->state) ||
+	    test_bit(__I40E_BAD_EEPROM, pf->state))
+		return -EBUSY;
 
-		opcode = le16_to_cpu(event.desc.opcode);
-		switch (opcode) {
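+	/* start with carrier off; the link event handler turns it back on */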
+	netif_carrier_off(netdev);
 
-		case i40e_aqc_opc_get_link_status:
-			i40e_handle_link_event(pf, &event);
-			break;
-		case i40e_aqc_opc_send_msg_to_pf:
-			ret = i40e_vc_process_vf_msg(pf,
-					le16_to_cpu(event.desc.retval),
-					le32_to_cpu(event.desc.cookie_high),
-					le32_to_cpu(event.desc.cookie_low),
-					event.msg_buf,
-					event.msg_len);
-			break;
-		case i40e_aqc_opc_lldp_update_mib:
-			dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
-#ifdef CONFIG_I40E_DCB
-			rtnl_lock();
-			ret = i40e_handle_lldp_event(pf, &event);
-			rtnl_unlock();
-#endif /* CONFIG_I40E_DCB */
-			break;
-		case i40e_aqc_opc_event_lan_overflow:
-			dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
-			i40e_handle_lan_overflow_event(pf, &event);
-			break;
-		case i40e_aqc_opc_send_msg_to_peer:
-			dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
-			break;
-		case i40e_aqc_opc_nvm_erase:
-		case i40e_aqc_opc_nvm_update:
-		case i40e_aqc_opc_oem_post_update:
-			i40e_debug(&pf->hw, I40E_DEBUG_NVM,
-				   "ARQ NVM operation 0x%04x completed\n",
-				   opcode);
-			break;
-		default:
-			dev_info(&pf->pdev->dev,
-				 "ARQ: Unknown event 0x%04x ignored\n",
-				 opcode);
-			break;
-		}
-	} while (i++ < pf->adminq_work_limit);
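+	/* ask the firmware to bring the PHY link up before opening the VSI */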
+	if (i40e_force_link_state(pf, true))
+		return -EAGAIN;
 
-	if (i < pf->adminq_work_limit)
-		clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
+	err = i40e_vsi_open(vsi);
+	if (err)
+		return err;
+	/* configure global TSO hardware offload settings */
+	wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
+						       TCP_FLAG_FIN) >> 16);
+	wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
+						       TCP_FLAG_FIN |
+						       TCP_FLAG_CWR) >> 16);
+	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
 
-	/* re-enable Admin queue interrupt cause */
-	val = rd32(hw, I40E_PFINT_ICR0_ENA);
-	val |=  I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
-	wr32(hw, I40E_PFINT_ICR0_ENA, val);
-	i40e_flush(hw);
+#ifdef HAVE_VXLAN_RX_OFFLOAD
+#if IS_ENABLED(CONFIG_VXLAN)
+	vxlan_get_rx_port(netdev);
+#endif
+#endif /* HAVE_VXLAN_RX_OFFLOAD */
+#ifdef HAVE_GENEVE_RX_OFFLOAD
+#if IS_ENABLED(CONFIG_GENEVE)
+	if (pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE)
+		geneve_get_rx_port(netdev);
+#endif
+#endif /* HAVE_GENEVE_RX_OFFLOAD */
+#ifdef HAVE_UDP_ENC_RX_OFFLOAD
+	udp_tunnel_get_rx_info(netdev);
+#endif /* HAVE_UDP_ENC_RX_OFFLOAD */
 
-	kfree(event.msg_buf);
+	return 0;
 }
 
 /**
- * i40e_verify_eeprom - make sure eeprom is good to use
- * @pf: board private structure
+ * i40e_vsi_open - set up a VSI's resources and bring it up
+ * @vsi: the VSI to open
+ *
+ * Finish initialization of the VSI.
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * Note: expects to be called while under rtnl_lock()
  **/
-static void i40e_verify_eeprom(struct i40e_pf *pf)
+int i40e_vsi_open(struct i40e_vsi *vsi)
 {
+	struct i40e_pf *pf = vsi->back;
+	char int_name[I40E_INT_NAME_STR_LEN];
 	int err;
+	int i;
+	u8 enabled_tc = 0;
 
-	err = i40e_diag_eeprom_test(&pf->hw);
-	if (err) {
-		/* retry in case of garbage read */
-		err = i40e_diag_eeprom_test(&pf->hw);
-		if (err) {
-			dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
-				 err);
-			set_bit(__I40E_BAD_EEPROM, pf->state);
-		}
-	}
+	/* allocate descriptors */
+	err = i40e_vsi_setup_tx_resources(vsi);
+	if (err)
+		goto err_setup_tx;
+	err = i40e_vsi_setup_rx_resources(vsi);
+	if (err)
+		goto err_setup_rx;
 
-	if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
-		dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
-		clear_bit(__I40E_BAD_EEPROM, pf->state);
-	}
-}
+	err = i40e_vsi_configure(vsi);
+	if (err)
+		goto err_setup_rx;
 
-/**
- * i40e_enable_pf_switch_lb
- * @pf: pointer to the PF structure
- *
- * enable switch loop back or die - no point in a return value
+	if (vsi->netdev) {
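+		/* IRQ name takes the form "<driver>-<netdev name>" */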
+		snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
+			 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
+		err = i40e_vsi_request_irq(vsi, int_name);
+		if (err)
+			goto err_setup_rx;
+
+		/* If real_num_tx_queues has changed, the tc mapping may no
+		 * longer be valid; reset the tc mapping using the new
+		 * number of queues.
+		 */
+#ifdef __TC_MQPRIO_MODE_MAX
+		for (i = 0; i < vsi->mqprio_qopt.qopt.num_tc; i++)
+#else
+		for (i = 0; i < vsi->tc_config.numtc; i++)
+#endif
+			enabled_tc |= BIT(i);
+		if (!enabled_tc)
+			netdev_reset_tc(vsi->netdev);
+
+		/* Notify the stack of the actual queue counts. */
+		err = netif_set_real_num_tx_queues(vsi->netdev,
+						   vsi->num_queue_pairs);
+		if (err)
+			goto err_set_queues;
+
+		/* When reducing the number of Tx queues, any pre-existing
+		 * skbuffs might target a now removed queue. Older versions of
+		 * the Linux kernel do not check for this, and it can result
+		 * in a kernel panic. Avoid this by flushing all skbs now, so
+		 * that we avoid attempting to transmit one that has an
+		 * invalid queue mapping.
+		 */
+		qdisc_reset_all_tx(vsi->netdev);
+
+		err = netif_set_real_num_rx_queues(vsi->netdev,
+						   vsi->num_queue_pairs);
+		if (err)
+			goto err_set_queues;
+
+	} else if (vsi->type == I40E_VSI_FDIR) {
+		snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
+			 dev_driver_string(&pf->pdev->dev),
+			 dev_name(&pf->pdev->dev));
+		err = i40e_vsi_request_irq(vsi, int_name);
+
+	} else {
+		err = -EINVAL;
+		goto err_setup_rx;
+	}
+
+	err = i40e_up_complete(vsi);
+	if (err)
+		goto err_up_complete;
+
+	return 0;
+
+err_up_complete:
+	i40e_down(vsi);
+err_set_queues:
+	i40e_vsi_free_irq(vsi);
+err_setup_rx:
+	i40e_vsi_free_rx_resources(vsi);
+err_setup_tx:
+	i40e_vsi_free_tx_resources(vsi);
+	if (vsi == pf->vsi[pf->lan_vsi])
+		i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
+
+	return err;
+}
+
+/**
+ * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
+ * @pf: Pointer to PF
+ *
+ * This function destroys the hlist where all the Flow Director
+ * filters were saved.
  **/
-static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
+static void i40e_fdir_filter_exit(struct i40e_pf *pf)
 {
-	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
-	struct i40e_vsi_context ctxt;
-	int ret;
+	struct i40e_fdir_filter *filter;
+	struct i40e_flex_pit *pit_entry, *tmp;
+	struct hlist_node *node2;
 
-	ctxt.seid = pf->main_vsi_seid;
-	ctxt.pf_num = pf->hw.pf_id;
-	ctxt.vf_num = 0;
-	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
-	if (ret) {
-		dev_info(&pf->pdev->dev,
-			 "couldn't get PF vsi config, err %s aq_err %s\n",
-			 i40e_stat_str(&pf->hw, ret),
-			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
-		return;
+	hlist_for_each_entry_safe(filter, node2,
+				  &pf->fdir_filter_list, fdir_node) {
+		hlist_del(&filter->fdir_node);
+		kfree(filter);
 	}
-	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
-	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
-	ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
 
-	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
-	if (ret) {
-		dev_info(&pf->pdev->dev,
-			 "update vsi switch failed, err %s aq_err %s\n",
-			 i40e_stat_str(&pf->hw, ret),
-			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+	list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
+		list_del(&pit_entry->list);
+		kfree(pit_entry);
+	}
+	INIT_LIST_HEAD(&pf->l3_flex_pit_list);
+
+	list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
+		list_del(&pit_entry->list);
+		kfree(pit_entry);
 	}
+	INIT_LIST_HEAD(&pf->l4_flex_pit_list);
+
+	pf->fdir_pf_active_filters = 0;
+	pf->fd_tcp4_filter_cnt = 0;
+	pf->fd_udp4_filter_cnt = 0;
+	pf->fd_sctp4_filter_cnt = 0;
+	pf->fd_ip4_filter_cnt = 0;
+
+	/* Reprogram the default input set for TCP/IPv4 */
+	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
+				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
+				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
+	/* Reprogram the default input set for UDP/IPv4 */
+	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
+				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
+				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
+	/* Reprogram the default input set for SCTP/IPv4 */
+	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
+				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
+				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
+	/* Reprogram the default input set for Other/IPv4 */
+	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
+				I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
+
+	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
+				I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
 }
 
 /**
- * i40e_disable_pf_switch_lb
- * @pf: pointer to the PF structure
+ * i40e_cloud_filter_exit - Cleans up Cloud Filters
+ * @pf: Pointer to PF
  *
- * disable switch loop back or die - no point in a return value
+ * This function destroys the hlist which keeps all the Cloud Filters.
  **/
-static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
+static void i40e_cloud_filter_exit(struct i40e_pf *pf)
 {
-	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
-	struct i40e_vsi_context ctxt;
-	int ret;
+	struct i40e_cloud_filter *cfilter;
+	struct hlist_node *node;
 
-	ctxt.seid = pf->main_vsi_seid;
-	ctxt.pf_num = pf->hw.pf_id;
-	ctxt.vf_num = 0;
-	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
-	if (ret) {
-		dev_info(&pf->pdev->dev,
-			 "couldn't get PF vsi config, err %s aq_err %s\n",
-			 i40e_stat_str(&pf->hw, ret),
-			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
-		return;
+	hlist_for_each_entry_safe(cfilter, node,
+				  &pf->cloud_filter_list, cloud_node) {
+		hlist_del(&cfilter->cloud_node);
+		kfree(cfilter);
 	}
-	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
-	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
-	ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+	pf->num_cloud_filters = 0;
 
-	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
-	if (ret) {
-		dev_info(&pf->pdev->dev,
-			 "update vsi switch failed, err %s aq_err %s\n",
-			 i40e_stat_str(&pf->hw, ret),
-			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+	if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
+	    !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
+		pf->flags |= I40E_FLAG_FD_SB_ENABLED;
+		pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
+		pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
 	}
 }
 
 /**
- * i40e_config_bridge_mode - Configure the HW bridge mode
- * @veb: pointer to the bridge instance
+ * i40e_close - Disables a network interface
+ * @netdev: network interface device structure
  *
- * Configure the loop back mode for the LAN VSI that is downlink to the
- * specified HW bridge instance. It is expected this function is called
- * when a new HW bridge is instantiated.
+ * The close entry point is called when an interface is de-activated
+ * by the OS.  The hardware is still under the driver's control, but
+ * this netdev interface is disabled.
+ *
+ * Returns 0, this is not allowed to fail
  **/
-static void i40e_config_bridge_mode(struct i40e_veb *veb)
+int i40e_close(struct net_device *netdev)
 {
-	struct i40e_pf *pf = veb->pf;
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
 
-	if (pf->hw.debug_mask & I40E_DEBUG_LAN)
-		dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
-			 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
-	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
-		i40e_disable_pf_switch_lb(pf);
-	else
-		i40e_enable_pf_switch_lb(pf);
+	i40e_vsi_close(vsi);
+
+	return 0;
 }
 
 /**
- * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
- * @veb: pointer to the VEB instance
+ * i40e_do_reset - Start a PF or Core Reset sequence
+ * @pf: board private structure
+ * @reset_flags: which reset is requested
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
  *
- * This is a recursive function that first builds the attached VSIs then
- * recurses in to build the next layer of VEB.  We track the connections
- * through our own index numbers because the seid's from the HW could
- * change across the reset.
+ * The essential difference in resets is that the PF Reset
+ * doesn't clear the packet buffers, doesn't reset the PE
+ * firmware, and doesn't bother the other PFs on the chip.
  **/
-static int i40e_reconstitute_veb(struct i40e_veb *veb)
+void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
 {
-	struct i40e_vsi *ctl_vsi = NULL;
-	struct i40e_pf *pf = veb->pf;
-	int v, veb_idx;
-	int ret;
+	u32 val;
 
-	/* build VSI that owns this VEB, temporarily attached to base VEB */
-	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
-		if (pf->vsi[v] &&
-		    pf->vsi[v]->veb_idx == veb->idx &&
-		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
-			ctl_vsi = pf->vsi[v];
-			break;
-		}
-	}
-	if (!ctl_vsi) {
-		dev_info(&pf->pdev->dev,
-			 "missing owner VSI for veb_idx %d\n", veb->idx);
-		ret = -ENOENT;
-		goto end_reconstitute;
-	}
-	if (ctl_vsi != pf->vsi[pf->lan_vsi])
-		ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
-	ret = i40e_add_vsi(ctl_vsi);
-	if (ret) {
-		dev_info(&pf->pdev->dev,
-			 "rebuild of veb_idx %d owner VSI failed: %d\n",
-			 veb->idx, ret);
-		goto end_reconstitute;
-	}
-	i40e_vsi_reset_stats(ctl_vsi);
+	WARN_ON(in_interrupt());
 
-	/* create the VEB in the switch and move the VSI onto the VEB */
-	ret = i40e_add_veb(veb, ctl_vsi);
-	if (ret)
-		goto end_reconstitute;
 
-	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
-		veb->bridge_mode = BRIDGE_MODE_VEB;
-	else
-		veb->bridge_mode = BRIDGE_MODE_VEPA;
-	i40e_config_bridge_mode(veb);
+	/* do the biggest reset indicated */
+	if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
 
-	/* create the remaining VSIs attached to this VEB */
-	for (v = 0; v < pf->num_alloc_vsi; v++) {
-		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
-			continue;
+		/* Request a Global Reset
+		 *
+		 * This will start the chip's countdown to the actual full
+		 * chip reset event and cause a warning interrupt to be sent
+		 * to all PFs, including the requestor.  Our handler
+		 * for the warning interrupt will deal with the shutdown
+		 * and recovery of the switch setup.
+		 */
+		dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
+		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
+		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
+		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
 
-		if (pf->vsi[v]->veb_idx == veb->idx) {
-			struct i40e_vsi *vsi = pf->vsi[v];
+	} else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
 
-			vsi->uplink_seid = veb->seid;
-			ret = i40e_add_vsi(vsi);
-			if (ret) {
-				dev_info(&pf->pdev->dev,
-					 "rebuild of vsi_idx %d failed: %d\n",
-					 v, ret);
-				goto end_reconstitute;
-			}
-			i40e_vsi_reset_stats(vsi);
-		}
-	}
+		/* Request a Core Reset
+		 *
+		 * Same as Global Reset, except does *not* include the MAC/PHY
+		 */
+		dev_dbg(&pf->pdev->dev, "CoreR requested\n");
+		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
+		val |= I40E_GLGEN_RTRIG_CORER_MASK;
+		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
+		i40e_flush(&pf->hw);
 
-	/* create any VEBs attached to this VEB - RECURSION */
-	for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
-		if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
-			pf->veb[veb_idx]->uplink_seid = veb->seid;
-			ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
-			if (ret)
-				break;
-		}
-	}
+	} else if (reset_flags & I40E_PF_RESET_FLAG) {
 
-end_reconstitute:
-	return ret;
-}
+		/* Request a PF Reset
+		 *
+		 * Resets only the PF-specific registers
+		 *
+		 * This goes directly to the tear-down and rebuild of
+		 * the switch, since we need to do all the recovery as
+		 * for the Core Reset.
+		 */
+		dev_dbg(&pf->pdev->dev, "PFR requested\n");
+		i40e_handle_reset_warning(pf, lock_acquired);
 
-/**
- * i40e_get_capabilities - get info about the HW
- * @pf: the PF struct
- **/
-static int i40e_get_capabilities(struct i40e_pf *pf)
-{
-	struct i40e_aqc_list_capabilities_element_resp *cap_buf;
-	u16 data_size;
-	int buf_len;
-	int err;
+		dev_info(&pf->pdev->dev,
+			 pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
+			 "FW LLDP is disabled\n" :
+			 "FW LLDP is enabled\n");
 
-	buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
-	do {
-		cap_buf = kzalloc(buf_len, GFP_KERNEL);
-		if (!cap_buf)
-			return -ENOMEM;
+	} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
+		int v;
 
-		/* this loads the data into the hw struct for us */
-		err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
-					    &data_size,
-					    i40e_aqc_opc_list_func_capabilities,
-					    NULL);
-		/* data loaded, buffer no longer needed */
-		kfree(cap_buf);
+		/* Find the VSI(s) that requested a re-init */
+		dev_info(&pf->pdev->dev,
+			 "VSI reinit requested\n");
+		for (v = 0; v < pf->num_alloc_vsi; v++) {
+			struct i40e_vsi *vsi = pf->vsi[v];
 
-		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
-			/* retry with a larger buffer */
-			buf_len = data_size;
-		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
-			dev_info(&pf->pdev->dev,
-				 "capability discovery failed, err %s aq_err %s\n",
-				 i40e_stat_str(&pf->hw, err),
-				 i40e_aq_str(&pf->hw,
-					     pf->hw.aq.asq_last_status));
-			return -ENODEV;
+			if (vsi != NULL &&
+			    test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
+					       vsi->state))
+				i40e_vsi_reinit_locked(pf->vsi[v]);
 		}
-	} while (err);
+	} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
+		int v;
 
-	if (pf->hw.debug_mask & I40E_DEBUG_USER)
-		dev_info(&pf->pdev->dev,
-			 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
-			 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
-			 pf->hw.func_caps.num_msix_vectors,
-			 pf->hw.func_caps.num_msix_vectors_vf,
-			 pf->hw.func_caps.fd_filters_guaranteed,
-			 pf->hw.func_caps.fd_filters_best_effort,
-			 pf->hw.func_caps.num_tx_qp,
-			 pf->hw.func_caps.num_vsis);
-
-#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
-		       + pf->hw.func_caps.num_vfs)
-	if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
+		/* Find the VSI(s) that needs to be brought down */
+		dev_info(&pf->pdev->dev, "VSI down requested\n");
+		for (v = 0; v < pf->num_alloc_vsi; v++) {
+			struct i40e_vsi *vsi = pf->vsi[v];
+
+			if (vsi != NULL &&
+			    test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
+					       vsi->state)) {
+				set_bit(__I40E_VSI_DOWN, vsi->state);
+				i40e_down(vsi);
+			}
+		}
+	} else {
 		dev_info(&pf->pdev->dev,
-			 "got num_vsis %d, setting num_vsis to %d\n",
-			 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
-		pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
+			 "bad reset request 0x%08x\n", reset_flags);
 	}
-
-	return 0;
 }
 
-static int i40e_vsi_clear(struct i40e_vsi *vsi);
+/**
+ * i40e_do_reset_safe - Protected reset path for userland calls.
+ * @pf: board private structure
+ * @reset_flags: which reset is requested
+ **/
+void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
+{
+	rtnl_lock();
+	i40e_do_reset(pf, reset_flags, true);
+	rtnl_unlock();
+}
 
+#ifdef CONFIG_DCB
 /**
- * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
+ * i40e_dcb_need_reconfig - Check if DCB needs reconfig
  * @pf: board private structure
+ * @old_cfg: current DCB config
+ * @new_cfg: new DCB config
  **/
-static void i40e_fdir_sb_setup(struct i40e_pf *pf)
+bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
+			    struct i40e_dcbx_config *old_cfg,
+			    struct i40e_dcbx_config *new_cfg)
 {
-	struct i40e_vsi *vsi;
+	bool need_reconfig = false;
 
-	/* quick workaround for an NVM issue that leaves a critical register
-	 * uninitialized
-	 */
-	if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
-		static const u32 hkey[] = {
-			0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
-			0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
-			0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
-			0x95b3a76d};
-		int i;
+	/* Check if ETS configuration has changed */
+	if (memcmp(&new_cfg->etscfg,
+		   &old_cfg->etscfg,
+		   sizeof(new_cfg->etscfg))) {
+		/* If Priority Table has changed reconfig is needed */
+		if (memcmp(&new_cfg->etscfg.prioritytable,
+			   &old_cfg->etscfg.prioritytable,
+			   sizeof(new_cfg->etscfg.prioritytable))) {
+			need_reconfig = true;
+			dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
+		}
 
-		for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
-			wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
-	}
+		if (memcmp(&new_cfg->etscfg.tcbwtable,
+			   &old_cfg->etscfg.tcbwtable,
+			   sizeof(new_cfg->etscfg.tcbwtable)))
+			dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
 
-	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
-		return;
+		if (memcmp(&new_cfg->etscfg.tsatable,
+			   &old_cfg->etscfg.tsatable,
+			   sizeof(new_cfg->etscfg.tsatable)))
+			dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
+	}
 
-	/* find existing VSI and see if it needs configuring */
-	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
+	/* Check if PFC configuration has changed */
+	if (memcmp(&new_cfg->pfc,
+		   &old_cfg->pfc,
+		   sizeof(new_cfg->pfc))) {
+		need_reconfig = true;
+		dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
+	}
 
-	/* create a new VSI if none exists */
-	if (!vsi) {
-		vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
-				     pf->vsi[pf->lan_vsi]->seid, 0);
-		if (!vsi) {
-			dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
-			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
-			return;
-		}
+	/* Check if APP Table has changed */
+	if (memcmp(&new_cfg->app,
+		   &old_cfg->app,
+		   sizeof(new_cfg->app))) {
+		need_reconfig = true;
+		dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
 	}
 
-	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
+	dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
+	return need_reconfig;
 }
 
 /**
- * i40e_fdir_teardown - release the Flow Director resources
+ * i40e_handle_lldp_event - Handle LLDP Change MIB event
  * @pf: board private structure
+ * @e: event info posted on ARQ
  **/
-static void i40e_fdir_teardown(struct i40e_pf *pf)
-{
-	struct i40e_vsi *vsi;
-
-	i40e_fdir_filter_exit(pf);
-	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
-	if (vsi)
-		i40e_vsi_release(vsi);
-}
-
-/**
- * i40e_prep_for_reset - prep for the core to reset
- * @pf: board private structure
- * @lock_acquired: indicates whether or not the lock has been acquired
- * before this function was called.
- *
- * Close up the VFs and other things in prep for PF Reset.
-  **/
-static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
+static int i40e_handle_lldp_event(struct i40e_pf *pf,
+				  struct i40e_arq_event_info *e)
 {
+	struct i40e_aqc_lldp_get_mib *mib =
+		(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
 	struct i40e_hw *hw = &pf->hw;
-	i40e_status ret = 0;
-	u32 v;
+	struct i40e_dcbx_config tmp_dcbx_cfg;
+	bool need_reconfig = false;
+	int ret = 0;
+	u8 type;
 
-	clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
-	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
-		return;
-	if (i40e_check_asq_alive(&pf->hw))
-		i40e_vc_notify_reset(pf);
+	/* X710-T*L 2.5G and 5G speeds don't support DCB */
+	if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
+	    (hw->phy.link_info.link_speed &
+	     ~(I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB)) &&
+	     !(pf->flags & I40E_FLAG_DCB_CAPABLE))
+		/* let firmware decide if the DCB should be disabled */
+		pf->flags |= I40E_FLAG_DCB_CAPABLE;
 
-	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
+	/* Not DCB capable or capability disabled */
+	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+		return ret;
 
-	/* quiesce the VSIs and their queues that are not already DOWN */
-	/* pf_quiesce_all_vsi modifies netdev structures -rtnl_lock needed */
-	if (!lock_acquired)
-		rtnl_lock();
-	i40e_pf_quiesce_all_vsi(pf);
-	if (!lock_acquired)
-		rtnl_unlock();
+	/* Ignore if event is not for Nearest Bridge */
+	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
+		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+	dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
+	if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
+		return ret;
 
-	for (v = 0; v < pf->num_alloc_vsi; v++) {
-		if (pf->vsi[v])
-			pf->vsi[v]->seid = 0;
+	/* Check MIB Type and return if event for Remote MIB update */
+	type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
+	dev_dbg(&pf->pdev->dev,
+		"LLDP event mib type %s\n", type ? "remote" : "local");
+	if (type == I40E_AQ_LLDP_MIB_REMOTE) {
+		/* Update the remote cached instance and return */
+		ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+				I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+				&hw->remote_dcbx_config);
+		goto exit;
 	}
 
-	i40e_shutdown_adminq(&pf->hw);
+	/* Store the old configuration */
+	tmp_dcbx_cfg = hw->local_dcbx_config;
 
-	/* call shutdown HMC */
-	if (hw->hmc.hmc_obj) {
-		ret = i40e_shutdown_lan_hmc(hw);
-		if (ret)
-			dev_warn(&pf->pdev->dev,
-				 "shutdown_lan_hmc failed: %d\n", ret);
-	}
+	/* Reset the old DCBx configuration data */
+	memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
+	/* Get updated DCBX data from firmware */
+	ret = i40e_get_dcb_config(&pf->hw);
+	if (ret) {
+		/* X710-T*L 2.5G and 5G speeds don't support DCB */
+		if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
+		    (hw->phy.link_info.link_speed &
+		     (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
+			dev_warn(&pf->pdev->dev,
+				 "DCB is not supported for X710-T*L 2.5/5G speeds\n");
+			pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+		} else {
+			dev_info(&pf->pdev->dev,
+				 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
+				 i40e_stat_str(&pf->hw, ret),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
+		}
+		goto exit;
+	}
+
+	/* No change detected in DCBX configs */
+	if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
+		    sizeof(tmp_dcbx_cfg))) {
+		dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
+		goto exit;
+	}
+
+	need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
+					       &hw->local_dcbx_config);
+
+#ifdef HAVE_DCBNL_IEEE
+	i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
+#endif /* HAVE_DCBNL_IEEE */
+
+	if (!need_reconfig)
+		goto exit;
+
+	/* Enable DCB tagging only when more than one TC */
+	if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
+		pf->flags |= I40E_FLAG_DCB_ENABLED;
+	else
+		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+
+	set_bit(__I40E_PORT_SUSPENDED, pf->state);
+	/* Reconfiguration is needed, so quiesce all VSIs */
+	i40e_pf_quiesce_all_vsi(pf);
+
+	/* Propagate the configuration changes to the VEB/VSIs */
+	i40e_dcb_reconfigure(pf);
+
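+	/* resume port Tx now that the new DCB configuration is in place */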
+	ret = i40e_resume_port_tx(pf);
+
+	clear_bit(__I40E_PORT_SUSPENDED, pf->state);
+	/* In case of error no point in resuming VSIs */
+	if (ret)
+		goto exit;
+
+	/* Wait for the PF's queues to be disabled */
+	ret = i40e_pf_wait_queues_disabled(pf);
+	if (ret) {
+		/* Schedule PF reset to recover */
+		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
+		i40e_service_event_schedule(pf);
+	} else {
+		i40e_pf_unquiesce_all_vsi(pf);
+		set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
+		set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
+	}
+
+exit:
+	return ret;
 }
 
+#endif /* CONFIG_DCB */
 /**
- * i40e_send_version - update firmware with driver version
- * @pf: PF struct
- */
-static void i40e_send_version(struct i40e_pf *pf)
+ * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
+ * @pf: board private structure
+ * @e: event info posted on ARQ
+ *
+ * Handler for LAN Queue Overflow Event generated by the firmware for PF
+ * and VF queues
+ **/
+static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
+					   struct i40e_arq_event_info *e)
 {
-	struct i40e_driver_version dv;
+	struct i40e_aqc_lan_overflow *data =
+		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
+	u32 queue = le32_to_cpu(data->prtdcb_rupto);
+	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_vf *vf;
+	u16 vf_id;
 
-	dv.major_version = DRV_VERSION_MAJOR;
-	dv.minor_version = DRV_VERSION_MINOR;
-	dv.build_version = DRV_VERSION_BUILD;
-	dv.subbuild_version = 0;
-	strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
-	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
+	dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
+		queue, qtx_ctl);
+
+	/* Queue belongs to VF, find the VF and issue VF reset */
+	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
+	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
+		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
+			 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
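+		/* convert the absolute VF id into an index into pf->vf[] */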
+		vf_id -= hw->func_caps.vf_base_id;
+		vf = &pf->vf[vf_id];
+		i40e_vc_notify_vf_reset(vf);
+		/* Allow VF to process pending reset notification */
+		msleep(20);
+		i40e_reset_vf(vf, false);
+	}
 }
 
 /**
- * i40e_get_oem_version - get OEM specific version information
- * @hw: pointer to the hardware structure
+ * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
+ * @pf: board private structure
  **/
-static void i40e_get_oem_version(struct i40e_hw *hw)
+u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
 {
-	u16 block_offset = 0xffff;
-	u16 block_length = 0;
-	u16 capabilities = 0;
-	u16 gen_snap = 0;
-	u16 release = 0;
-
-#define I40E_SR_NVM_OEM_VERSION_PTR		0x1B
-#define I40E_NVM_OEM_LENGTH_OFFSET		0x00
-#define I40E_NVM_OEM_CAPABILITIES_OFFSET	0x01
-#define I40E_NVM_OEM_GEN_OFFSET			0x02
-#define I40E_NVM_OEM_RELEASE_OFFSET		0x03
-#define I40E_NVM_OEM_CAPABILITIES_MASK		0x000F
-#define I40E_NVM_OEM_LENGTH			3
-
-	/* Check if pointer to OEM version block is valid. */
-	i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
-	if (block_offset == 0xffff)
-		return;
+	u32 val, fcnt_prog;
 
-	/* Check if OEM version block has correct length. */
-	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
-			   &block_length);
-	if (block_length < I40E_NVM_OEM_LENGTH)
-		return;
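+	/* GUARANT_CNT in PFQF_FDSTAT counts the guaranteed filters in use */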
+	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
+	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
+	return fcnt_prog;
+}
 
-	/* Check if OEM version format is as expected. */
-	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
-			   &capabilities);
-	if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
-		return;
+/**
+ * i40e_get_current_fd_count - Get total FD filters programmed for this PF
+ * @pf: board private structure
+ **/
+u32 i40e_get_current_fd_count(struct i40e_pf *pf)
+{
+	u32 val, fcnt_prog;
 
-	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
-			   &gen_snap);
-	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
-			   &release);
-	hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
-	hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
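+	/* total = guaranteed filters + best-effort filters currently in use */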
+	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
+	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
+		    ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
+		      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
+	return fcnt_prog;
 }
 
 /**
- * i40e_reset - wait for core reset to finish reset, reset pf if corer not seen
+ * i40e_get_global_fd_count - Get total FD filters programmed on device
  * @pf: board private structure
  **/
-static int i40e_reset(struct i40e_pf *pf)
+u32 i40e_get_global_fd_count(struct i40e_pf *pf)
 {
-	struct i40e_hw *hw = &pf->hw;
-	i40e_status ret;
+	u32 val, fcnt_prog;
 
-	ret = i40e_pf_reset(hw);
-	if (ret) {
-		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
-		set_bit(__I40E_RESET_FAILED, pf->state);
-		clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
-	} else {
-		pf->pfr_count++;
-	}
-	return ret;
+	val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
+	fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
+		    ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
+		     I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
+	return fcnt_prog;
 }
 
 /**
- * i40e_rebuild - rebuild using a saved config
+ * i40e_reenable_fdir_sb - Restore FDir SB capability
  * @pf: board private structure
- * @reinit: if the Main VSI needs to re-initialized.
- * @lock_acquired: indicates whether or not the lock has been acquired
- * before this function was called.
  **/
-static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
 {
-	struct i40e_hw *hw = &pf->hw;
-	u8 set_fc_aq_fail = 0;
-	i40e_status ret;
-	u32 val;
-	int v;
+	if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
+		if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
+		    (I40E_DEBUG_FD & pf->hw.debug_mask))
+			dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
+}
 
-	if (test_bit(__I40E_DOWN, pf->state))
-		goto clear_recovery;
-	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
+/**
+ * i40e_reenable_fdir_atr - Restore FDir ATR capability
+ * @pf: board private structure
+ **/
+static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
+{
+	if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
+		/* ATR uses the same filtering logic as SB rules. It only
+		 * functions properly if the input set mask is at the default
+		 * settings. It is safe to restore the default input set
+		 * because there are no active TCPv4 filter rules.
+		 */
+		i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
+					I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
+					I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
 
-	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
-	ret = i40e_init_adminq(&pf->hw);
-	if (ret) {
-		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
-			 i40e_stat_str(&pf->hw, ret),
-			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
-		goto clear_recovery;
+		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+		    (I40E_DEBUG_FD & pf->hw.debug_mask))
+			dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
 	}
-	i40e_get_oem_version(&pf->hw);
-
-	/* re-verify the eeprom if we just had an EMP reset */
-	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
-		i40e_verify_eeprom(pf);
+}
 
-	i40e_clear_pxe_mode(hw);
-	ret = i40e_get_capabilities(pf);
-	if (ret)
-		goto end_core_reset;
+/**
+ * i40e_delete_invalid_filter - Delete an invalid FDIR filter
+ * @pf: board private structure
+ * @filter: FDir filter to remove
+ */
+static void i40e_delete_invalid_filter(struct i40e_pf *pf,
+				       struct i40e_fdir_filter *filter)
+{
+	/* Update counters */
+	pf->fdir_pf_active_filters--;
+	pf->fd_inv = 0;
 
-	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
-				hw->func_caps.num_rx_qp, 0, 0);
-	if (ret) {
-		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
-		goto end_core_reset;
-	}
-	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
-	if (ret) {
-		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
-		goto end_core_reset;
+	switch (filter->flow_type) {
+	case TCP_V4_FLOW:
+		pf->fd_tcp4_filter_cnt--;
+		break;
+	case UDP_V4_FLOW:
+		pf->fd_udp4_filter_cnt--;
+		break;
+	case SCTP_V4_FLOW:
+		pf->fd_sctp4_filter_cnt--;
+		break;
+	case IP_USER_FLOW:
+		switch (filter->ip4_proto) {
+		case IPPROTO_TCP:
+			pf->fd_tcp4_filter_cnt--;
+			break;
+		case IPPROTO_UDP:
+			pf->fd_udp4_filter_cnt--;
+			break;
+		case IPPROTO_SCTP:
+			pf->fd_sctp4_filter_cnt--;
+			break;
+		case IPPROTO_IP:
+			pf->fd_ip4_filter_cnt--;
+			break;
+		}
+		break;
 	}
 
-#ifdef CONFIG_I40E_DCB
-	ret = i40e_init_pf_dcb(pf);
-	if (ret) {
-		dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
-		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
-		/* Continue without DCB enabled */
-	}
-#endif /* CONFIG_I40E_DCB */
-	/* do basic switch setup */
-	if (!lock_acquired)
-		rtnl_lock();
-	ret = i40e_setup_pf_switch(pf, reinit);
-	if (ret)
-		goto end_unlock;
+	/* Remove the filter from the list and free memory */
+	hlist_del(&filter->fdir_node);
+	kfree(filter);
+}
 
-	/* The driver only wants link up/down and module qualification
-	 * reports from firmware.  Note the negative logic.
-	 */
-	ret = i40e_aq_set_phy_int_mask(&pf->hw,
-				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
-					 I40E_AQ_EVENT_MEDIA_NA |
-					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
-	if (ret)
-		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
-			 i40e_stat_str(&pf->hw, ret),
-			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
-
-	/* make sure our flow control settings are restored */
-	ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
-	if (ret)
-		dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
-			i40e_stat_str(&pf->hw, ret),
-			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
-
-	/* Rebuild the VSIs and VEBs that existed before reset.
-	 * They are still in our local switch element arrays, so only
-	 * need to rebuild the switch model in the HW.
-	 *
-	 * If there were VEBs but the reconstitution failed, we'll try
-	 * try to recover minimal use by getting the basic PF VSI working.
-	 */
-	if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
-		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
-		/* find the one VEB connected to the MAC, and find orphans */
-		for (v = 0; v < I40E_MAX_VEB; v++) {
-			if (!pf->veb[v])
-				continue;
-
-			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
-			    pf->veb[v]->uplink_seid == 0) {
-				ret = i40e_reconstitute_veb(pf->veb[v]);
-
-				if (!ret)
-					continue;
+/**
+ * i40e_fdir_check_and_reenable - Re-enable FD ATR or SB if disabled
+ * @pf: board private structure
+ **/
+void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
+{
+	struct i40e_fdir_filter *filter;
+	u32 fcnt_prog, fcnt_avail;
+	struct hlist_node *node;
 
-				/* If Main VEB failed, we're in deep doodoo,
-				 * so give up rebuilding the switch and set up
-				 * for minimal rebuild of PF VSI.
-				 * If orphan failed, we'll report the error
-				 * but try to keep going.
-				 */
-				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
-					dev_info(&pf->pdev->dev,
-						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
-						 ret);
-					pf->vsi[pf->lan_vsi]->uplink_seid
-								= pf->mac_seid;
-					break;
-				} else if (pf->veb[v]->uplink_seid == 0) {
-					dev_info(&pf->pdev->dev,
-						 "rebuild of orphan VEB failed: %d\n",
-						 ret);
-				}
-			}
-		}
-	}
+	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
+		return;
 
-	if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
-		dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
-		/* no VEB, so rebuild only the Main VSI */
-		ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
-		if (ret) {
-			dev_info(&pf->pdev->dev,
-				 "rebuild of Main VSI failed: %d\n", ret);
-			goto end_unlock;
-		}
-	}
+	/* Check if we have enough room to re-enable FDir SB capability. */
+	fcnt_prog = i40e_get_global_fd_count(pf);
+	fcnt_avail = pf->fdir_pf_filter_count;
+	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
+	    (pf->fd_add_err == 0) ||
+	    (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt))
+		i40e_reenable_fdir_sb(pf);
 
-	/* Reconfigure hardware for allowing smaller MSS in the case
-	 * of TSO, so that we avoid the MDD being fired and causing
-	 * a reset in the case of small MSS+TSO.
+	/* We should wait for even more space before re-enabling ATR.
+	 * Additionally, we cannot enable ATR as long as we still have TCP SB
+	 * rules active.
 	 */
-#define I40E_REG_MSS          0x000E64DC
-#define I40E_REG_MSS_MIN_MASK 0x3FF0000
-#define I40E_64BYTE_MSS       0x400000
-	val = rd32(hw, I40E_REG_MSS);
-	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
-		val &= ~I40E_REG_MSS_MIN_MASK;
-		val |= I40E_64BYTE_MSS;
-		wr32(hw, I40E_REG_MSS, val);
-	}
+	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
+	    (pf->fd_tcp4_filter_cnt == 0))
+		i40e_reenable_fdir_atr(pf);
 
-	if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
-		msleep(75);
-		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
-		if (ret)
-			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
-				 i40e_stat_str(&pf->hw, ret),
-				 i40e_aq_str(&pf->hw,
-					     pf->hw.aq.asq_last_status));
+	/* if hw had a problem adding a filter, delete it */
+	if (pf->fd_inv > 0) {
+		hlist_for_each_entry_safe(filter, node,
+					  &pf->fdir_filter_list, fdir_node)
+			if (filter->fd_id == pf->fd_inv)
+				i40e_delete_invalid_filter(pf, filter);
 	}
-	/* reinit the misc interrupt */
-	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
-		ret = i40e_setup_misc_vector(pf);
-
-	/* Add a filter to drop all Flow control frames from any VSI from being
-	 * transmitted. By doing so we stop a malicious VF from sending out
-	 * PAUSE or PFC frames and potentially controlling traffic for other
-	 * PF/VF VSIs.
-	 * The FW can still send Flow control frames if enabled.
-	 */
-	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
-						       pf->main_vsi_seid);
-
-	/* restart the VSIs that were rebuilt and running before the reset */
-	i40e_pf_unquiesce_all_vsi(pf);
+}
 
-	/* Release the RTNL lock before we start resetting VFs */
-	if (!lock_acquired)
-		rtnl_unlock();
+#define I40E_MIN_FD_FLUSH_INTERVAL 10
+#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
+/**
+ * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
+ * @pf: board private structure
+ **/
+static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
+{
+	unsigned long min_flush_time;
+	int flush_wait_retry = 50;
+	bool disable_atr = false;
+	int fd_room;
+	int reg;
 
-	/* Restore promiscuous settings */
-	ret = i40e_set_promiscuous(pf, pf->cur_promisc);
-	if (ret)
-		dev_warn(&pf->pdev->dev,
-			 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
-			 pf->cur_promisc ? "on" : "off",
-			 i40e_stat_str(&pf->hw, ret),
-			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+	if (!time_after(jiffies, pf->fd_flush_timestamp +
+				 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
+		return;
 
-	i40e_reset_all_vfs(pf, true);
+	/* If the flush is happening too quickly and we have mostly SB rules,
+	 * we should not re-enable ATR for some time.
+	 */
+	min_flush_time = pf->fd_flush_timestamp +
+			 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
+	fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
 
-	/* tell the firmware that we're starting */
-	i40e_send_version(pf);
+	if (!(time_after(jiffies, min_flush_time)) &&
+	    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
+		if (I40E_DEBUG_FD & pf->hw.debug_mask)
+			dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
+		disable_atr = true;
+	}
 
-	/* We've already released the lock, so don't do it again */
-	goto end_core_reset;
+	pf->fd_flush_timestamp = jiffies;
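+	/* keep ATR disabled while the filter table is being flushed */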
+	set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
+	/* flush all filters */
+	wr32(&pf->hw, I40E_PFQF_CTL_1,
+	     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
+	i40e_flush(&pf->hw);
+	pf->fd_flush_cnt++;
+	pf->fd_add_err = 0;
+	do {
+		/* Check FD flush status every 5-6msec */
+		usleep_range(5000, 6000);
+		reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
+		if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
+			break;
+	} while (flush_wait_retry--);
 
-end_unlock:
-	if (!lock_acquired)
-		rtnl_unlock();
-end_core_reset:
-	clear_bit(__I40E_RESET_FAILED, pf->state);
-clear_recovery:
-	clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
+	if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
+		dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
+	} else {
+		/* replay sideband filters */
+		i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
+		if (!disable_atr && !pf->fd_tcp4_filter_cnt)
+			clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
+		clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
+		if (I40E_DEBUG_FD & pf->hw.debug_mask)
+			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
+	}
 }
 
 /**
- * i40e_reset_and_rebuild - reset and rebuild using a saved config
+ * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
  * @pf: board private structure
- * @reinit: if the Main VSI needs to re-initialized.
- * @lock_acquired: indicates whether or not the lock has been acquired
- * before this function was called.
  **/
-static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
-				   bool lock_acquired)
+u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
 {
-	int ret;
-	/* Now we wait for GRST to settle out.
-	 * We don't have to delete the VEBs or VSIs from the hw switch
-	 * because the reset will make them disappear.
-	 */
-	ret = i40e_reset(pf);
-	if (!ret)
-		i40e_rebuild(pf, reinit, lock_acquired);
+	return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
 }
 
+/* We can see up to 256 filter programming descriptors in transit if the
+ * filters are being applied really fast, before we see the first filter
+ * miss error on Rx queue 0. Accumulating enough error messages before
+ * reacting will make sure we don't cause a flush too often.
+ */
+#define I40E_MAX_FD_PROGRAM_ERROR 256
 /**
- * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
+ * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
  * @pf: board private structure
- *
- * Close up the VFs and other things in prep for a Core Reset,
- * then get ready to rebuild the world.
- * @lock_acquired: indicates whether or not the lock has been acquired
- * before this function was called.
  **/
-static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
+static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
 {
-	i40e_prep_for_reset(pf, lock_acquired);
-	i40e_reset_and_rebuild(pf, false, lock_acquired);
+
+	/* if interface is down do nothing */
+	if (test_bit(__I40E_DOWN, pf->state))
+		return;
+
+	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
+		i40e_fdir_flush_and_replay(pf);
+
+	i40e_fdir_check_and_reenable(pf);
+
 }
 
 /**
- * i40e_handle_mdd_event
- * @pf: pointer to the PF structure
- *
- * Called from the MDD irq handler to identify possibly malicious vfs
+ * i40e_vsi_link_event - notify VSI of a link event
+ * @vsi: vsi to be notified
+ * @link_up: link up or down
  **/
-static void i40e_handle_mdd_event(struct i40e_pf *pf)
+static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
 {
-	struct i40e_hw *hw = &pf->hw;
-	bool mdd_detected = false;
-	bool pf_mdd_detected = false;
-	struct i40e_vf *vf;
-	u32 reg;
-	int i;
-
-	if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
+	if (!vsi || (test_bit(__I40E_VSI_DOWN, vsi->state)))
 		return;
 
-	/* find what triggered the MDD event */
-	reg = rd32(hw, I40E_GL_MDET_TX);
-	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
-		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
-				I40E_GL_MDET_TX_PF_NUM_SHIFT;
-		u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
-				I40E_GL_MDET_TX_VF_NUM_SHIFT;
-		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
-				I40E_GL_MDET_TX_EVENT_SHIFT;
-		u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
-				I40E_GL_MDET_TX_QUEUE_SHIFT) -
-				pf->hw.func_caps.base_queue;
-		if (netif_msg_tx_err(pf))
-			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
-				 event, queue, pf_num, vf_num);
-		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
-		mdd_detected = true;
-	}
-	reg = rd32(hw, I40E_GL_MDET_RX);
-	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
-		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
-				I40E_GL_MDET_RX_FUNCTION_SHIFT;
-		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
-				I40E_GL_MDET_RX_EVENT_SHIFT;
-		u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
-				I40E_GL_MDET_RX_QUEUE_SHIFT) -
-				pf->hw.func_caps.base_queue;
-		if (netif_msg_rx_err(pf))
-			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
-				 event, queue, func);
-		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
-		mdd_detected = true;
-	}
-
-	if (mdd_detected) {
-		reg = rd32(hw, I40E_PF_MDET_TX);
-		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
-			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
-			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
-			pf_mdd_detected = true;
-		}
-		reg = rd32(hw, I40E_PF_MDET_RX);
-		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
-			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
-			dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
-			pf_mdd_detected = true;
-		}
-		/* Queue belongs to the PF, initiate a reset */
-		if (pf_mdd_detected) {
-			set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
-			i40e_service_event_schedule(pf);
-		}
-	}
-
-	/* see if one of the VFs needs its hand slapped */
-	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
-		vf = &(pf->vf[i]);
-		reg = rd32(hw, I40E_VP_MDET_TX(i));
-		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
-			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
-			vf->num_mdd_events++;
-			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
-				 i);
-		}
-
-		reg = rd32(hw, I40E_VP_MDET_RX(i));
-		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
-			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
-			vf->num_mdd_events++;
-			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
-				 i);
-		}
+	switch (vsi->type) {
+	case I40E_VSI_MAIN:
+		if (!vsi->netdev || !vsi->netdev_registered)
+			break;
 
-		if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
-			dev_info(&pf->pdev->dev,
-				 "Too many MDD events on VF %d, disabled\n", i);
-			dev_info(&pf->pdev->dev,
-				 "Use PF Control I/F to re-enable the VF\n");
-			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
+		if (link_up) {
+			netif_carrier_on(vsi->netdev);
+			netif_tx_wake_all_queues(vsi->netdev);
+		} else {
+			netif_carrier_off(vsi->netdev);
+			netif_tx_stop_all_queues(vsi->netdev);
 		}
-	}
-
-	/* re-enable mdd interrupt cause */
-	clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
-	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
-	reg |=  I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
-	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
-	i40e_flush(hw);
-}
+		break;
 
-static const char *i40e_tunnel_name(struct i40e_udp_port_config *port)
-{
-	switch (port->type) {
-	case UDP_TUNNEL_TYPE_VXLAN:
-		return "vxlan";
-	case UDP_TUNNEL_TYPE_GENEVE:
-		return "geneve";
+	case I40E_VSI_SRIOV:
+	case I40E_VSI_VMDQ2:
+	case I40E_VSI_CTRL:
+	case I40E_VSI_IWARP:
+	case I40E_VSI_MIRROR:
 	default:
-		return "unknown";
+		/* there is no notification for other VSIs */
+		break;
 	}
 }
 
 /**
- * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
- * @pf: board private structure
+ * i40e_veb_link_event - notify elements on the veb of a link event
+ * @veb: veb to be notified
+ * @link_up: link up or down
  **/
-static void i40e_sync_udp_filters(struct i40e_pf *pf)
+static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
 {
+	struct i40e_pf *pf;
 	int i;
 
-	/* loop through and set pending bit for all active UDP filters */
-	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
-		if (pf->udp_ports[i].port)
-			pf->pending_udp_bitmap |= BIT_ULL(i);
-	}
+	if (!veb || !veb->pf)
+		return;
+	pf = veb->pf;
+
+	/* depth first... */
+	for (i = 0; i < I40E_MAX_VEB; i++)
+		if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
+			i40e_veb_link_event(pf->veb[i], link_up);
 
-	pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
+	/* ... now the local VSIs */
+	for (i = 0; i < pf->num_alloc_vsi; i++)
+		if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
+			i40e_vsi_link_event(pf->vsi[i], link_up);
 }
 
 /**
- * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
+ * i40e_link_event - Update netif_carrier status
  * @pf: board private structure
  **/
-static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
+static void i40e_link_event(struct i40e_pf *pf)
 {
-	struct i40e_hw *hw = &pf->hw;
-	i40e_status ret;
-	u16 port;
-	int i;
+	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+	u8 new_link_speed, old_link_speed;
+	i40e_status status;
+	bool new_link, old_link;
+
+	/* set this to force the get_link_status call to refresh state */
+	pf->hw.phy.get_link_info = true;
+	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
+	status = i40e_get_link_status(&pf->hw, &new_link);
 
-	if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
+	/* On success, disable temp link polling */
+	if (status == I40E_SUCCESS) {
+		clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
+	} else {
+		/* Enable link polling temporarily until i40e_get_link_status
+		 * returns I40E_SUCCESS
+		 */
+		set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
+		dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
+			status);
 		return;
+	}
 
-	pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC;
+	old_link_speed = pf->hw.phy.link_info_old.link_speed;
+	new_link_speed = pf->hw.phy.link_info.link_speed;
 
-	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
-		if (pf->pending_udp_bitmap & BIT_ULL(i)) {
-			pf->pending_udp_bitmap &= ~BIT_ULL(i);
-			port = pf->udp_ports[i].port;
-			if (port)
-				ret = i40e_aq_add_udp_tunnel(hw, port,
-							pf->udp_ports[i].type,
-							NULL, NULL);
-			else
-				ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
+	if (new_link == old_link &&
+	    new_link_speed == old_link_speed &&
+	    (test_bit(__I40E_VSI_DOWN, vsi->state) ||
+	     new_link == netif_carrier_ok(vsi->netdev)))
+		return;
 
-			if (ret) {
-				dev_info(&pf->pdev->dev,
-					 "%s %s port %d, index %d failed, err %s aq_err %s\n",
-					 i40e_tunnel_name(&pf->udp_ports[i]),
-					 port ? "add" : "delete",
-					 port, i,
-					 i40e_stat_str(&pf->hw, ret),
-					 i40e_aq_str(&pf->hw,
-						     pf->hw.aq.asq_last_status));
-				pf->udp_ports[i].port = 0;
-			}
-		}
-	}
+	i40e_print_link_message(vsi, new_link);
+
+	/* Notify the base of the switch tree connected to
+	 * the link.  Floating VEBs are not notified.
+	 */
+	if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
+		i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
+	else
+		i40e_vsi_link_event(vsi, new_link);
+
+	if (pf->vf)
+		i40e_vc_notify_link_state(pf);
+#ifdef HAVE_PTP_1588_CLOCK
+
+	if (pf->flags & I40E_FLAG_PTP)
+		i40e_ptp_set_increment(pf);
+#endif /* HAVE_PTP_1588_CLOCK */
 }
 
 /**
- * i40e_service_task - Run the driver's async subtasks
- * @work: pointer to work_struct containing our data
+ * i40e_watchdog_subtask - periodic checks not using event driven response
+ * @pf: board private structure
  **/
-static void i40e_service_task(struct work_struct *work)
+static void i40e_watchdog_subtask(struct i40e_pf *pf)
 {
-	struct i40e_pf *pf = container_of(work,
-					  struct i40e_pf,
-					  service_task);
-	unsigned long start_time = jiffies;
+	int i;
 
-	/* don't bother with service tasks if a reset is in progress */
-	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+	/* if interface is down do nothing */
+	if (test_bit(__I40E_DOWN, pf->state) ||
+	    test_bit(__I40E_CONFIG_BUSY, pf->state))
 		return;
 
-	if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
+	/* make sure we don't do these things too often */
+	if (time_before(jiffies, (pf->service_timer_previous +
+				  pf->service_timer_period)))
 		return;
+	pf->service_timer_previous = jiffies;
 
-	i40e_detect_recover_hung(pf);
-	i40e_sync_filters_subtask(pf);
-	i40e_reset_subtask(pf);
-	i40e_handle_mdd_event(pf);
-	i40e_vc_process_vflr_event(pf);
-	i40e_watchdog_subtask(pf);
-	i40e_fdir_reinit_subtask(pf);
-	if (pf->flags & I40E_FLAG_CLIENT_RESET) {
-		/* Client subtask will reopen next time through. */
-		i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
-		pf->flags &= ~I40E_FLAG_CLIENT_RESET;
-	} else {
-		i40e_client_subtask(pf);
-		if (pf->flags & I40E_FLAG_CLIENT_L2_CHANGE) {
-			i40e_notify_client_of_l2_param_changes(
-							pf->vsi[pf->lan_vsi]);
-			pf->flags &= ~I40E_FLAG_CLIENT_L2_CHANGE;
-		}
-	}
-	i40e_sync_filters_subtask(pf);
-	i40e_sync_udp_filters_subtask(pf);
-	i40e_clean_adminq_subtask(pf);
-
-	/* flush memory to make sure state is correct before next watchdog */
-	smp_mb__before_atomic();
-	clear_bit(__I40E_SERVICE_SCHED, pf->state);
+	if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
+	    test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
+		i40e_link_event(pf);
 
-	/* If the tasks have taken longer than one timer cycle or there
-	 * is more work to be done, reschedule the service task now
-	 * rather than wait for the timer to tick again.
+	/* Update the stats for active netdevs so the network stack
+	 * can look at updated numbers whenever it cares to
 	 */
-	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
-	    test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state)		 ||
-	    test_bit(__I40E_MDD_EVENT_PENDING, pf->state)		 ||
-	    test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
-		i40e_service_event_schedule(pf);
-}
+	for (i = 0; i < pf->num_alloc_vsi; i++)
+		if (pf->vsi[i] && pf->vsi[i]->netdev)
+			i40e_update_stats(pf->vsi[i]);
 
-/**
- * i40e_service_timer - timer callback
- * @data: pointer to PF struct
- **/
-static void i40e_service_timer(unsigned long data)
-{
-	struct i40e_pf *pf = (struct i40e_pf *)data;
+	if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
+		/* Update the stats for the active switching components */
+		for (i = 0; i < I40E_MAX_VEB; i++)
+			if (pf->veb[i])
+				i40e_update_veb_stats(pf->veb[i]);
+	}
+#ifdef HAVE_PTP_1588_CLOCK
 
-	mod_timer(&pf->service_timer,
-		  round_jiffies(jiffies + pf->service_timer_period));
-	i40e_service_event_schedule(pf);
+	i40e_ptp_rx_hang(pf);
+	i40e_ptp_tx_hang(pf);
+#endif /* HAVE_PTP_1588_CLOCK */
 }
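
The time_before() check at the top of the watchdog is a generic rate-limiting idiom: stamp the last run and bail early if a full service period has not yet elapsed. A userspace analogue of just that guard, assuming CLOCK_MONOTONIC and hypothetical names:

	#include <stdbool.h>
	#include <time.h>

	static struct timespec previous;
	static const long period_ms = 1000;	/* service_timer_period analogue */

	static bool watchdog_due(void)
	{
		struct timespec now;
		long elapsed_ms;

		clock_gettime(CLOCK_MONOTONIC, &now);
		elapsed_ms = (now.tv_sec - previous.tv_sec) * 1000 +
			     (now.tv_nsec - previous.tv_nsec) / 1000000;
		if (elapsed_ms < period_ms)
			return false;	/* too soon, skip this pass */
		previous = now;
		return true;
	}
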
 
 /**
- * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
- * @vsi: the VSI being configured
+ * i40e_reset_subtask - Set up for resetting the device and driver
+ * @pf: board private structure
  **/
-static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
+static void i40e_reset_subtask(struct i40e_pf *pf)
 {
-	struct i40e_pf *pf = vsi->back;
-
-	switch (vsi->type) {
-	case I40E_VSI_MAIN:
-		vsi->alloc_queue_pairs = pf->num_lan_qps;
-		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
-				      I40E_REQ_DESCRIPTOR_MULTIPLE);
-		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
-			vsi->num_q_vectors = pf->num_lan_msix;
-		else
-			vsi->num_q_vectors = 1;
-
-		break;
-
-	case I40E_VSI_FDIR:
-		vsi->alloc_queue_pairs = 1;
-		vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
-				      I40E_REQ_DESCRIPTOR_MULTIPLE);
-		vsi->num_q_vectors = pf->num_fdsb_msix;
-		break;
-
-	case I40E_VSI_VMDQ2:
-		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
-		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
-				      I40E_REQ_DESCRIPTOR_MULTIPLE);
-		vsi->num_q_vectors = pf->num_vmdq_msix;
-		break;
+	u32 reset_flags = 0;
 
-	case I40E_VSI_SRIOV:
-		vsi->alloc_queue_pairs = pf->num_vf_qps;
-		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
-				      I40E_REQ_DESCRIPTOR_MULTIPLE);
-		break;
+	if (test_and_clear_bit(__I40E_REINIT_REQUESTED, pf->state))
+		reset_flags |= BIT(__I40E_REINIT_REQUESTED);
+	if (test_and_clear_bit(__I40E_PF_RESET_REQUESTED, pf->state))
+		reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
+	if (test_and_clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state))
+		reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
+	if (test_and_clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state))
+		reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
+	if (test_and_clear_bit(__I40E_DOWN_REQUESTED, pf->state))
+		reset_flags |= BIT(__I40E_DOWN_REQUESTED);
 
-	default:
-		WARN_ON(1);
-		return -ENODATA;
+	/* If there's a recovery already waiting, it takes
+	 * precedence over starting a new reset sequence.
+	 */
+	if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
+		i40e_prep_for_reset(pf, false);
+		i40e_reset(pf);
+		i40e_rebuild(pf, false, false);
 	}
 
-	return 0;
+	/* If a reset was requested, proceed unless we're already down or resetting */
+	if (reset_flags &&
+	    !test_bit(__I40E_DOWN, pf->state) &&
+	    !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
+		i40e_do_reset(pf, reset_flags, false);
+	}
 }
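
Note how the subtask consumes each request with test_and_clear_bit() and folds it into a local flags word: a request that races in while the subtask runs is either picked up now or stays set for the next pass, but is never lost. The consume-and-accumulate step in isolation, sketched with C11 atomics and made-up bit names:

	#include <stdatomic.h>
	#include <stdint.h>

	enum { REQ_REINIT, REQ_PF_RESET, REQ_CORE_RESET };

	static _Atomic uint32_t state;

	/* atomically clear one bit, report whether it had been set */
	static int test_and_clear(int bit)
	{
		return (atomic_fetch_and(&state, ~(1u << bit)) >> bit) & 1;
	}

	static uint32_t collect_reset_flags(void)
	{
		uint32_t flags = 0;

		if (test_and_clear(REQ_REINIT))
			flags |= 1u << REQ_REINIT;
		if (test_and_clear(REQ_PF_RESET))
			flags |= 1u << REQ_PF_RESET;
		if (test_and_clear(REQ_CORE_RESET))
			flags |= 1u << REQ_CORE_RESET;
		return flags;
	}
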
 
 /**
- * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
- * @type: VSI pointer
- * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
- *
- * On error: returns error code (negative)
- * On success: returns 0
+ * i40e_handle_link_event - Handle link event
+ * @pf: board private structure
+ * @e: event info posted on ARQ
  **/
-static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
+static void i40e_handle_link_event(struct i40e_pf *pf,
+				   struct i40e_arq_event_info *e)
 {
-	struct i40e_ring **next_rings;
-	int size;
-	int ret = 0;
+	struct i40e_aqc_get_link_status *status =
+		(struct i40e_aqc_get_link_status *)&e->desc.params.raw;
 
-	/* allocate memory for both Tx, XDP Tx and Rx ring pointers */
-	size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
-	       (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
-	vsi->tx_rings = kzalloc(size, GFP_KERNEL);
-	if (!vsi->tx_rings)
-		return -ENOMEM;
-	next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
-	if (i40e_enabled_xdp_vsi(vsi)) {
-		vsi->xdp_rings = next_rings;
-		next_rings += vsi->alloc_queue_pairs;
-	}
-	vsi->rx_rings = next_rings;
+	/* Do a new status request to re-enable LSE reporting
+	 * and load new status information into the hw struct.
+	 * This completely ignores any state information
+	 * in the ARQ event info, instead choosing to always
+	 * issue the AQ update link status command.
+	 */
+	i40e_link_event(pf);
 
-	if (alloc_qvectors) {
-		/* allocate memory for q_vector pointers */
-		size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
-		vsi->q_vectors = kzalloc(size, GFP_KERNEL);
-		if (!vsi->q_vectors) {
-			ret = -ENOMEM;
-			goto err_vectors;
+	/* Check if module meets thermal requirements */
+	if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
+		dev_err(&pf->pdev->dev,
+			"Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
+		dev_err(&pf->pdev->dev,
+			"Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
+	} else {
+		/* Check for an unqualified module; if link is down, suppress
+		 * the message when link was forced down.
+		 */
+		if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
+		    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
+		    (!(status->link_info & I40E_AQ_LINK_UP)) &&
+		    (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
+			dev_err(&pf->pdev->dev,
+				"Rx/Tx is disabled on this device because an unsupported SFP+ module type was detected.\n");
+			dev_err(&pf->pdev->dev,
+				"Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
 		}
 	}
-	return ret;
-
-err_vectors:
-	kfree(vsi->tx_rings);
-	return ret;
 }
 
 /**
- * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
+ * i40e_clean_adminq_subtask - Clean the AdminQ rings
  * @pf: board private structure
- * @type: type of VSI
- *
- * On error: returns error code (negative)
- * On success: returns vsi index in PF (positive)
  **/
-static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
+static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
 {
-	int ret = -ENODEV;
-	struct i40e_vsi *vsi;
-	int vsi_idx;
-	int i;
+	struct i40e_arq_event_info event;
+	struct i40e_hw *hw = &pf->hw;
+	u16 pending, i = 0;
+	i40e_status ret;
+	u16 opcode;
+	u32 oldval;
+	u32 val;
 
-	/* Need to protect the allocation of the VSIs at the PF level */
-	mutex_lock(&pf->switch_mutex);
+	/* Do not run clean AQ when PF reset fails */
+	if (test_bit(__I40E_RESET_FAILED, pf->state))
+		return;
 
-	/* VSI list may be fragmented if VSI creation/destruction has
-	 * been happening.  We can afford to do a quick scan to look
-	 * for any free VSIs in the list.
-	 *
-	 * find next empty vsi slot, looping back around if necessary
-	 */
-	i = pf->next_vsi;
-	while (i < pf->num_alloc_vsi && pf->vsi[i])
-		i++;
-	if (i >= pf->num_alloc_vsi) {
-		i = 0;
-		while (i < pf->next_vsi && pf->vsi[i])
-			i++;
+	/* check for error indications */
+	val = rd32(&pf->hw, pf->hw.aq.arq.len);
+	oldval = val;
+	if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
+		if (hw->debug_mask & I40E_DEBUG_AQ)
+			dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
+		val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
 	}
-
-	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
-		vsi_idx = i;             /* Found one! */
-	} else {
-		ret = -ENODEV;
-		goto unlock_pf;  /* out of VSI slots! */
+	if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
+		if (hw->debug_mask & I40E_DEBUG_AQ)
+			dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
+		val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
+		pf->arq_overflows++;
 	}
-	pf->next_vsi = ++i;
+	if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
+		if (hw->debug_mask & I40E_DEBUG_AQ)
+			dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
+		val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
+	}
+	if (oldval != val)
+		wr32(&pf->hw, pf->hw.aq.arq.len, val);
 
-	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
-	if (!vsi) {
-		ret = -ENOMEM;
-		goto unlock_pf;
+	val = rd32(&pf->hw, pf->hw.aq.asq.len);
+	oldval = val;
+	if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
+		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
+			dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
+		val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
 	}
-	vsi->type = type;
-	vsi->back = pf;
-	set_bit(__I40E_VSI_DOWN, vsi->state);
-	vsi->flags = 0;
-	vsi->idx = vsi_idx;
-	vsi->int_rate_limit = 0;
-	vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
-				pf->rss_table_size : 64;
-	vsi->netdev_registered = false;
-	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
-	hash_init(vsi->mac_filter_hash);
-	vsi->irqs_ready = false;
+	if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
+		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
+			dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
+		val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
+	}
+	if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
+		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
+			dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
+		val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
+	}
+	if (oldval != val)
+		wr32(&pf->hw, pf->hw.aq.asq.len, val);
 
-	ret = i40e_set_num_rings_in_vsi(vsi);
-	if (ret)
-		goto err_rings;
+	event.buf_len = I40E_MAX_AQ_BUF_SIZE;
+	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
+	if (!event.msg_buf)
+		return;
 
-	ret = i40e_vsi_alloc_arrays(vsi, true);
-	if (ret)
-		goto err_rings;
+	do {
+		ret = i40e_clean_arq_element(hw, &event, &pending);
+		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
+			break;
+		else if (ret) {
+			dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
+			break;
+		}
 
-	/* Setup default MSIX irq handler for VSI */
-	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
+		opcode = LE16_TO_CPU(event.desc.opcode);
+		switch (opcode) {
 
-	/* Initialize VSI lock */
-	spin_lock_init(&vsi->mac_filter_hash_lock);
-	pf->vsi[vsi_idx] = vsi;
-	ret = vsi_idx;
-	goto unlock_pf;
+		case i40e_aqc_opc_get_link_status:
+			i40e_handle_link_event(pf, &event);
+			break;
+		case i40e_aqc_opc_send_msg_to_pf:
+			ret = i40e_vc_process_vf_msg(pf,
+					le16_to_cpu(event.desc.retval),
+					le32_to_cpu(event.desc.cookie_high),
+					le32_to_cpu(event.desc.cookie_low),
+					event.msg_buf,
+					event.msg_len);
+			break;
+		case i40e_aqc_opc_lldp_update_mib:
+			dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
+#ifdef CONFIG_DCB
+			rtnl_lock();
+			ret = i40e_handle_lldp_event(pf, &event);
+			rtnl_unlock();
+#endif /* CONFIG_DCB */
+			break;
+		case i40e_aqc_opc_event_lan_overflow:
+			dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
+			i40e_handle_lan_overflow_event(pf, &event);
+			break;
+		case i40e_aqc_opc_send_msg_to_peer:
+			dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
+			break;
+		case i40e_aqc_opc_nvm_erase:
+		case i40e_aqc_opc_nvm_update:
+		case i40e_aqc_opc_oem_post_update:
+			i40e_debug(&pf->hw, I40E_DEBUG_NVM,
+				   "ARQ NVM operation 0x%04x completed\n",
+				   opcode);
+			break;
+		default:
+			dev_info(&pf->pdev->dev,
+				 "ARQ: Unknown event 0x%04x ignored\n",
+				 opcode);
+			break;
+		}
+	} while (i++ < pf->adminq_work_limit);
+
+	if (i < pf->adminq_work_limit)
+		clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
+
+	/* re-enable Admin queue interrupt cause */
+	val = rd32(hw, I40E_PFINT_ICR0_ENA);
+	val |=  I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
+	wr32(hw, I40E_PFINT_ICR0_ENA, val);
+	i40e_flush(hw);
+
+	kfree(event.msg_buf);
+}
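
Two details of this drain loop generalize well: work per pass is bounded by adminq_work_limit so a noisy queue cannot monopolize the service task, and the pending flag is cleared only when the loop ran out of events before it ran out of budget. The control shape on its own, with stub deq()/handle() standing in for i40e_clean_arq_element() and the opcode switch:

	#include <stdbool.h>

	#define WORK_LIMIT 16

	static int backlog = 40;	/* pretend 40 events are queued */
	static bool pending = true;

	static bool deq(int *event)	/* false once the queue is empty */
	{
		if (!backlog)
			return false;
		*event = backlog--;
		return true;
	}

	static void handle(int event)
	{
		(void)event;		/* real code dispatches on opcode */
	}

	static void clean_queue_subtask(void)
	{
		int event, i = 0;

		do {
			if (!deq(&event))
				break;
			handle(event);
		} while (i++ < WORK_LIMIT);

		/* only a fully drained pass may clear the pending flag;
		 * otherwise the next pass picks up the remainder
		 */
		if (i < WORK_LIMIT)
			pending = false;
	}
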
+
+/**
+ * i40e_verify_eeprom - make sure eeprom is good to use
+ * @pf: board private structure
+ **/
+static void i40e_verify_eeprom(struct i40e_pf *pf)
+{
+	int err;
+
+	err = i40e_diag_eeprom_test(&pf->hw);
+	if (err) {
+		/* retry in case of garbage read */
+		err = i40e_diag_eeprom_test(&pf->hw);
+		if (err) {
+			dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
+				 err);
+			set_bit(__I40E_BAD_EEPROM, pf->state);
+		}
+	}
+
+	if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
+		dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
+		clear_bit(__I40E_BAD_EEPROM, pf->state);
+	}
+}
+
+/**
+ * i40e_enable_pf_switch_lb
+ * @pf: pointer to the PF structure
+ *
+ * enable switch loop back or die - no point in a return value
+ **/
+static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
+{
+	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+	struct i40e_vsi_context ctxt;
+	int ret;
+
+	ctxt.seid = pf->main_vsi_seid;
+	ctxt.pf_num = pf->hw.pf_id;
+	ctxt.vf_num = 0;
+	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+	if (ret) {
+		dev_info(&pf->pdev->dev,
+			 "couldn't get PF vsi config, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+		return;
+	}
+	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+	ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+	if (ret) {
+		dev_info(&pf->pdev->dev,
+			 "update vsi switch failed, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+	}
+}
+
+/**
+ * i40e_disable_pf_switch_lb
+ * @pf: pointer to the PF structure
+ *
+ * disable switch loop back or die - no point in a return value
+ **/
+static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
+{
+	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+	struct i40e_vsi_context ctxt;
+	int ret;
+
+	ctxt.seid = pf->main_vsi_seid;
+	ctxt.pf_num = pf->hw.pf_id;
+	ctxt.vf_num = 0;
+	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+	if (ret) {
+		dev_info(&pf->pdev->dev,
+			 "couldn't get PF vsi config, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+		return;
+	}
+	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+	ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+	if (ret) {
+		dev_info(&pf->pdev->dev,
+			 "update vsi switch failed, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+	}
+}
+
+/**
+ * i40e_config_bridge_mode - Configure the HW bridge mode
+ * @veb: pointer to the bridge instance
+ *
+ * Configure the loop back mode for the LAN VSI that is downlink to the
+ * specified HW bridge instance. It is expected this function is called
+ * when a new HW bridge is instantiated.
+ **/
+static void i40e_config_bridge_mode(struct i40e_veb *veb)
+{
+	struct i40e_pf *pf = veb->pf;
+
+#ifdef HAVE_BRIDGE_ATTRIBS
+	if (pf->hw.debug_mask & I40E_DEBUG_LAN)
+		dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
+			 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
+		i40e_disable_pf_switch_lb(pf);
+	else
+		i40e_enable_pf_switch_lb(pf);
+#else
+	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
+		i40e_enable_pf_switch_lb(pf);
+	else
+		i40e_disable_pf_switch_lb(pf);
+#endif
+}
+
+/**
+ * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
+ * @veb: pointer to the VEB instance
+ *
+ * This is a recursive function that first builds the attached VSIs then
+ * recurses in to build the next layer of VEB.  We track the connections
+ * through our own index numbers because the seid's from the HW could
+ * change across the reset.
+ **/
+static int i40e_reconstitute_veb(struct i40e_veb *veb)
+{
+	struct i40e_vsi *ctl_vsi = NULL;
+	struct i40e_pf *pf = veb->pf;
+	int v, veb_idx;
+	int ret;
+
+	/* build VSI that owns this VEB, temporarily attached to base VEB */
+	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
+		if (pf->vsi[v] &&
+		    pf->vsi[v]->veb_idx == veb->idx &&
+		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
+			ctl_vsi = pf->vsi[v];
+			break;
+		}
+	}
+	if (!ctl_vsi) {
+		dev_info(&pf->pdev->dev,
+			 "missing owner VSI for veb_idx %d\n", veb->idx);
+		ret = -ENOENT;
+		goto end_reconstitute;
+	}
+	if (ctl_vsi != pf->vsi[pf->lan_vsi])
+		ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
+	ret = i40e_add_vsi(ctl_vsi);
+	if (ret) {
+		dev_info(&pf->pdev->dev,
+			 "rebuild of veb_idx %d owner VSI failed: %d\n",
+			 veb->idx, ret);
+		goto end_reconstitute;
+	}
+	i40e_vsi_reset_stats(ctl_vsi);
+
+	/* create the VEB in the switch and move the VSI onto the VEB */
+	ret = i40e_add_veb(veb, ctl_vsi);
+	if (ret)
+		goto end_reconstitute;
+
+#ifdef HAVE_BRIDGE_ATTRIBS
+	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
+		veb->bridge_mode = BRIDGE_MODE_VEB;
+	else
+		veb->bridge_mode = BRIDGE_MODE_VEPA;
+#endif
+	i40e_config_bridge_mode(veb);
+
+	/* create the remaining VSIs attached to this VEB */
+	for (v = 0; v < pf->num_alloc_vsi; v++) {
+		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
+			continue;
+
+		if (pf->vsi[v]->veb_idx == veb->idx) {
+			struct i40e_vsi *vsi = pf->vsi[v];
+
+			vsi->uplink_seid = veb->seid;
+			ret = i40e_add_vsi(vsi);
+			if (ret) {
+				dev_info(&pf->pdev->dev,
+					 "rebuild of vsi_idx %d failed: %d\n",
+					 v, ret);
+				goto end_reconstitute;
+			}
+			i40e_vsi_reset_stats(vsi);
+		}
+	}
+
+	/* create any VEBs attached to this VEB - RECURSION */
+	for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
+		if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
+			pf->veb[veb_idx]->uplink_seid = veb->seid;
+			ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
+			if (ret)
+				break;
+		}
+	}
+
+end_reconstitute:
+	return ret;
+}
+
+/**
+ * i40e_get_capabilities - get info about the HW
+ * @pf: the PF struct
+ * @list_type: admin queue opcode list type
+ **/
+static int i40e_get_capabilities(struct i40e_pf *pf,
+				 enum i40e_admin_queue_opc list_type)
+{
+	struct i40e_aqc_list_capabilities_element_resp *cap_buf;
+	u16 data_size;
+	int buf_len;
+	int err;
+
+	buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
+	do {
+		cap_buf = kzalloc(buf_len, GFP_KERNEL);
+		if (!cap_buf)
+			return -ENOMEM;
+
+		/* this loads the data into the hw struct for us */
+		err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
+						    &data_size, list_type,
+						    NULL);
+		/* data loaded, buffer no longer needed */
+		kfree(cap_buf);
+
+		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
+			/* retry with a larger buffer */
+			buf_len = data_size;
+		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
+			dev_info(&pf->pdev->dev,
+				 "capability discovery failed, err %s aq_err %s\n",
+				 i40e_stat_str(&pf->hw, err),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
+			return -ENODEV;
+		}
+	} while (err);
+
+	if (pf->hw.debug_mask & I40E_DEBUG_USER) {
+		if (list_type == i40e_aqc_opc_list_func_capabilities) {
+			dev_info(&pf->pdev->dev,
+				 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
+				 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
+				 pf->hw.func_caps.num_msix_vectors,
+				 pf->hw.func_caps.num_msix_vectors_vf,
+				 pf->hw.func_caps.fd_filters_guaranteed,
+				 pf->hw.func_caps.fd_filters_best_effort,
+				 pf->hw.func_caps.num_tx_qp,
+				 pf->hw.func_caps.num_vsis);
+		} else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
+			dev_info(&pf->pdev->dev,
+				 "switch_mode=0x%04x, function_valid=0x%08x\n",
+				 pf->hw.dev_caps.switch_mode,
+				 pf->hw.dev_caps.valid_functions);
+			dev_info(&pf->pdev->dev,
+				 "SR-IOV=%d, num_vfs for all function=%u\n",
+				 pf->hw.dev_caps.sr_iov_1_1,
+				 pf->hw.dev_caps.num_vfs);
+			dev_info(&pf->pdev->dev,
+				 "num_vsis=%u, num_rx:%u, num_tx=%u\n",
+				 pf->hw.dev_caps.num_vsis,
+				 pf->hw.dev_caps.num_rx_qp,
+				 pf->hw.dev_caps.num_tx_qp);
+		}
+	}
+
+	return 0;
+}
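
The discovery loop is a common AdminQ idiom: start with a guessed buffer, and when the firmware answers ENOMEM it also reports the size it actually wanted, so the caller frees, reallocates at that size, and retries. The grow-and-retry shape reduced to userspace, with query() as a stand-in for the discover call:

	#include <errno.h>
	#include <stdlib.h>

	static size_t real_size = 40 * 96;	/* what the "firmware" needs */

	/* stand-in: fail with -ENOMEM and report the required size when
	 * the caller's buffer is too small
	 */
	static int query(void *buf, size_t len, size_t *needed)
	{
		(void)buf;
		*needed = real_size;
		return len < real_size ? -ENOMEM : 0;
	}

	static int get_caps(void)
	{
		size_t buf_len = 40 * 64;	/* initial guess */
		size_t needed;
		void *buf;
		int err;

		do {
			buf = calloc(1, buf_len);
			if (!buf)
				return -ENOMEM;
			err = query(buf, buf_len, &needed);
			free(buf);	/* response consumed inside query() */
			if (err == -ENOMEM)
				buf_len = needed;	/* grow and retry */
			else if (err)
				return err;
		} while (err);

		return 0;
	}
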
+
+static int i40e_vsi_clear(struct i40e_vsi *vsi);
+
+/**
+ * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
+ * @pf: board private structure
+ **/
+static void i40e_fdir_sb_setup(struct i40e_pf *pf)
+{
+	struct i40e_vsi *vsi;
+
+	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+		return;
+
+	/* find existing VSI and see if it needs configuring */
+	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
+
+	/* create a new VSI if none exists */
+	if (!vsi) {
+		vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
+				     pf->vsi[pf->lan_vsi]->seid, 0);
+		if (!vsi) {
+			dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
+			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
+			return;
+		}
+	}
+
+	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
+}
+
+/**
+ * i40e_fdir_teardown - release the Flow Director resources
+ * @pf: board private structure
+ **/
+static void i40e_fdir_teardown(struct i40e_pf *pf)
+{
+	struct i40e_vsi *vsi;
+
+	i40e_fdir_filter_exit(pf);
+	i40e_cloud_filter_exit(pf);
+	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
+	if (vsi)
+		i40e_vsi_release(vsi);
+}
+
+/**
+ * i40e_prep_for_reset - prep for the core to reset
+ * @pf: board private structure
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
+ *
+ * Close up the VFs and other things in prep for PF Reset.
+ **/
+static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
+{
+	struct i40e_hw *hw = &pf->hw;
+	i40e_status ret = 0;
+	u32 v;
+
+	clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
+	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+		return;
+	if (i40e_check_asq_alive(&pf->hw))
+		i40e_vc_notify_reset(pf);
+
+	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
+
+	/* TODO: warn any registered clients */
+	/* quiesce the VSIs and their queues that are not already DOWN */
+	/* pf_quiesce_all_vsi modifies netdev structures - rtnl_lock needed */
+	if (!lock_acquired)
+		rtnl_lock();
+	i40e_pf_quiesce_all_vsi(pf);
+	if (!lock_acquired)
+		rtnl_unlock();
+
+	for (v = 0; v < pf->num_alloc_vsi; v++) {
+		if (pf->vsi[v])
+			pf->vsi[v]->seid = 0;
+	}
+
+	i40e_shutdown_adminq(&pf->hw);
+
+	/* call shutdown HMC */
+	if (hw->hmc.hmc_obj) {
+		ret = i40e_shutdown_lan_hmc(hw);
+		if (ret)
+			dev_warn(&pf->pdev->dev,
+				 "shutdown_lan_hmc failed: %d\n", ret);
+	}
+
+#ifdef HAVE_PTP_1588_CLOCK
+	/* Save the current PTP time so that we can restore the time after the
+	 * reset completes.
+	 */
+	i40e_ptp_save_hw_time(pf);
+#endif /* HAVE_PTP_1588_CLOCK */
+}
+
+/**
+ * i40e_send_version - update firmware with driver version
+ * @pf: PF struct
+ */
+static void i40e_send_version(struct i40e_pf *pf)
+{
+	struct i40e_driver_version dv;
+
+	dv.major_version = DRV_VERSION_MAJOR;
+	dv.minor_version = DRV_VERSION_MINOR;
+	dv.build_version = DRV_VERSION_BUILD;
+	dv.subbuild_version = 0;
+	strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
+	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
+}
+
+/**
+ * i40e_get_oem_version - get OEM specific version information
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_get_oem_version(struct i40e_hw *hw)
+{
+	u16 block_offset = 0xffff;
+	u16 block_length = 0;
+	u16 capabilities = 0;
+	u16 gen_snap = 0;
+	u16 release = 0;
+
+#define I40E_SR_NVM_OEM_VERSION_PTR		0x1B
+#define I40E_NVM_OEM_LENGTH_OFFSET		0x00
+#define I40E_NVM_OEM_CAPABILITIES_OFFSET	0x01
+#define I40E_NVM_OEM_GEN_OFFSET			0x02
+#define I40E_NVM_OEM_RELEASE_OFFSET		0x03
+#define I40E_NVM_OEM_CAPABILITIES_MASK		0x000F
+#define I40E_NVM_OEM_LENGTH			3
+
+	/* Check if pointer to OEM version block is valid. */
+	i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
+	if ((block_offset & 0x7fff) == 0x7fff)
+		return;
+
+	/* Check if OEM version block has correct length. */
+	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
+			   &block_length);
+	if (block_length < I40E_NVM_OEM_LENGTH)
+		return;
+
+	/* Check if OEM version format is as expected. */
+	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
+			   &capabilities);
+	if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
+		return;
+
+	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
+			   &gen_snap);
+	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
+			   &release);
+	hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
+	hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
+}
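
The last step packs two 16-bit NVM words into the single 32-bit oem_ver field, generation/snapshot in the high half and release in the low half. As a one-liner, with an illustrative shift standing in for I40E_OEM_SNAP_SHIFT:

	#include <stdint.h>

	#define OEM_SNAP_SHIFT 16	/* illustrative; see the real define */

	static uint32_t pack_oem_ver(uint16_t gen_snap, uint16_t release)
	{
		return ((uint32_t)gen_snap << OEM_SNAP_SHIFT) | release;
	}
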
+
+/**
+ * i40e_reset - wait for core reset to finish, reset the PF if a CORER was not seen
+ * @pf: board private structure
+ **/
+static int i40e_reset(struct i40e_pf *pf)
+{
+	struct i40e_hw *hw = &pf->hw;
+	i40e_status ret;
+
+	ret = i40e_pf_reset(hw);
+	if (ret) {
+		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
+		set_bit(__I40E_RESET_FAILED, pf->state);
+		clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
+	} else {
+		pf->pfr_count++;
+	}
+	return ret;
+}
+
+/**
+ * i40e_rebuild - rebuild using a saved config
+ * @pf: board private structure
+ * @reinit: if the Main VSI needs to be re-initialized.
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
+ **/
+static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+{
+	int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
+	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+	struct i40e_hw *hw = &pf->hw;
+	u8 set_fc_aq_fail = 0;
+	i40e_status ret;
+	u32 val;
+	int v;
+
+	if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
+	    i40e_check_recovery_mode(pf)) {
+#ifdef SIOCETHTOOL
+		i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
+#endif
+	}
+
+	if (test_bit(__I40E_DOWN, pf->state) &&
+	    !test_bit(__I40E_RECOVERY_MODE, pf->state) &&
+	    !old_recovery_mode_bit)
+		goto clear_recovery;
+	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
+
+	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
+	ret = i40e_init_adminq(&pf->hw);
+	if (ret) {
+		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+		goto clear_recovery;
+	}
+	i40e_get_oem_version(&pf->hw);
+
+	if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
+	    ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
+	     hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
+		/* The following delay is necessary for 4.33 firmware and older
+		 * to recover after EMP reset. 200 ms should suffice but we
+		 * use 300 ms here to be sure that FW is ready to operate
+		 * after reset.
+		 */
+		mdelay(300);
+	}
+
+	/* re-verify the eeprom if we just had an EMP reset */
+	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
+		i40e_verify_eeprom(pf);
+
+	/* if we are going out of or into recovery mode we have to act
+	 * accordingly with regard to resources initialization
+	 * and deinitialization
+	 */
+	if (test_bit(__I40E_RECOVERY_MODE, pf->state) ||
+	    old_recovery_mode_bit) {
+		if (i40e_get_capabilities(pf,
+					  i40e_aqc_opc_list_func_capabilities))
+			goto end_unlock;
+
+		if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
+			/* we're staying in recovery mode so we'll reinitialize
+			 * misc vector here
+			 */
+			if (i40e_setup_misc_vector_for_recovery_mode(pf))
+				goto end_unlock;
+		} else {
+			if (!lock_acquired)
+				rtnl_lock();
+			/* we're going out of recovery mode so we'll free
+			 * the IRQ allocated specifically for recovery mode
+			 * and restore the interrupt scheme
+			 */
+			free_irq(pf->pdev->irq, pf);
+			i40e_clear_interrupt_scheme(pf);
+			if (i40e_restore_interrupt_scheme(pf))
+				goto end_unlock;
+		}
+
+		/* tell the firmware that we're starting */
+		i40e_send_version(pf);
+
+		/* bail out in case recovery mode was detected, as there is
+		 * no need for further configuration.
+		 */
+		goto end_unlock;
+	}
+
+	i40e_clear_pxe_mode(hw);
+	ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
+	if (ret)
+		goto end_core_reset;
+
+	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
+				hw->func_caps.num_rx_qp, 0, 0);
+	if (ret) {
+		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
+		goto end_core_reset;
+	}
+	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
+	if (ret) {
+		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
+		goto end_core_reset;
+	}
+
+	/* Enable FW to write a default DCB config on link-up */
+	i40e_aq_set_dcb_parameters(hw, true, NULL);
+
+#ifdef CONFIG_DCB
+	ret = i40e_init_pf_dcb(pf);
+	if (ret) {
+		dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
+		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+		/* Continue without DCB enabled */
+	}
+
+#endif /* CONFIG_DCB */
+	/* do basic switch setup */
+	if (!lock_acquired)
+		rtnl_lock();
+	ret = i40e_setup_pf_switch(pf, reinit);
+	if (ret)
+		goto end_unlock;
+
+	/* The driver only wants link up/down and module qualification
+	 * reports from firmware.  Note the negative logic.
+	 */
+	ret = i40e_aq_set_phy_int_mask(&pf->hw,
+				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
+					 I40E_AQ_EVENT_MEDIA_NA |
+					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
+	if (ret)
+		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+
+	/* make sure our flow control settings are restored */
+	ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
+	if (ret)
+		dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
+			i40e_stat_str(&pf->hw, ret),
+			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+
+	/* Rebuild the VSIs and VEBs that existed before reset.
+	 * They are still in our local switch element arrays, so only
+	 * need to rebuild the switch model in the HW.
+	 *
+	 * If there were VEBs but the reconstitution failed, we'll try
+	 * to recover minimal use by getting the basic PF VSI working.
+	 */
+	if (vsi->uplink_seid != pf->mac_seid) {
+		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
+		/* find the one VEB connected to the MAC, and find orphans */
+		for (v = 0; v < I40E_MAX_VEB; v++) {
+			if (!pf->veb[v])
+				continue;
+
+			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
+			    pf->veb[v]->uplink_seid == 0) {
+				ret = i40e_reconstitute_veb(pf->veb[v]);
+
+				if (!ret)
+					continue;
+
+				/* If Main VEB failed, we're in deep doodoo,
+				 * so give up rebuilding the switch and set up
+				 * for minimal rebuild of PF VSI.
+				 * If orphan failed, we'll report the error
+				 * but try to keep going.
+				 */
+				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
+					dev_info(&pf->pdev->dev,
+						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
+						 ret);
+					vsi->uplink_seid = pf->mac_seid;
+					break;
+				} else if (pf->veb[v]->uplink_seid == 0) {
+					dev_info(&pf->pdev->dev,
+						 "rebuild of orphan VEB failed: %d\n",
+						 ret);
+				}
+			}
+		}
+	}
+
+	if (vsi->uplink_seid == pf->mac_seid) {
+		dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
+		/* no VEB, so rebuild only the Main VSI */
+		ret = i40e_add_vsi(vsi);
+		if (ret) {
+			dev_info(&pf->pdev->dev,
+				 "rebuild of Main VSI failed: %d\n", ret);
+			goto end_unlock;
+		}
+	}
+
+#ifdef __TC_MQPRIO_MODE_MAX
+	if (vsi->mqprio_qopt.max_rate[0]) {
+		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0] / (1000000 / 8);
+
+		ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
+		if (!ret)
+			dev_dbg(&vsi->back->pdev->dev,
+				"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
+				max_tx_rate,
+				max_tx_rate / I40E_BW_CREDIT_DIVISOR,
+				vsi->seid);
+		else
+			goto end_unlock;
+	}
+#endif
+
+#ifdef __TC_MQPRIO_MODE_MAX
+	/* Not going to support channel VSI in L4 cloud filter mode */
+	if (!i40e_is_l4mode_enabled()) {
+		/* PF Main VSI is rebuild by now, go ahead and
+		 * rebuild channel VSIs for this main VSI if they exist
+		 */
+		ret = i40e_rebuild_channels(vsi);
+		if (ret)
+			goto end_unlock;
+	}
+#endif
+
+	/* Reconfigure hardware for allowing smaller MSS in the case
+	 * of TSO, so that we avoid the MDD being fired and causing
+	 * a reset in the case of small MSS+TSO.
+	 */
+#define I40E_REG_MSS          0x000E64DC
+#define I40E_REG_MSS_MIN_MASK 0x3FF0000
+#define I40E_64BYTE_MSS       0x400000
+	val = rd32(hw, I40E_REG_MSS);
+	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
+		val &= ~I40E_REG_MSS_MIN_MASK;
+		val |= I40E_64BYTE_MSS;
+		wr32(hw, I40E_REG_MSS, val);
+	}
+
+	if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
+		msleep(75);
+		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
+		if (ret)
+			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
+				 i40e_stat_str(&pf->hw, ret),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
+	}
+	/* reinit the misc interrupt */
+	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+		ret = i40e_setup_misc_vector(pf);
+
+	/* Add a filter to drop all Flow control frames from any VSI from being
+	 * transmitted. By doing so we stop a malicious VF from sending out
+	 * PAUSE or PFC frames and potentially controlling traffic for other
+	 * PF/VF VSIs.
+	 * The FW can still send Flow control frames if enabled.
+	 */
+	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
+						       pf->main_vsi_seid);
+
+	/* restart the VSIs that were rebuilt and running before the reset */
+	i40e_pf_unquiesce_all_vsi(pf);
+
+	/* Release the RTNL lock before we start resetting VFs */
+	if (!lock_acquired)
+		rtnl_unlock();
+
+	/* Restore promiscuous settings */
+	ret = i40e_set_promiscuous(pf, pf->cur_promisc);
+	if (ret)
+		dev_warn(&pf->pdev->dev,
+			 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
+			 pf->cur_promisc ? "on" : "off",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+	/* Restore all VF-d config */
+
+	/* If uncommitted changes were made to the BW share settings, then warn
+	 * the user that the configuration may not be restored correctly.
+	 */
+	if (!pf->vf_bw_applied)
+		dev_info(&pf->pdev->dev, "VF BW shares not restored\n");
+
+	if (I40E_IS_MIRROR_VLAN_ID_VALID(pf->ingress_vlan)) {
+		u16 rule_type;
+
+		/* The Admin Queue mirroring rules refer to the traffic
+		 * directions from the perspective of the switch, not the VSI
+		 * we apply the mirroring rule on - so the behaviour of a VSI
+		 * ingress mirror is classified as an egress rule
+		 */
+		rule_type = I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS;
+		ret = i40e_restore_ingress_egress_mirror(vsi, pf->ingress_vlan,
+							 rule_type,
+							 &pf->ingress_rule_id);
+		if (ret)
+			pf->ingress_vlan = I40E_NO_VF_MIRROR;
+	}
+
+	if (I40E_IS_MIRROR_VLAN_ID_VALID(pf->egress_vlan)) {
+		u16 rule_type;
+
+		/* The Admin Queue mirroring rules refer to the traffic
+		 * directions from the perspective of the switch, not the VSI
+		 * we apply the mirroring rule on - so the behaviour of a VSI
+		 * egress mirror is classified as an ingress rule
+		 */
+		rule_type = I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
+		ret = i40e_restore_ingress_egress_mirror(vsi, pf->egress_vlan,
+							 rule_type,
+							 &pf->egress_rule_id);
+		if (ret)
+			pf->egress_vlan = I40E_NO_VF_MIRROR;
+	}
+
+	i40e_reset_all_vfs(pf, true);
+
+	/* TODO: restart clients */
+	/* tell the firmware that we're starting */
+	i40e_send_version(pf);
+
+	/* We've already released the lock, so don't do it again */
+	goto end_core_reset;
+
+end_unlock:
+	if (!lock_acquired)
+		rtnl_unlock();
+end_core_reset:
+	clear_bit(__I40E_RESET_FAILED, pf->state);
+clear_recovery:
+	clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
+	clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
+}
+
+/**
+ * i40e_reset_and_rebuild - reset and rebuild using a saved config
+ * @pf: board private structure
+ * @reinit: if the Main VSI needs to be re-initialized.
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
+ **/
+static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
+				   bool lock_acquired)
+{
+	int ret;
+	/* Now we wait for GRST to settle out.
+	 * We don't have to delete the VEBs or VSIs from the hw switch
+	 * because the reset will make them disappear.
+	 */
+	ret = i40e_reset(pf);
+	if (!ret)
+		i40e_rebuild(pf, reinit, lock_acquired);
+}
+
+/**
+ * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
+ * @pf: board private structure
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
+ *
+ * Close up the VFs and other things in prep for a Core Reset,
+ * then get ready to rebuild the world.
+ **/
+static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
+{
+	i40e_prep_for_reset(pf, lock_acquired);
+	i40e_reset_and_rebuild(pf, false, lock_acquired);
+}
+
+/**
+ * i40e_handle_mdd_event - handle malicious driver detection events
+ * @pf: pointer to the PF structure
+ *
+ * Called from the MDD irq handler to identify possibly malicious VFs
+ **/
+static void i40e_handle_mdd_event(struct i40e_pf *pf)
+{
+	struct i40e_hw *hw = &pf->hw;
+	bool mdd_detected = false;
+	struct i40e_vf *vf;
+	u32 reg;
+	int i;
+
+	if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
+		return;
+
+	/* find what triggered the MDD event */
+	reg = rd32(hw, I40E_GL_MDET_TX);
+	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
+		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
+				I40E_GL_MDET_TX_PF_NUM_SHIFT;
+		u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
+				I40E_GL_MDET_TX_VF_NUM_SHIFT;
+		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
+				I40E_GL_MDET_TX_EVENT_SHIFT;
+		u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
+				I40E_GL_MDET_TX_QUEUE_SHIFT) -
+				pf->hw.func_caps.base_queue;
+		if (netif_msg_tx_err(pf))
+			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
+				 event, queue, pf_num, vf_num);
+		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
+		mdd_detected = true;
+	}
+	reg = rd32(hw, I40E_GL_MDET_RX);
+	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
+		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
+				I40E_GL_MDET_RX_FUNCTION_SHIFT;
+		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
+				I40E_GL_MDET_RX_EVENT_SHIFT;
+		u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
+				I40E_GL_MDET_RX_QUEUE_SHIFT) -
+				pf->hw.func_caps.base_queue;
+		if (netif_msg_rx_err(pf))
+			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
+				 event, queue, func);
+		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
+		mdd_detected = true;
+	}
+
+	if (mdd_detected) {
+		reg = rd32(hw, I40E_PF_MDET_TX);
+		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
+			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
+			dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n");
+		}
+		reg = rd32(hw, I40E_PF_MDET_RX);
+		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
+			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
+			dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n");
+		}
+	}
+
+	/* see if one of the VFs needs its hand slapped */
+	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
+		vf = &(pf->vf[i]);
+		reg = rd32(hw, I40E_VP_MDET_TX(i));
+		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
+			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
+			vf->num_mdd_events++;
+			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
+				 i);
+			dev_info(&pf->pdev->dev,
+				 "Use PF Control I/F to re-enable the VF\n");
+			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
+		}
+
+		reg = rd32(hw, I40E_VP_MDET_RX(i));
+		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
+			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
+			vf->num_mdd_events++;
+			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
+				 i);
+			dev_info(&pf->pdev->dev,
+				 "Use PF Control I/F to re-enable the VF\n");
+			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
+		}
+	}
+
+	/* re-enable mdd interrupt cause */
+	clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
+	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
+	reg |=  I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
+	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+	i40e_flush(hw);
+}
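
All the decoding in this handler is mask-and-shift extraction: each MDET register packs an event type, a queue number and a function or VF number into one 32-bit read. The idiom in isolation, with an invented field layout (the real masks and shifts come from the register definitions):

	#include <stdint.h>

	#define FIELD_EVENT_MASK	0xFF000000u
	#define FIELD_EVENT_SHIFT	24
	#define FIELD_QUEUE_MASK	0x0000FFFFu
	#define FIELD_QUEUE_SHIFT	0

	static void decode(uint32_t reg, uint8_t *event, uint16_t *queue)
	{
		*event = (reg & FIELD_EVENT_MASK) >> FIELD_EVENT_SHIFT;
		*queue = (reg & FIELD_QUEUE_MASK) >> FIELD_QUEUE_SHIFT;
	}
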
+
+#if defined(HAVE_VXLAN_RX_OFFLOAD) || defined(HAVE_UDP_ENC_RX_OFFLOAD)
+#if defined(HAVE_UDP_ENC_TUNNEL) || defined(HAVE_UDP_ENC_RX_OFFLOAD)
+static const char *i40e_tunnel_name(u8 type)
+{
+	switch (type) {
+	case UDP_TUNNEL_TYPE_VXLAN:
+		return "vxlan";
+	case UDP_TUNNEL_TYPE_GENEVE:
+		return "geneve";
+	default:
+		return "unknown";
+	}
+}
+
+/**
+ * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
+ * @pf: board private structure
+ **/
+static void i40e_sync_udp_filters(struct i40e_pf *pf)
+{
+	int i;
+
+	/* loop through and set pending bit for all active UDP filters */
+	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+		if (pf->udp_ports[i].port)
+			pf->pending_udp_bitmap |= BIT_ULL(i);
+	}
+
+	set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
+}
+
+/**
+ * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
+ * @pf: board private structure
+ **/
+static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
+{
+	struct i40e_hw *hw = &pf->hw;
+	u8 filter_index, type;
+	u16 port;
+	int i;
+
+	if (!test_and_clear_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state))
+		return;
+
+	/* acquire RTNL to maintain state of flags and port requests */
+	rtnl_lock();
+
+	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+		if (pf->pending_udp_bitmap & BIT_ULL(i)) {
+			struct i40e_udp_port_config *udp_port;
+			i40e_status ret = 0;
+
+			udp_port = &pf->udp_ports[i];
+			pf->pending_udp_bitmap &= ~BIT_ULL(i);
+
+			port = READ_ONCE(udp_port->port);
+			type = READ_ONCE(udp_port->type);
+			filter_index = READ_ONCE(udp_port->filter_index);
+
+			/* release RTNL while we wait on AQ command */
+			rtnl_unlock();
+
+			if (port)
+				ret = i40e_aq_add_udp_tunnel(hw, port,
+							     type,
+							     &filter_index,
+							     NULL);
+			else if (filter_index != I40E_UDP_PORT_INDEX_UNUSED)
+				ret = i40e_aq_del_udp_tunnel(hw, filter_index,
+							     NULL);
+
+			/* reacquire RTNL so we can update filter_index */
+			rtnl_lock();
+
+			if (ret) {
+				dev_info(&pf->pdev->dev,
+					 "%s %s port %d, index %d failed, err %s aq_err %s\n",
+					 i40e_tunnel_name(type),
+					 port ? "add" : "delete",
+					 port,
+					 filter_index,
+					 i40e_stat_str(&pf->hw, ret),
+					 i40e_aq_str(&pf->hw,
+						    pf->hw.aq.asq_last_status));
+				if (port) {
+					/* failed to add, just reset port,
+					 * drop pending bit for any deletion
+					 */
+					udp_port->port = 0;
+					pf->pending_udp_bitmap &= ~BIT_ULL(i);
+				}
+			} else if (port) {
+				/* record filter index on success */
+				udp_port->filter_index = filter_index;
+			}
+		}
+	}
+
+	rtnl_unlock();
+}
+
+#endif /* HAVE_UDP_ENC_TUNNEL || HAVE_UDP_ENC_RX_OFFLOAD */
+#endif /* HAVE_VXLAN_RX_OFFLOAD || HAVE_UDP_ENC_RX_OFFLOAD */
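
The pending_udp_bitmap driving the subtask above is a 64-bit word with one bit per port slot: i40e_sync_udp_filters() marks every active slot, and the subtask clears each bit as it pushes that slot to hardware. The bookkeeping on its own, with a hypothetical MAX_PORTS:

	#include <stdint.h>

	#define MAX_PORTS 16
	#define BIT_ULL(n) (1ULL << (n))

	static uint64_t pending_bitmap;
	static uint16_t ports[MAX_PORTS];	/* 0 means slot unused */

	static void mark_all_active_pending(void)
	{
		int i;

		for (i = 0; i < MAX_PORTS; i++)
			if (ports[i])
				pending_bitmap |= BIT_ULL(i);
	}

	static void sync_pending(void)
	{
		int i;

		for (i = 0; i < MAX_PORTS; i++) {
			if (!(pending_bitmap & BIT_ULL(i)))
				continue;
			pending_bitmap &= ~BIT_ULL(i);
			/* push slot i: add if ports[i] != 0, else delete */
		}
	}
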
+
+/**
+ * i40e_service_task - Run the driver's async subtasks
+ * @work: pointer to work_struct containing our data
+ **/
+static void i40e_service_task(struct work_struct *work)
+{
+	struct i40e_pf *pf = container_of(work,
+					  struct i40e_pf,
+					  service_task);
+	unsigned long start_time = jiffies;
+
+	/* don't bother with service tasks if a reset is in progress */
+	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+	    test_bit(__I40E_SUSPENDED, pf->state)) {
+		return;
+	}
+
+	if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
+		return;
+
+	if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) {
+		i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
+		i40e_sync_filters_subtask(pf);
+		i40e_reset_subtask(pf);
+		i40e_handle_mdd_event(pf);
+		i40e_vc_process_vflr_event(pf);
+		i40e_watchdog_subtask(pf);
+		i40e_fdir_reinit_subtask(pf);
+		if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
+			/* Client subtask will reopen next time through. */
+			i40e_notify_client_of_netdev_close(
+						pf->vsi[pf->lan_vsi], true);
+		} else {
+			i40e_client_subtask(pf);
+			if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
+					       pf->state))
+				i40e_notify_client_of_l2_param_changes(
+							pf->vsi[pf->lan_vsi]);
+		}
+#if defined(HAVE_VXLAN_RX_OFFLOAD) || defined(HAVE_UDP_ENC_RX_OFFLOAD)
+#if defined(HAVE_UDP_ENC_TUNNEL) || defined(HAVE_UDP_ENC_RX_OFFLOAD)
+		i40e_sync_udp_filters_subtask(pf);
+
+#endif
+#endif /* HAVE_VXLAN_RX_OFFLOAD || HAVE_UDP_ENC_RX_OFFLOAD */
+	} else {
+		i40e_reset_subtask(pf);
+	}
+
+	i40e_clean_adminq_subtask(pf);
+
+	/* flush memory to make sure state is correct before next watchdog */
+	smp_mb__before_atomic();
+	clear_bit(__I40E_SERVICE_SCHED, pf->state);
+
+	/* If the tasks have taken longer than one timer cycle or there
+	 * is more work to be done, reschedule the service task now
+	 * rather than wait for the timer to tick again.
+	 */
+	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
+	    test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state)		 ||
+	    test_bit(__I40E_MDD_EVENT_PENDING, pf->state)		 ||
+	    test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
+		i40e_service_event_schedule(pf);
+}
+
+/**
+ * i40e_service_timer - timer callback
+ * @t: pointer to timer_list struct
+ **/
+static void i40e_service_timer(struct timer_list *t)
+{
+	struct i40e_pf *pf = from_timer(pf, t, service_timer);
+
+	mod_timer(&pf->service_timer,
+		  round_jiffies(jiffies + pf->service_timer_period));
+	i40e_service_event_schedule(pf);
+}
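
This callback recovers its PF with from_timer() and re-arms itself with mod_timer(), which only works if probe initialized the timer against this callback with timer_setup(). The probe-side pairing presumably looks like the following fragment (a sketch, not probe code from this patch; the period value is illustrative):

	/* one-time setup during probe */
	timer_setup(&pf->service_timer, i40e_service_timer, 0);
	pf->service_timer_period = HZ;

	/* first arm; the callback then re-arms itself every period */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));
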
+
+/**
+ * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
+ * @vsi: the VSI being configured
+ **/
+static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
+{
+	struct i40e_pf *pf = vsi->back;
+
+	switch (vsi->type) {
+	case I40E_VSI_MAIN:
+		vsi->alloc_queue_pairs = pf->num_lan_qps;
+		if (!vsi->num_tx_desc)
+			vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+						 I40E_REQ_DESCRIPTOR_MULTIPLE);
+		if (!vsi->num_rx_desc)
+			vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+						 I40E_REQ_DESCRIPTOR_MULTIPLE);
+		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+			vsi->num_q_vectors = pf->num_lan_msix;
+		else
+			vsi->num_q_vectors = 1;
+
+		break;
+
+	case I40E_VSI_FDIR:
+		vsi->alloc_queue_pairs = 1;
+		vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT,
+					 I40E_REQ_DESCRIPTOR_MULTIPLE);
+		vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT,
+					 I40E_REQ_DESCRIPTOR_MULTIPLE);
+		vsi->num_q_vectors = pf->num_fdsb_msix;
+		break;
+
+	case I40E_VSI_VMDQ2:
+		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
+		if (!vsi->num_tx_desc)
+			vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+						 I40E_REQ_DESCRIPTOR_MULTIPLE);
+		if (!vsi->num_rx_desc)
+			vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+						 I40E_REQ_DESCRIPTOR_MULTIPLE);
+		vsi->num_q_vectors = pf->num_vmdq_msix;
+		break;
+
+	case I40E_VSI_SRIOV:
+		vsi->alloc_queue_pairs = pf->num_vf_qps;
+		if (!vsi->num_tx_desc)
+			vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+						 I40E_REQ_DESCRIPTOR_MULTIPLE);
+		if (!vsi->num_rx_desc)
+			vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+						 I40E_REQ_DESCRIPTOR_MULTIPLE);
+		break;
+
+	default:
+		WARN_ON(1);
+		return -ENODATA;
+	}
+
+	return 0;
+}
+
+/**
+ * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
+ * @vsi: VSI pointer
+ * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
+ *
+ * On error: returns error code (negative)
+ * On success: returns 0
+ **/
+static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
+{
+	struct i40e_ring **next_rings;
+	int size;
+	int ret = 0;
+
+	/* allocate memory for both Tx, XDP Tx and Rx ring pointers */
+	size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
+	       (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
+
+	vsi->tx_rings = kzalloc(size, GFP_KERNEL);
+	if (!vsi->tx_rings)
+		return -ENOMEM;
+	next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
+	if (i40e_enabled_xdp_vsi(vsi)) {
+		vsi->xdp_rings = next_rings;
+		next_rings += vsi->alloc_queue_pairs;
+	}
+	vsi->rx_rings = next_rings;
+
+	if (alloc_qvectors) {
+		/* allocate memory for q_vector pointers */
+		size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
+		vsi->q_vectors = kzalloc(size, GFP_KERNEL);
+		if (!vsi->q_vectors) {
+			ret = -ENOMEM;
+			goto err_vectors;
+		}
+	}
+	return ret;
+
+err_vectors:
+	kfree(vsi->tx_rings);
+	return ret;
+}
+
+/**
+ * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
+ * @pf: board private structure
+ * @type: type of VSI
+ *
+ * On error: returns error code (negative)
+ * On success: returns vsi index in PF (positive)
+ **/
+int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
+{
+	int ret = -ENODEV;
+	struct i40e_vsi *vsi;
+	int vsi_idx;
+	int i;
+
+	/* Need to protect the allocation of the VSIs at the PF level */
+	mutex_lock(&pf->switch_mutex);
+
+	/* VSI list may be fragmented if VSI creation/destruction has
+	 * been happening.  We can afford to do a quick scan to look
+	 * for any free VSIs in the list.
+	 *
+	 * find next empty vsi slot, looping back around if necessary
+	 */
+	i = pf->next_vsi;
+	while (i < pf->num_alloc_vsi && pf->vsi[i])
+		i++;
+	if (i >= pf->num_alloc_vsi) {
+		i = 0;
+		while (i < pf->next_vsi && pf->vsi[i])
+			i++;
+	}
+
+	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
+		vsi_idx = i;             /* Found one! */
+	} else {
+		ret = -ENODEV;
+		goto unlock_pf;  /* out of VSI slots! */
+	}
+	pf->next_vsi = ++i;
+
+	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
+	if (!vsi) {
+		ret = -ENOMEM;
+		goto unlock_pf;
+	}
+	vsi->type = type;
+	vsi->back = pf;
+	set_bit(__I40E_VSI_DOWN, vsi->state);
+	vsi->flags = 0;
+	vsi->idx = vsi_idx;
+	vsi->int_rate_limit = 0;
+	vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
+				pf->rss_table_size : 64;
+	vsi->netdev_registered = false;
+	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
+	hash_init(vsi->mac_filter_hash);
+	vsi->irqs_ready = false;
+
+	ret = i40e_set_num_rings_in_vsi(vsi);
+	if (ret)
+		goto err_rings;
+
+	vsi->block_tx_timeout = false;
+
+	ret = i40e_vsi_alloc_arrays(vsi, true);
+	if (ret)
+		goto err_rings;
+
+	/* Setup default MSIX irq handler for VSI */
+	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
+
+	/* Initialize VSI lock */
+	spin_lock_init(&vsi->mac_filter_hash_lock);
+	pf->vsi[vsi_idx] = vsi;
+	ret = vsi_idx;
+
+	goto unlock_pf;
+
+err_rings:
+	pf->next_vsi = i - 1;
+	kfree(vsi);
+unlock_pf:
+	mutex_unlock(&pf->switch_mutex);
+	return ret;
+}
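
The slot search above scans forward from pf->next_vsi and wraps around once, so low-numbered slots freed by VSI teardown get reused without a full scan on every allocation. The same cursor-with-wraparound search as a standalone helper:

	/* returns a free index and advances the cursor, or -1 if full */
	static int find_free_slot(void **slots, int nslots, int *next)
	{
		int i = *next;

		while (i < nslots && slots[i])
			i++;
		if (i >= nslots) {	/* wrap and scan the front portion */
			i = 0;
			while (i < *next && slots[i])
				i++;
		}
		if (i < nslots && !slots[i]) {
			*next = i + 1;	/* resume here next time */
			return i;
		}
		return -1;
	}
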
+
+/**
+ * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
+ * @vsi: VSI pointer
+ * @free_qvectors: a bool to specify if q_vectors need to be freed.
+ **/
+static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
+{
+	/* free the ring and vector containers */
+	if (free_qvectors) {
+		kfree(vsi->q_vectors);
+		vsi->q_vectors = NULL;
+	}
+	kfree(vsi->tx_rings);
+	vsi->tx_rings = NULL;
+	vsi->rx_rings = NULL;
+	vsi->xdp_rings = NULL;
+}
+
+/**
+ * i40e_vsi_clear - Deallocate the VSI provided
+ * @vsi: the VSI being un-configured
+ **/
+static int i40e_vsi_clear(struct i40e_vsi *vsi)
+{
+	struct i40e_pf *pf;
+
+	if (!vsi)
+		return 0;
+
+	if (!vsi->back)
+		goto free_vsi;
+	pf = vsi->back;
+
+	mutex_lock(&pf->switch_mutex);
+	if (!pf->vsi[vsi->idx]) {
+		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n",
+			vsi->idx, vsi->idx, vsi->type);
+		goto unlock_vsi;
+	}
+
+	if (pf->vsi[vsi->idx] != vsi) {
+		dev_err(&pf->pdev->dev,
+			"pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n",
+			pf->vsi[vsi->idx]->idx,
+			pf->vsi[vsi->idx]->type,
+			vsi->idx, vsi->type);
+		goto unlock_vsi;
+	}
+
+	/* updates the PF for this cleared vsi */
+	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
+	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
+
+	i40e_vsi_free_arrays(vsi, true);
+	i40e_clear_rss_config_user(vsi);
+
+	pf->vsi[vsi->idx] = NULL;
+	if (vsi->idx < pf->next_vsi)
+		pf->next_vsi = vsi->idx;
+
+unlock_vsi:
+	mutex_unlock(&pf->switch_mutex);
+free_vsi:
+	kfree(vsi);
+
+	return 0;
+}
+
+/**
+ * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
+ * @vsi: the VSI being cleaned
+ **/
+static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
+{
+	int i;
+
+	if (vsi->tx_rings && vsi->tx_rings[0]) {
+		for (i = 0; i < vsi->alloc_queue_pairs; i++) {
+			kfree_rcu(vsi->tx_rings[i], rcu);
+			vsi->tx_rings[i] = NULL;
+			vsi->rx_rings[i] = NULL;
+			if (vsi->xdp_rings)
+				vsi->xdp_rings[i] = NULL;
+		}
+	}
+}
+
+/**
+ * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
+ * @vsi: the VSI being configured
+ **/
+static int i40e_alloc_rings(struct i40e_vsi *vsi)
+{
+	int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_ring *ring;
+
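+	/* Each queue pair is one contiguous allocation holding the Tx
+	 * ring, the optional XDP Tx ring, and the Rx ring; 'ring' is
+	 * advanced through that block as each ring is initialized.
+	 */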
+	/* Set basic values in the rings to be used later during open() */
+	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
+		/* allocate space for both Tx and Rx in one shot */
+		ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
+		if (!ring)
+			goto err_out;
+
+		ring->queue_index = i;
+		ring->reg_idx = vsi->base_queue + i;
+		ring->ring_active = false;
+		ring->vsi = vsi;
+		ring->netdev = vsi->netdev;
+		ring->dev = &pf->pdev->dev;
+		ring->count = vsi->num_tx_desc;
+		ring->size = 0;
+		ring->dcb_tc = 0;
+
+		if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
+			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
+		ring->itr_setting = pf->tx_itr_default;
+		vsi->tx_rings[i] = ring++;
+
+		if (!i40e_enabled_xdp_vsi(vsi))
+			goto setup_rx;
+
+		ring->queue_index = vsi->alloc_queue_pairs + i;
+		ring->reg_idx = vsi->base_queue + ring->queue_index;
+		ring->ring_active = false;
+		ring->vsi = vsi;
+		ring->netdev = NULL;
+		ring->dev = &pf->pdev->dev;
+		ring->count = vsi->num_tx_desc;
+		ring->size = 0;
+		ring->dcb_tc = 0;
+		if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
+			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
+		set_ring_xdp(ring);
+		ring->itr_setting = pf->tx_itr_default;
+		vsi->xdp_rings[i] = ring++;
+
+setup_rx:
+		ring->queue_index = i;
+		ring->reg_idx = vsi->base_queue + i;
+		ring->ring_active = false;
+		ring->vsi = vsi;
+		ring->netdev = vsi->netdev;
+		ring->dev = &pf->pdev->dev;
+		ring->count = vsi->num_rx_desc;
+		ring->size = 0;
+		ring->dcb_tc = 0;
+		ring->itr_setting = pf->rx_itr_default;
+		vsi->rx_rings[i] = ring;
+	}
+
+	return 0;
+
+err_out:
+	i40e_vsi_clear_rings(vsi);
+	return -ENOMEM;
+}
+#if !defined(I40E_LEGACY_INTERRUPT) && !defined(I40E_MSI_INTERRUPT)
+
+/**
+ * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
+ * @pf: board private structure
+ * @v_budget: the number of MSI-X vectors to request
+ *
+ * Returns the number of vectors reserved, or error
+ **/
+static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int v_budget)
+{
+	int v_actual = 0;
+
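+	/* pci_enable_msix_range() returns the number of vectors granted,
+	 * at least I40E_MIN_MSIX, or a negative errno on failure.
+	 */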
+	v_actual = pci_enable_msix_range(pf->pdev,
+					 pf->msix_entries,
+					 I40E_MIN_MSIX,
+					 v_budget);
+	if (v_actual < 0)
+		dev_info(&pf->pdev->dev,
+			 "MSI-X vector reservation failed: %d\n", v_actual);
+
+	return v_actual;
+}
+
+/**
+ * i40e_init_msix - Setup the MSIX capability
+ * @pf: board private structure
+ *
+ * Work with the OS to set up the MSIX vectors needed.
+ *
+ * Returns the number of vectors reserved or negative on failure
+ **/
+static int i40e_init_msix(struct i40e_pf *pf)
+{
+	struct i40e_hw *hw = &pf->hw;
+	int cpus, extra_vectors;
+	int vectors_left;
+	int v_budget, i;
+	int v_actual;
+	int iwarp_requested = 0;
+
+	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
+		return -ENODEV;
+
+	/* The number of vectors we'll request will be comprised of:
+	 *   - Add 1 for "other" cause for Admin Queue events, etc.
+	 *   - The number of LAN queue pairs
+	 *	- This takes into account queues for each TC in DCB mode.
+	 *	- Queues being used for RSS.
+	 *		We don't need as many as max_rss_size vectors.
+	 *		use rss_size instead in the calculation since that
+	 *		is governed by number of cpus in the system.
+	 *	- assumes symmetric Tx/Rx pairing
+	 *   - The number of VMDq pairs
+	 *   - The CPU count within the NUMA node if iWARP is enabled
+	 * Once we count this up, try the request.
+	 *
+	 * If we can't get what we want, we'll simplify to nearly nothing
+	 * and try again.  If that still fails, we punt.
+	 */
+	vectors_left = hw->func_caps.num_msix_vectors;
+	v_budget = 0;
+
+	/* reserve one vector for miscellaneous handler */
+	if (vectors_left) {
+		v_budget++;
+		vectors_left--;
+	}
+
+	/* reserve some vectors for the main PF traffic queues. Initially we
+	 * only reserve at most 50% of the available vectors, in the case that
+	 * the number of online CPUs is large. This ensures that we can enable
+	 * extra features as well. Once we've enabled the other features, we
+	 * will use any remaining vectors to reach as close as we can to the
+	 * number of online CPUs.
+	 */
+	cpus = num_online_cpus();
+	pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
+	vectors_left -= pf->num_lan_msix;
+
+	/* reserve one vector for sideband flow director */
+	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+		if (vectors_left) {
+			pf->num_fdsb_msix = 1;
+			v_budget++;
+			vectors_left--;
+		} else {
+			pf->num_fdsb_msix = 0;
+		}
+	}
+
+	/* can we reserve enough for iWARP? */
+	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+		iwarp_requested = pf->num_iwarp_msix;
+
+		if (!vectors_left)
+			pf->num_iwarp_msix = 0;
+		else if (vectors_left < pf->num_iwarp_msix)
+			pf->num_iwarp_msix = 1;
+		v_budget += pf->num_iwarp_msix;
+		vectors_left -= pf->num_iwarp_msix;
+	}
+
+	/* any vectors left over go for VMDq support */
+	if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
+		if (!vectors_left) {
+			pf->num_vmdq_msix = 0;
+			pf->num_vmdq_qps = 0;
+		} else {
+			int vmdq_vecs_wanted =
+				pf->num_vmdq_vsis * pf->num_vmdq_qps;
+			int vmdq_vecs =
+				min_t(int, vectors_left, vmdq_vecs_wanted);
+
+			/* if we're short on vectors for what's desired, we
+			 * limit the queues per vmdq.  If this is still more
+			 * than are available, the user will need to change
+			 * the number of queues/vectors used by the PF later
+			 * with the ethtool channels command
+			 */
+			if (vectors_left < vmdq_vecs_wanted) {
+				pf->num_vmdq_qps = 1;
+				vmdq_vecs_wanted = pf->num_vmdq_vsis;
+				vmdq_vecs = min_t(int,
+						  vectors_left,
+						  vmdq_vecs_wanted);
+			}
+			pf->num_vmdq_msix = pf->num_vmdq_qps;
+
+			v_budget += vmdq_vecs;
+			vectors_left -= vmdq_vecs;
+		}
+	}
+
+	/* On systems with a large number of SMP cores, we previously limited
+	 * the number of vectors for num_lan_msix to be at most 50% of the
+	 * available vectors, to allow for other features. Now, we add back
+	 * the remaining vectors. However, we ensure that the total
+	 * num_lan_msix will not exceed num_online_cpus(). To do this, we
+	 * calculate the number of vectors we can add without going over the
+	 * cap of CPUs. For systems with a small number of CPUs this will be
+	 * zero.
+	 */
+	extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
+	pf->num_lan_msix += extra_vectors;
+	vectors_left -= extra_vectors;
+
+	WARN(vectors_left < 0,
+	     "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
+
+	v_budget += pf->num_lan_msix;
+	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
+				   GFP_KERNEL);
+	if (!pf->msix_entries)
+		return -ENOMEM;
+
+	for (i = 0; i < v_budget; i++)
+		pf->msix_entries[i].entry = i;
+	v_actual = i40e_reserve_msix_vectors(pf, v_budget);
+	if (v_actual < I40E_MIN_MSIX) {
+		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
+		kfree(pf->msix_entries);
+		pf->msix_entries = NULL;
+		pci_disable_msix(pf->pdev);
+		return -ENODEV;
+	}
+
+	if (v_actual == I40E_MIN_MSIX) {
+		/* Adjust for minimal MSIX use */
+		pf->num_vmdq_vsis = 0;
+		pf->num_vmdq_qps = 0;
+		pf->num_lan_qps = 1;
+		pf->num_lan_msix = 1;
+
+	} else if (!vectors_left) {
+		/* If we have limited resources, we will start with no vectors
+		 * for the special features and then allocate vectors to some
+		 * of these features based on the policy and at the end disable
+		 * the features that did not get any vectors.
+		 */
+		int vec;
+
+		dev_info(&pf->pdev->dev,
+			 "MSI-X vector limit reached, attempting to redistribute vectors\n");
+		/* reserve the misc vector */
+		vec = v_actual - 1;
+
+		/* Scale vector usage down */
+		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
+		pf->num_vmdq_vsis = 1;
+		pf->num_vmdq_qps = 1;
+
+		/* partition out the remaining vectors */
+		switch (vec) {
+		case 2:
+			pf->num_lan_msix = 1;
+			break;
+		case 3:
+			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+				pf->num_lan_msix = 1;
+				pf->num_iwarp_msix = 1;
+			}
+			break;
+		default:
+			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+				pf->num_iwarp_msix = min_t(int, (vec / 3),
+						 iwarp_requested);
+				pf->num_vmdq_vsis = min_t(int, (vec / 3),
+						  I40E_DEFAULT_NUM_VMDQ_VSI);
+			} else {
+				pf->num_vmdq_vsis = min_t(int, (vec / 2),
+						  I40E_DEFAULT_NUM_VMDQ_VSI);
+			}
+			if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+				pf->num_fdsb_msix = 1;
+				vec--;
+			}
+			pf->num_lan_msix = min_t(int,
+			       (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
+							      pf->num_lan_msix);
+			break;
+		}
+	}
+
+	if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
+	    (pf->num_fdsb_msix == 0)) {
+		dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
+		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
+	}
+	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
+	    (pf->num_vmdq_msix == 0)) {
+		dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
+		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
+	}
+
+	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
+	    (pf->num_iwarp_msix == 0)) {
+		dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
+		pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
+	}
+	return v_actual;
+}
+#endif
+
+/**
+ * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: index of the vector in the vsi struct
+ *
+ * We allocate one q_vector.  If allocation fails we return -ENOMEM.
+ **/
+static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
+{
+	struct i40e_q_vector *q_vector;
+
+	/* allocate q_vector */
+	q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
+	if (!q_vector)
+		return -ENOMEM;
+
+	q_vector->vsi = vsi;
+	q_vector->v_idx = v_idx;
+#ifdef HAVE_IRQ_AFFINITY_NOTIFY
+	cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
+#endif
+	if (vsi->netdev)
+		netif_napi_add(vsi->netdev, &q_vector->napi,
+			       i40e_napi_poll, NAPI_POLL_WEIGHT);
+
+	/* tie q_vector and vsi together */
+	vsi->q_vectors[v_idx] = q_vector;
 
-err_rings:
-	pf->next_vsi = i - 1;
-	kfree(vsi);
-unlock_pf:
-	mutex_unlock(&pf->switch_mutex);
-	return ret;
+	return 0;
 }
 
 /**
- * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
- * @type: VSI pointer
- * @free_qvectors: a bool to specify if q_vectors need to be freed.
+ * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @vsi: the VSI being configured
  *
- * On error: returns error code (negative)
- * On success: returns 0
+ * We allocate one q_vector per queue interrupt.  If allocation fails we
+ * return -ENOMEM.
  **/
-static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
+static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
 {
-	/* free the ring and vector containers */
-	if (free_qvectors) {
-		kfree(vsi->q_vectors);
-		vsi->q_vectors = NULL;
+	struct i40e_pf *pf = vsi->back;
+	int v_idx, num_q_vectors;
+	int err;
+
+	/* if not MSIX, give the one vector only to the LAN VSI */
+	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+		num_q_vectors = vsi->num_q_vectors;
+	else if (vsi == pf->vsi[pf->lan_vsi])
+		num_q_vectors = 1;
+	else
+		return -EINVAL;
+
+	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
+		err = i40e_vsi_alloc_q_vector(vsi, v_idx);
+		if (err)
+			goto err_out;
 	}
-	kfree(vsi->tx_rings);
-	vsi->tx_rings = NULL;
-	vsi->rx_rings = NULL;
-	vsi->xdp_rings = NULL;
-}
 
-/**
- * i40e_clear_rss_config_user - clear the user configured RSS hash keys
- * and lookup table
- * @vsi: Pointer to VSI structure
- */
-static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
-{
-	if (!vsi)
-		return;
+	return 0;
 
-	kfree(vsi->rss_hkey_user);
-	vsi->rss_hkey_user = NULL;
+err_out:
+	while (v_idx--)
+		i40e_free_q_vector(vsi, v_idx);
 
-	kfree(vsi->rss_lut_user);
-	vsi->rss_lut_user = NULL;
+	return err;
 }
 
 /**
- * i40e_vsi_clear - Deallocate the VSI provided
- * @vsi: the VSI being un-configured
+ * i40e_init_interrupt_scheme - Determine proper interrupt scheme
+ * @pf: board private structure to initialize
  **/
-static int i40e_vsi_clear(struct i40e_vsi *vsi)
+static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
 {
-	struct i40e_pf *pf;
-
-	if (!vsi)
-		return 0;
+	int vectors = 0;
+	ssize_t size;
 
-	if (!vsi->back)
-		goto free_vsi;
-	pf = vsi->back;
+	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+#if !defined(I40E_LEGACY_INTERRUPT) && !defined(I40E_MSI_INTERRUPT)
+		vectors = i40e_init_msix(pf);
+#else
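+		/* MSI-X support compiled out; force the fallback path */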
+		vectors = -1;
+#endif
+		if (vectors < 0) {
+			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED	|
+				       I40E_FLAG_IWARP_ENABLED	|
+				       I40E_FLAG_RSS_ENABLED	|
+				       I40E_FLAG_DCB_CAPABLE	|
+				       I40E_FLAG_DCB_ENABLED	|
+				       I40E_FLAG_SRIOV_ENABLED	|
+				       I40E_FLAG_FD_SB_ENABLED	|
+				       I40E_FLAG_FD_ATR_ENABLED	|
+				       I40E_FLAG_VMDQ_ENABLED);
+			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
 
-	mutex_lock(&pf->switch_mutex);
-	if (!pf->vsi[vsi->idx]) {
-		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
-			vsi->idx, vsi->idx, vsi, vsi->type);
-		goto unlock_vsi;
+			/* rework the queue expectations without MSIX */
+			i40e_determine_queue_usage(pf);
+		}
 	}
 
-	if (pf->vsi[vsi->idx] != vsi) {
-		dev_err(&pf->pdev->dev,
-			"pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
-			pf->vsi[vsi->idx]->idx,
-			pf->vsi[vsi->idx],
-			pf->vsi[vsi->idx]->type,
-			vsi->idx, vsi, vsi->type);
-		goto unlock_vsi;
+	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
+		dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
+#ifndef I40E_LEGACY_INTERRUPT
+		vectors = pci_enable_msi(pf->pdev);
+#else
+		vectors = -1;
+#endif
+		if (vectors < 0) {
+			dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
+				 vectors);
+			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
+		}
+		vectors = 1;  /* one MSI or Legacy vector */
 	}
 
-	/* updates the PF for this cleared vsi */
-	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
-	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
-
-	i40e_vsi_free_arrays(vsi, true);
-	i40e_clear_rss_config_user(vsi);
+	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
+		dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
 
-	pf->vsi[vsi->idx] = NULL;
-	if (vsi->idx < pf->next_vsi)
-		pf->next_vsi = vsi->idx;
+	/* set up vector assignment tracking */
+	size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
+	pf->irq_pile = kzalloc(size, GFP_KERNEL);
+	if (!pf->irq_pile) {
+		dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
+		return -ENOMEM;
+	}
+	pf->irq_pile->num_entries = vectors;
+	pf->irq_pile->search_hint = 0;
 
-unlock_vsi:
-	mutex_unlock(&pf->switch_mutex);
-free_vsi:
-	kfree(vsi);
+	/* track first vector for misc interrupts, ignore return */
+	(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
 
 	return 0;
 }
 
 /**
- * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
- * @vsi: the VSI being cleaned
+ * i40e_setup_misc_vector_for_recovery_mode - Setup the misc vector to handle
+ * non queue events in recovery mode
+ * @pf: board private structure
+ *
+ * This sets up the handler for MSIX 0 or MSI/legacy, which is used to manage
+ * the non-queue interrupts, e.g. AdminQ and errors in recovery mode.
+ * This is handled differently from the normal path since no Tx/Rx
+ * resources are allocated in recovery mode.
  **/
-static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
+static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
 {
-	int i;
+	int err;
 
-	if (vsi->tx_rings && vsi->tx_rings[0]) {
-		for (i = 0; i < vsi->alloc_queue_pairs; i++) {
-			kfree_rcu(vsi->tx_rings[i], rcu);
-			vsi->tx_rings[i] = NULL;
-			vsi->rx_rings[i] = NULL;
-			if (vsi->xdp_rings)
-				vsi->xdp_rings[i] = NULL;
+	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+		err = i40e_setup_misc_vector(pf);
+
+		if (err) {
+			dev_info(&pf->pdev->dev,
+				 "MSI-X misc vector request failed, error %d\n",
+				 err);
+			return err;
+		}
+	} else {
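+		/* Legacy INTx lines may be shared with other devices, so
+		 * request IRQF_SHARED there; MSI vectors are exclusive.
+		 */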
+		u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED;
+
+		err = request_irq(pf->pdev->irq, i40e_intr, flags,
+				  pf->int_name, pf);
+
+		if (err) {
+			dev_info(&pf->pdev->dev,
+				 "MSI/legacy misc vector request failed, error %d\n",
+				 err);
+			return err;
 		}
+		i40e_enable_misc_int_causes(pf);
+		i40e_irq_dynamic_enable_icr0(pf);
 	}
+
+	return 0;
 }
 
 /**
- * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
- * @vsi: the VSI being configured
+ * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
+ * @pf: board private structure
+ *
+ * This sets up the handler for MSIX 0, which is used to manage the
+ * non-queue interrupts, e.g. AdminQ and errors.  This is not used
+ * when in MSI or Legacy interrupt mode.
  **/
-static int i40e_alloc_rings(struct i40e_vsi *vsi)
+static int i40e_setup_misc_vector(struct i40e_pf *pf)
 {
-	int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
-	struct i40e_pf *pf = vsi->back;
-	struct i40e_ring *ring;
+	struct i40e_hw *hw = &pf->hw;
+	int err = 0;
 
-	/* Set basic values in the rings to be used later during open() */
-	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
-		/* allocate space for both Tx and Rx in one shot */
-		ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
-		if (!ring)
-			goto err_out;
+	/* Only request the IRQ once, the first time through. */
+	if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
+		err = request_irq(pf->msix_entries[0].vector,
+				  i40e_intr, 0, pf->int_name, pf);
+		if (err) {
+			clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
+			dev_info(&pf->pdev->dev,
+				 "request_irq for %s failed: %d\n",
+				 pf->int_name, err);
+			return -EFAULT;
+		}
+	}
 
-		ring->queue_index = i;
-		ring->reg_idx = vsi->base_queue + i;
-		ring->ring_active = false;
-		ring->vsi = vsi;
-		ring->netdev = vsi->netdev;
-		ring->dev = &pf->pdev->dev;
-		ring->count = vsi->num_desc;
-		ring->size = 0;
-		ring->dcb_tc = 0;
-		if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
-			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
-		ring->tx_itr_setting = pf->tx_itr_default;
-		vsi->tx_rings[i] = ring++;
+	i40e_enable_misc_int_causes(pf);
 
-		if (!i40e_enabled_xdp_vsi(vsi))
-			goto setup_rx;
+	/* associate no queues to the misc vector */
+	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
+	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1);
 
-		ring->queue_index = vsi->alloc_queue_pairs + i;
-		ring->reg_idx = vsi->base_queue + ring->queue_index;
-		ring->ring_active = false;
-		ring->vsi = vsi;
-		ring->netdev = NULL;
-		ring->dev = &pf->pdev->dev;
-		ring->count = vsi->num_desc;
-		ring->size = 0;
-		ring->dcb_tc = 0;
-		if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
-			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
-		set_ring_xdp(ring);
-		ring->tx_itr_setting = pf->tx_itr_default;
-		vsi->xdp_rings[i] = ring++;
+	i40e_flush(hw);
 
-setup_rx:
-		ring->queue_index = i;
-		ring->reg_idx = vsi->base_queue + i;
-		ring->ring_active = false;
-		ring->vsi = vsi;
-		ring->netdev = vsi->netdev;
-		ring->dev = &pf->pdev->dev;
-		ring->count = vsi->num_desc;
-		ring->size = 0;
-		ring->dcb_tc = 0;
-		ring->rx_itr_setting = pf->rx_itr_default;
-		vsi->rx_rings[i] = ring;
+	i40e_irq_dynamic_enable_icr0(pf);
+
+	return err;
+}
+
+/**
+ * i40e_restore_interrupt_scheme - Restore the interrupt scheme
+ * @pf: private board data structure
+ *
+ * Restore the interrupt scheme that was cleared when we suspended the
+ * device. This should be called during resume to re-allocate the q_vectors
+ * and reacquire IRQs.
+ */
+static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
+{
+	int err, i;
+
+	/* We cleared the MSI and MSI-X flags when disabling the old interrupt
+	 * scheme. We need to re-enable them here in order to attempt to
+	 * re-acquire the MSI or MSI-X vectors.
+	 */
+	pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
+
+	err = i40e_init_interrupt_scheme(pf);
+	if (err)
+		return err;
+
+	/* Now that we've re-acquired IRQs, we need to remap the vectors and
+	 * rings together again.
+	 */
+	for (i = 0; i < pf->num_alloc_vsi; i++) {
+		if (pf->vsi[i]) {
+			err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
+			if (err)
+				goto err_unwind;
+			i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
+		}
 	}
 
+	err = i40e_setup_misc_vector(pf);
+	if (err)
+		goto err_unwind;
+
+	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
+		i40e_client_update_msix_info(pf);
+
 	return 0;
 
-err_out:
-	i40e_vsi_clear_rings(vsi);
-	return -ENOMEM;
+err_unwind:
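+	/* Unwind in reverse order, freeing the q_vectors of every VSI
+	 * that was set up before the failure.
+	 */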
+	while (i--) {
+		if (pf->vsi[i])
+			i40e_vsi_free_q_vectors(pf->vsi[i]);
+	}
+
+	return err;
 }
 
 /**
- * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
- * @pf: board private structure
- * @vectors: the number of MSI-X vectors to request
+ * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
+ * @vsi: Pointer to vsi structure
+ * @seed: Buffer to store the hash keys
+ * @lut: Buffer to store the lookup table entries
+ * @lut_size: Size of buffer to store the lookup table entries
  *
- * Returns the number of vectors reserved, or error
- **/
-static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
+ * Return 0 on success, negative on failure
+ */
+static int i40e_get_rss_aq(struct i40e_vsi *vsi, u8 *seed,
+			   u8 *lut, u16 lut_size)
 {
-	vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
-					I40E_MIN_MSIX, vectors);
-	if (vectors < 0) {
-		dev_info(&pf->pdev->dev,
-			 "MSI-X vector reservation failed: %d\n", vectors);
-		vectors = 0;
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	int ret = 0;
+
+	if (seed) {
+		ret = i40e_aq_get_rss_key(hw, vsi->id,
+			(struct i40e_aqc_get_set_rss_key_data *)seed);
+		if (ret) {
+			dev_info(&pf->pdev->dev,
+				 "Cannot get RSS key, err %s aq_err %s\n",
+				 i40e_stat_str(&pf->hw, ret),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
+			return ret;
+		}
 	}
 
-	return vectors;
+	if (lut) {
+		bool pf_lut = (vsi->type == I40E_VSI_MAIN);
+
+		ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
+		if (ret) {
+			dev_info(&pf->pdev->dev,
+				 "Cannot get RSS lut, err %s aq_err %s\n",
+				 i40e_stat_str(&pf->hw, ret),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
+			return ret;
+		}
+	}
+	return ret;
 }
 
 /**
- * i40e_init_msix - Setup the MSIX capability
- * @pf: board private structure
- *
- * Work with the OS to set up the MSIX vectors needed.
+ * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
+ * @vsi: Pointer to vsi structure
+ * @seed: RSS hash seed
+ * @lut: Lookup table
+ * @lut_size: Lookup table size
  *
- * Returns the number of vectors reserved or negative on failure
+ * Returns 0 on success, negative on failure
  **/
-static int i40e_init_msix(struct i40e_pf *pf)
+static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
+			       const u8 *lut, u16 lut_size)
 {
+	struct i40e_pf *pf = vsi->back;
 	struct i40e_hw *hw = &pf->hw;
-	int cpus, extra_vectors;
-	int vectors_left;
-	int v_budget, i;
-	int v_actual;
-	int iwarp_requested = 0;
-
-	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
-		return -ENODEV;
-
-	/* The number of vectors we'll request will be comprised of:
-	 *   - Add 1 for "other" cause for Admin Queue events, etc.
-	 *   - The number of LAN queue pairs
-	 *	- Queues being used for RSS.
-	 *		We don't need as many as max_rss_size vectors.
-	 *		use rss_size instead in the calculation since that
-	 *		is governed by number of cpus in the system.
-	 *	- assumes symmetric Tx/Rx pairing
-	 *   - The number of VMDq pairs
-	 *   - The CPU count within the NUMA node if iWARP is enabled
-	 * Once we count this up, try the request.
-	 *
-	 * If we can't get what we want, we'll simplify to nearly nothing
-	 * and try again.  If that still fails, we punt.
-	 */
-	vectors_left = hw->func_caps.num_msix_vectors;
-	v_budget = 0;
+	u16 vf_id = vsi->vf_id;
+	u8 i;
 
-	/* reserve one vector for miscellaneous handler */
-	if (vectors_left) {
-		v_budget++;
-		vectors_left--;
+	/* Fill out hash function seed */
+	if (seed) {
+		u32 *seed_dw = (u32 *)seed;
+
+		if (vsi->type == I40E_VSI_MAIN) {
+			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+				wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
+		} else if (vsi->type == I40E_VSI_SRIOV) {
+			for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
+				wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
+		} else {
+			dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
+		}
 	}
 
-	/* reserve some vectors for the main PF traffic queues. Initially we
-	 * only reserve at most 50% of the available vectors, in the case that
-	 * the number of online CPUs is large. This ensures that we can enable
-	 * extra features as well. Once we've enabled the other features, we
-	 * will use any remaining vectors to reach as close as we can to the
-	 * number of online CPUs.
-	 */
-	cpus = num_online_cpus();
-	pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
-	vectors_left -= pf->num_lan_msix;
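+	/* Program the lookup table; PF and VF VSIs use different
+	 * register files and expect different table sizes.
+	 */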
+	if (lut) {
+		u32 *lut_dw = (u32 *)lut;
 
-	/* reserve one vector for sideband flow director */
-	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
-		if (vectors_left) {
-			pf->num_fdsb_msix = 1;
-			v_budget++;
-			vectors_left--;
+		if (vsi->type == I40E_VSI_MAIN) {
+			if (lut_size != I40E_HLUT_ARRAY_SIZE)
+				return -EINVAL;
+			for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
+				wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
+		} else if (vsi->type == I40E_VSI_SRIOV) {
+			if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
+				return -EINVAL;
+			for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
+				wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
 		} else {
-			pf->num_fdsb_msix = 0;
+			dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
 		}
 	}
+	i40e_flush(hw);
 
-	/* can we reserve enough for iWARP? */
-	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
-		iwarp_requested = pf->num_iwarp_msix;
+	return 0;
+}
 
-		if (!vectors_left)
-			pf->num_iwarp_msix = 0;
-		else if (vectors_left < pf->num_iwarp_msix)
-			pf->num_iwarp_msix = 1;
-		v_budget += pf->num_iwarp_msix;
-		vectors_left -= pf->num_iwarp_msix;
-	}
+/**
+ * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
+ * @vsi: Pointer to VSI structure
+ * @seed: Buffer to store the keys
+ * @lut: Buffer to store the lookup table entries
+ * @lut_size: Size of buffer to store the lookup table entries
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
+			    u8 *lut, u16 lut_size)
+{
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	u16 i;
 
-	/* any vectors left over go for VMDq support */
-	if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
-		int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
-		int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);
+	if (seed) {
+		u32 *seed_dw = (u32 *)seed;
 
-		if (!vectors_left) {
-			pf->num_vmdq_msix = 0;
-			pf->num_vmdq_qps = 0;
-		} else {
-			/* if we're short on vectors for what's desired, we limit
-			 * the queues per vmdq.  If this is still more than are
-			 * available, the user will need to change the number of
-			 * queues/vectors used by the PF later with the ethtool
-			 * channels command
-			 */
-			if (vmdq_vecs < vmdq_vecs_wanted)
-				pf->num_vmdq_qps = 1;
-			pf->num_vmdq_msix = pf->num_vmdq_qps;
+		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+			seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
+	}
+	if (lut) {
+		u32 *lut_dw = (u32 *)lut;
 
-			v_budget += vmdq_vecs;
-			vectors_left -= vmdq_vecs;
-		}
+		if (lut_size != I40E_HLUT_ARRAY_SIZE)
+			return -EINVAL;
+		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
+			lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
 	}
 
-	/* On systems with a large number of SMP cores, we previously limited
-	 * the number of vectors for num_lan_msix to be at most 50% of the
-	 * available vectors, to allow for other features. Now, we add back
-	 * the remaining vectors. However, we ensure that the total
-	 * num_lan_msix will not exceed num_online_cpus(). To do this, we
-	 * calculate the number of vectors we can add without going over the
-	 * cap of CPUs. For systems with a small number of CPUs this will be
-	 * zero.
-	 */
-	extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
-	pf->num_lan_msix += extra_vectors;
-	vectors_left -= extra_vectors;
+	return 0;
+}
+
+/**
+ * i40e_config_rss - Configure RSS keys and lut
+ * @vsi: Pointer to VSI structure
+ * @seed: RSS hash seed
+ * @lut: Lookup table
+ * @lut_size: Lookup table size
+ *
+ * Returns 0 on success, negative on failure
+ */
+int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
+{
+	struct i40e_pf *pf = vsi->back;
+
+	if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
+		return i40e_config_rss_aq(vsi, seed, lut, lut_size);
+	else
+		return i40e_config_rss_reg(vsi, seed, lut, lut_size);
+}
+
+/**
+ * i40e_get_rss - Get RSS keys and lut
+ * @vsi: Pointer to VSI structure
+ * @seed: Buffer to store the keys
+ * @lut: Buffer to store the lookup table entries
+ * @lut_size: Size of buffer to store the lookup table entries
+ *
+ * Returns 0 on success, negative on failure
+ */
+int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
+{
+	struct i40e_pf *pf = vsi->back;
+
+	if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
+		return i40e_get_rss_aq(vsi, seed, lut, lut_size);
+	else
+		return i40e_get_rss_reg(vsi, seed, lut, lut_size);
+}
+
+/**
+ * i40e_fill_rss_lut - Fill the RSS lookup table with default values
+ * @pf: Pointer to board private structure
+ * @lut: Lookup table
+ * @rss_table_size: Lookup table size
+ * @rss_size: Number of queues over which to spread the hash
+ */
+void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
+		       u16 rss_table_size, u16 rss_size)
+{
+	u16 i;
 
-	WARN(vectors_left < 0,
-	     "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
+	for (i = 0; i < rss_table_size; i++)
+		lut[i] = i % rss_size;
+}
 
-	v_budget += pf->num_lan_msix;
-	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
-				   GFP_KERNEL);
-	if (!pf->msix_entries)
-		return -ENOMEM;
+/**
+ * i40e_pf_config_rss - Prepare for RSS if used
+ * @pf: board private structure
+ **/
+static int i40e_pf_config_rss(struct i40e_pf *pf)
+{
+	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+	u8 seed[I40E_HKEY_ARRAY_SIZE];
+	struct i40e_hw *hw = &pf->hw;
+	u32 reg_val;
+	u64 hena;
+	u8 *lut;
+	int ret;
 
-	for (i = 0; i < v_budget; i++)
-		pf->msix_entries[i].entry = i;
-	v_actual = i40e_reserve_msix_vectors(pf, v_budget);
+	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
+	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
+		((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
+	hena |= i40e_pf_get_default_rss_hena(pf);
 
-	if (v_actual < I40E_MIN_MSIX) {
-		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
-		kfree(pf->msix_entries);
-		pf->msix_entries = NULL;
-		pci_disable_msix(pf->pdev);
-		return -ENODEV;
+	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
+	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
 
-	} else if (v_actual == I40E_MIN_MSIX) {
-		/* Adjust for minimal MSIX use */
-		pf->num_vmdq_vsis = 0;
-		pf->num_vmdq_qps = 0;
-		pf->num_lan_qps = 1;
-		pf->num_lan_msix = 1;
+	/* Determine the RSS table size based on the hardware capabilities */
+	reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
+	reg_val = (pf->rss_table_size == 512) ?
+			(reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
+			(reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
+	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
 
-	} else if (!vectors_left) {
-		/* If we have limited resources, we will start with no vectors
-		 * for the special features and then allocate vectors to some
-		 * of these features based on the policy and at the end disable
-		 * the features that did not get any vectors.
+	/* Determine the RSS size of the VSI */
+	if (!vsi->rss_size) {
+		u16 qcount;
+
+		/* If the firmware does something weird during VSI init, we
+		 * could end up with zero TCs. Check for that to avoid
+		 * divide-by-zero. It probably won't pass traffic, but it also
+		 * won't panic.
 		 */
-		int vec;
+		qcount = vsi->num_queue_pairs /
+			 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
+		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
+	}
+	if (!vsi->rss_size)
+		return -EINVAL;
 
-		dev_info(&pf->pdev->dev,
-			 "MSI-X vector limit reached, attempting to redistribute vectors\n");
-		/* reserve the misc vector */
-		vec = v_actual - 1;
+	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
+	if (!lut)
+		return -ENOMEM;
 
-		/* Scale vector usage down */
-		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
-		pf->num_vmdq_vsis = 1;
-		pf->num_vmdq_qps = 1;
+	/* Use user configured lut if there is one, otherwise use default */
+	if (vsi->rss_lut_user)
+		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
+	else
+		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
 
-		/* partition out the remaining vectors */
-		switch (vec) {
-		case 2:
-			pf->num_lan_msix = 1;
-			break;
-		case 3:
-			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
-				pf->num_lan_msix = 1;
-				pf->num_iwarp_msix = 1;
-			} else {
-				pf->num_lan_msix = 2;
-			}
-			break;
-		default:
-			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
-				pf->num_iwarp_msix = min_t(int, (vec / 3),
-						 iwarp_requested);
-				pf->num_vmdq_vsis = min_t(int, (vec / 3),
-						  I40E_DEFAULT_NUM_VMDQ_VSI);
-			} else {
-				pf->num_vmdq_vsis = min_t(int, (vec / 2),
-						  I40E_DEFAULT_NUM_VMDQ_VSI);
-			}
-			if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
-				pf->num_fdsb_msix = 1;
-				vec--;
-			}
-			pf->num_lan_msix = min_t(int,
-			       (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
-							      pf->num_lan_msix);
-			pf->num_lan_qps = pf->num_lan_msix;
-			break;
-		}
-	}
+	/* Use user configured hash key if there is one, otherwise
+	 * use default.
+	 */
+	if (vsi->rss_hkey_user)
+		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
+	else
+		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
+	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
+	kfree(lut);
 
-	if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
-	    (pf->num_fdsb_msix == 0)) {
-		dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
-		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
-	}
-	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
-	    (pf->num_vmdq_msix == 0)) {
-		dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
-		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
-	}
+	return ret;
+}
 
-	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
-	    (pf->num_iwarp_msix == 0)) {
-		dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
-		pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
-	}
-	i40e_debug(&pf->hw, I40E_DEBUG_INIT,
-		   "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
-		   pf->num_lan_msix,
-		   pf->num_vmdq_msix * pf->num_vmdq_vsis,
-		   pf->num_fdsb_msix,
-		   pf->num_iwarp_msix);
+/**
+ * i40e_clear_rss_config_user - clear the user configured RSS hash keys
+ * and lookup table
+ * @vsi: Pointer to VSI structure
+ */
+static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
+{
+	if (!vsi)
+		return;
 
-	return v_actual;
+	kfree(vsi->rss_hkey_user);
+	vsi->rss_hkey_user = NULL;
+
+	kfree(vsi->rss_lut_user);
+	vsi->rss_lut_user = NULL;
 }
 
 /**
- * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
- * @vsi: the VSI being configured
- * @v_idx: index of the vector in the vsi struct
- * @cpu: cpu to be used on affinity_mask
+ * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
+ * @pf: board private structure
+ * @queue_count: the requested queue count for rss.
  *
- * We allocate one q_vector.  If allocation fails we return -ENOMEM.
+ * Returns 0 if RSS is not enabled; if enabled, returns the final RSS queue
+ * count, which may differ from the requested queue count.
+ * Note: expects to be called while under rtnl_lock()
  **/
-static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
+int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
 {
-	struct i40e_q_vector *q_vector;
+	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+	int new_rss_size;
 
-	/* allocate q_vector */
-	q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
-	if (!q_vector)
-		return -ENOMEM;
+	if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
+		return 0;
 
-	q_vector->vsi = vsi;
-	q_vector->v_idx = v_idx;
-	cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
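+	/* Clamp the request to the hardware's RSS maximum */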
+	new_rss_size = min_t(int, queue_count, pf->rss_size_max);
 
-	if (vsi->netdev)
-		netif_napi_add(vsi->netdev, &q_vector->napi,
-			       i40e_napi_poll, NAPI_POLL_WEIGHT);
+	if (queue_count != vsi->num_queue_pairs) {
+		u16 qcount;
 
-	q_vector->rx.latency_range = I40E_LOW_LATENCY;
-	q_vector->tx.latency_range = I40E_LOW_LATENCY;
+		vsi->req_queue_pairs = queue_count;
+		i40e_prep_for_reset(pf, true);
 
-	/* tie q_vector and vsi together */
-	vsi->q_vectors[v_idx] = q_vector;
+		pf->alloc_rss_size = new_rss_size;
 
-	return 0;
+		i40e_reset_and_rebuild(pf, true, true);
+
+		/* Discard the user configured hash keys and lut, if less
+		 * queues are enabled.
+		 */
+		if (queue_count < vsi->rss_size) {
+			i40e_clear_rss_config_user(vsi);
+			dev_dbg(&pf->pdev->dev,
+				"discard user configured hash keys and lut\n");
+		}
+
+		/* Reset vsi->rss_size, as number of enabled queues changed */
+		qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
+		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
+
+		i40e_pf_config_rss(pf);
+	}
+	dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count:  %d/%d\n",
+		 vsi->req_queue_pairs, pf->rss_size_max);
+	return pf->alloc_rss_size;
 }
 
 /**
- * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
- * @vsi: the VSI being configured
- *
- * We allocate one q_vector per queue interrupt.  If allocation fails we
- * return -ENOMEM.
+ * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
+ * @pf: board private structure
  **/
-static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
+i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
 {
-	struct i40e_pf *pf = vsi->back;
-	int err, v_idx, num_q_vectors, current_cpu;
-
-	/* if not MSIX, give the one vector only to the LAN VSI */
-	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
-		num_q_vectors = vsi->num_q_vectors;
-	else if (vsi == pf->vsi[pf->lan_vsi])
-		num_q_vectors = 1;
-	else
-		return -EINVAL;
+	i40e_status status;
+	bool min_valid, max_valid;
+	u32 max_bw, min_bw;
 
-	current_cpu = cpumask_first(cpu_online_mask);
+	status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
+			&min_valid, &max_valid);
 
-	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
-		err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
-		if (err)
-			goto err_out;
-		current_cpu = cpumask_next(current_cpu, cpu_online_mask);
-		if (unlikely(current_cpu >= nr_cpu_ids))
-			current_cpu = cpumask_first(cpu_online_mask);
+	if (!status) {
+		if (min_valid)
+			pf->min_bw = min_bw;
+		if (max_valid)
+			pf->max_bw = max_bw;
 	}
 
-	return 0;
+	return status;
+}
 
-err_out:
-	while (v_idx--)
-		i40e_free_q_vector(vsi, v_idx);
+/**
+ * i40e_set_partition_bw_setting - Set BW settings for this PF partition
+ * @pf: board private structure
+ **/
+i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
+{
+	struct i40e_aqc_configure_partition_bw_data bw_data;
+	i40e_status status;
+
+	/* Set the valid bit for this PF */
+	bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
+	bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
+	bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
+
+	/* Set the new bandwidths */
+	status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
 
-	return err;
+	return status;
 }
 
 /**
- * i40e_init_interrupt_scheme - Determine proper interrupt scheme
- * @pf: board private structure to initialize
+ * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
+ * @pf: board private structure
  **/
-static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
+i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
 {
-	int vectors = 0;
-	ssize_t size;
+	/* Commit temporary BW setting to permanent NVM image */
+	enum i40e_admin_queue_err last_aq_status;
+	i40e_status ret;
+	u16 nvm_word;
 
-	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
-		vectors = i40e_init_msix(pf);
-		if (vectors < 0) {
-			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED	|
-				       I40E_FLAG_IWARP_ENABLED	|
-				       I40E_FLAG_RSS_ENABLED	|
-				       I40E_FLAG_DCB_CAPABLE	|
-				       I40E_FLAG_DCB_ENABLED	|
-				       I40E_FLAG_SRIOV_ENABLED	|
-				       I40E_FLAG_FD_SB_ENABLED	|
-				       I40E_FLAG_FD_ATR_ENABLED	|
-				       I40E_FLAG_VMDQ_ENABLED);
+	if (pf->hw.partition_id != 1) {
+		dev_info(&pf->pdev->dev,
+			 "Commit BW only works on partition 1! This is partition %d",
+			 pf->hw.partition_id);
+		ret = I40E_NOT_SUPPORTED;
+		goto bw_commit_out;
+	}
 
-			/* rework the queue expectations without MSIX */
-			i40e_determine_queue_usage(pf);
-		}
+	/* Acquire NVM for read access */
+	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
+	last_aq_status = pf->hw.aq.asq_last_status;
+	if (ret) {
+		dev_info(&pf->pdev->dev,
+			 "Cannot acquire NVM for read access, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, last_aq_status));
+		goto bw_commit_out;
 	}
 
-	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
-	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
-		dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
-		vectors = pci_enable_msi(pf->pdev);
-		if (vectors < 0) {
-			dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
-				 vectors);
-			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
-		}
-		vectors = 1;  /* one MSI or Legacy vector */
+	/* Read word 0x10 of NVM - SW compatibility word 1 */
+	ret = i40e_aq_read_nvm(&pf->hw,
+			       I40E_SR_NVM_CONTROL_WORD,
+			       0x10, sizeof(nvm_word), &nvm_word,
+			       false, NULL);
+	/* Save off last admin queue command status before releasing
+	 * the NVM
+	 */
+	last_aq_status = pf->hw.aq.asq_last_status;
+	i40e_release_nvm(&pf->hw);
+	if (ret) {
+		dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, last_aq_status));
+		goto bw_commit_out;
 	}
 
-	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
-		dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
+	/* Wait a bit for NVM release to complete */
+	msleep(50);
 
-	/* set up vector assignment tracking */
-	size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
-	pf->irq_pile = kzalloc(size, GFP_KERNEL);
-	if (!pf->irq_pile) {
-		dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
-		return -ENOMEM;
+	/* Acquire NVM for write access */
+	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
+	last_aq_status = pf->hw.aq.asq_last_status;
+	if (ret) {
+		dev_info(&pf->pdev->dev,
+			 "Cannot acquire NVM for write access, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, last_aq_status));
+		goto bw_commit_out;
 	}
-	pf->irq_pile->num_entries = vectors;
-	pf->irq_pile->search_hint = 0;
-
-	/* track first vector for misc interrupts, ignore return */
-	(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
+	/* Write it back out unchanged to initiate update NVM,
+	 * which will force a write of the shadow (alt) RAM to
+	 * the NVM - thus storing the bandwidth values permanently.
+	 */
+	ret = i40e_aq_update_nvm(&pf->hw,
+				 I40E_SR_NVM_CONTROL_WORD,
+				 0x10, sizeof(nvm_word),
+				 &nvm_word, true, 0, NULL);
+	/* Save off last admin queue command status before releasing
+	 * the NVM
+	 */
+	last_aq_status = pf->hw.aq.asq_last_status;
+	i40e_release_nvm(&pf->hw);
+	if (ret)
+		dev_info(&pf->pdev->dev,
+			 "BW settings NOT SAVED, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, last_aq_status));
+bw_commit_out:
 
-	return 0;
+	return ret;
 }
 
 /**
- * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
+ * i40e_is_total_port_shutdown_enabled - read nvm and return value
+ * if total port shutdown feature is enabled for this pf
  * @pf: board private structure
+ **/
+static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
+{
+#define I40E_TOTAL_PORT_SHUTDOWN_ENABLED	BIT(4)
+#define I40E_FEATURES_ENABLE_PTR		0x2A
+#define I40E_CURRENT_SETTING_PTR		0x2B
+#define I40E_LINK_BEHAVIOR_WORD_OFFSET		0x2D
+#define I40E_LINK_BEHAVIOR_WORD_LENGTH		0x1
+#define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED	BIT(0)
+#define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH	4
+	i40e_status read_status = I40E_SUCCESS;
+	u16 sr_emp_sr_settings_ptr = 0;
+	u16 features_enable = 0;
+	u16 link_behavior = 0;
+	bool ret = false;
+
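+	/* Walk the NVM: the EMP settings pointer leads to the features
+	 * enable word; if the feature bit is set, read this port's
+	 * link-behavior bits and test the OS-forced-enabled flag.
+	 */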
+	read_status = i40e_read_nvm_word(&pf->hw,
+					 I40E_SR_EMP_SR_SETTINGS_PTR,
+					 &sr_emp_sr_settings_ptr);
+	if (read_status)
+		goto err_nvm;
+	read_status = i40e_read_nvm_word(&pf->hw,
+					 sr_emp_sr_settings_ptr +
+					 I40E_FEATURES_ENABLE_PTR,
+					 &features_enable);
+	if (read_status)
+		goto err_nvm;
+	if (I40E_TOTAL_PORT_SHUTDOWN_ENABLED & features_enable) {
+		read_status =
+		i40e_read_nvm_module_data(&pf->hw,
+					  I40E_SR_EMP_SR_SETTINGS_PTR,
+					  I40E_CURRENT_SETTING_PTR,
+					  I40E_LINK_BEHAVIOR_WORD_OFFSET,
+					  I40E_LINK_BEHAVIOR_WORD_LENGTH,
+					  &link_behavior);
+		if (read_status)
+			goto err_nvm;
+		link_behavior >>= (pf->hw.port *
+				   I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH);
+		ret = I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED & link_behavior;
+	}
+	return ret;
+
+err_nvm:
+	dev_warn(&pf->pdev->dev,
+		 "Total Port Shutdown feature is off due to read nvm error:%d\n",
+		 read_status);
+	return ret;
+}
+
+/**
+ * i40e_sw_init - Initialize general software structures (struct i40e_pf)
+ * @pf: board private structure to initialize
  *
- * This sets up the handler for MSIX 0, which is used to manage the
- * non-queue interrupts, e.g. AdminQ and errors.  This is not used
- * when in MSI or Legacy interrupt mode.
+ * i40e_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
  **/
-static int i40e_setup_misc_vector(struct i40e_pf *pf)
+static int i40e_sw_init(struct i40e_pf *pf)
 {
-	struct i40e_hw *hw = &pf->hw;
 	int err = 0;
+	int size;
+	u16 pow;
 
-	/* Only request the irq if this is the first time through, and
-	 * not when we're rebuilding after a Reset
+	pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
+				(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
+	if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
+		if (I40E_DEBUG_USER & debug)
+			pf->hw.debug_mask = debug;
+		pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
+						I40E_DEFAULT_MSG_ENABLE);
+	}
+
+	/* Set default capability flags */
+	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
+		    I40E_FLAG_MSI_ENABLED     |
+		    I40E_FLAG_MSIX_ENABLED;
+
+	/* Set default ITR */
+	pf->rx_itr_default = I40E_ITR_RX_DEF;
+	pf->tx_itr_default = I40E_ITR_TX_DEF;
+	/* Depending on PF configurations, it is possible that the RSS
+	 * maximum might end up larger than the available queues
 	 */
-	if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) {
-		err = request_irq(pf->msix_entries[0].vector,
-				  i40e_intr, 0, pf->int_name, pf);
-		if (err) {
+	pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
+	pf->alloc_rss_size = 1;
+	pf->rss_table_size = pf->hw.func_caps.rss_table_size;
+	pf->rss_size_max = min_t(int, pf->rss_size_max,
+				 pf->hw.func_caps.num_tx_qp);
+
+	/* round the cpu count up to the nearest power of two */
+	pow = roundup_pow_of_two(num_online_cpus());
+	pf->rss_size_max = min_t(int, pf->rss_size_max, pow);
+
+	if (pf->hw.func_caps.rss) {
+		pf->flags |= I40E_FLAG_RSS_ENABLED;
+		pf->alloc_rss_size = min_t(int, pf->rss_size_max,
+					   num_online_cpus());
+	}
+	/* MFP mode enabled */
+	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
+		pf->flags |= I40E_FLAG_MFP_ENABLED;
+
+		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
+		if (i40e_get_partition_bw_setting(pf)) {
+			dev_warn(&pf->pdev->dev,
+				 "Could not get partition bw settings\n");
+		} else {
 			dev_info(&pf->pdev->dev,
-				 "request_irq for %s failed: %d\n",
-				 pf->int_name, err);
-			return -EFAULT;
+				 "Partition BW Min = %8.8x, Max = %8.8x\n",
+				 pf->min_bw, pf->max_bw);
+
+			/* nudge the Tx scheduler */
+			i40e_set_partition_bw_setting(pf);
 		}
 	}
 
-	i40e_enable_misc_int_causes(pf);
+	if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
+	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
+		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
+		if (pf->flags & I40E_FLAG_MFP_ENABLED &&
+		    pf->hw.num_partitions > 1)
+			dev_info(&pf->pdev->dev,
+				 "Flow Director Sideband mode Disabled in MFP mode\n");
+		else
+			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
+		pf->fdir_pf_filter_count =
+				 pf->hw.func_caps.fd_filters_guaranteed;
+		pf->hw.fdir_shared_filter_count =
+				 pf->hw.func_caps.fd_filters_best_effort;
+	}
 
-	/* associate no queues to the misc vector */
-	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
-	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
+	if (pf->hw.mac.type == I40E_MAC_X722) {
+		pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
+				    I40E_HW_128_QP_RSS_CAPABLE |
+				    I40E_HW_ATR_EVICT_CAPABLE |
+				    I40E_HW_WB_ON_ITR_CAPABLE |
+				    I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
+				    I40E_HW_NO_PCI_LINK_CHECK |
+				    I40E_HW_USE_SET_LLDP_MIB |
+				    I40E_HW_GENEVE_OFFLOAD_CAPABLE |
+				    I40E_HW_PTP_L4_CAPABLE |
+				    I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
+				    I40E_HW_OUTER_UDP_CSUM_CAPABLE);
 
-	i40e_flush(hw);
+#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
+		if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
+						I40E_FDEVICT_PCTYPE_DEFAULT) {
+			dev_warn(&pf->pdev->dev, "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
+			pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
+		}
+	} else if ((pf->hw.aq.api_maj_ver > 1) ||
+		   ((pf->hw.aq.api_maj_ver == 1) &&
+		    (pf->hw.aq.api_min_ver > 4))) {
+		/* Supported in FW API version higher than 1.4 */
+		pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
 
-	i40e_irq_dynamic_enable_icr0(pf, true);
+		/* supports mpls header skip and csum for following headers */
+		pf->hw_features |= I40E_HW_MPLS_HDR_OFFLOAD_CAPABLE;
+	}
 
-	return err;
-}
+	/* Enable HW ATR eviction if possible */
+	if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
+		pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
 
-/**
- * i40e_config_rss_aq - Prepare for RSS using AQ commands
- * @vsi: vsi structure
- * @seed: RSS hash seed
- **/
-static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
-			      u8 *lut, u16 lut_size)
-{
-	struct i40e_pf *pf = vsi->back;
-	struct i40e_hw *hw = &pf->hw;
-	int ret = 0;
+	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
+	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
+	    (pf->hw.aq.fw_maj_ver < 4))) {
+		pf->hw_features |= I40E_HW_RESTART_AUTONEG;
+		/* No DCB support  for FW < v4.33 */
+		pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
+	}
+
+	/* Disable FW LLDP if FW < v4.3 */
+	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
+	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
+	    (pf->hw.aq.fw_maj_ver < 4)))
+		pf->hw_features |= I40E_HW_STOP_FW_LLDP;
+
+	/* Use the FW Set LLDP MIB API if FW > v4.40 */
+	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
+	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
+	    (pf->hw.aq.fw_maj_ver >= 5)))
+		pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
+
+	/* Enable PTP L4 if FW > v6.0 */
+	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
+	    (pf->hw.aq.fw_maj_ver >= 6))
+		pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
 
-	if (seed) {
-		struct i40e_aqc_get_set_rss_key_data *seed_dw =
-			(struct i40e_aqc_get_set_rss_key_data *)seed;
-		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
-		if (ret) {
-			dev_info(&pf->pdev->dev,
-				 "Cannot set RSS key, err %s aq_err %s\n",
-				 i40e_stat_str(hw, ret),
-				 i40e_aq_str(hw, hw->aq.asq_last_status));
-			return ret;
-		}
+	if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
+		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
+		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
+		pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
 	}
-	if (lut) {
-		bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
 
-		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
-		if (ret) {
+	if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
+		pf->flags |= I40E_FLAG_IWARP_ENABLED;
+		/* iWARP needs one extra vector for CQP, just like MISC. */
+		pf->num_iwarp_msix = (int)num_online_cpus() + 1;
+	}
+	/* Stopping FW LLDP engine is supported on XL710 and X722
+	 * starting from FW versions determined in i40e_init_adminq.
+	 * Stopping the FW LLDP engine is not supported on XL710
+	 * when NPAR is active, so unset this HW flag in that case.
+	 */
+	if (pf->hw.mac.type == I40E_MAC_XL710 &&
+	    pf->hw.func_caps.npar_enable &&
+	    (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
+		pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+
+#ifndef HAVE_SWIOTLB_SKIP_CPU_SYNC
+	/* force legacy Rx if SKIP_CPU_SYNC is not supported */
+	pf->flags |= I40E_FLAG_LEGACY_RX;
+#endif
+#ifdef CONFIG_PCI_IOV
+	if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
+#if !defined(HAVE_SRIOV_CONFIGURE) && !defined(HAVE_RHEL6_SRIOV_CONFIGURE)
+		pf->num_req_vfs = 0;
+		if (max_vfs[pf->instance] > 0 &&
+		    max_vfs[pf->instance] <= pf->hw.func_caps.num_vfs) {
+			pf->flags |= I40E_FLAG_SRIOV_ENABLED;
+			/* assign number of SR-IOV VFs */
+			pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
+			pf->num_req_vfs = max_vfs[pf->instance];
+		} else if (max_vfs[pf->instance] == 0)  {
 			dev_info(&pf->pdev->dev,
-				 "Cannot set RSS lut, err %s aq_err %s\n",
-				 i40e_stat_str(hw, ret),
-				 i40e_aq_str(hw, hw->aq.asq_last_status));
-			return ret;
+				 " SR-IOV is disabled, Module Parameter max_vfs value %d = disabled\n",
+				 max_vfs[pf->instance]);
+		} else if (max_vfs[pf->instance] != -1) {
+			dev_err(&pf->pdev->dev,
+				"Module Parameter max_vfs value %d is out of range. Maximum value for the device: %d - resetting to zero\n",
+				max_vfs[pf->instance],
+				pf->hw.func_caps.num_vfs);
 		}
+#else
+		pf->flags |= I40E_FLAG_SRIOV_ENABLED;
+		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
+		pf->num_req_vfs = min_t(int,
+					pf->hw.func_caps.num_vfs,
+					I40E_MAX_VF_COUNT);
+#endif /* HAVE_SRIOV_CONFIGURE */
 	}
-	return ret;
-}
+#endif /* CONFIG_PCI_IOV */
+	pf->eeprom_version = 0xDEAD;
+	pf->lan_veb = I40E_NO_VEB;
+	pf->lan_vsi = I40E_NO_VSI;
 
-/**
- * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
- * @vsi: Pointer to vsi structure
- * @seed: Buffter to store the hash keys
- * @lut: Buffer to store the lookup table entries
- * @lut_size: Size of buffer to store the lookup table entries
- *
- * Return 0 on success, negative on failure
- */
-static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
-			   u8 *lut, u16 lut_size)
-{
-	struct i40e_pf *pf = vsi->back;
-	struct i40e_hw *hw = &pf->hw;
-	int ret = 0;
+	/* By default FW has this off for performance reasons */
+	pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
 
-	if (seed) {
-		ret = i40e_aq_get_rss_key(hw, vsi->id,
-			(struct i40e_aqc_get_set_rss_key_data *)seed);
-		if (ret) {
-			dev_info(&pf->pdev->dev,
-				 "Cannot get RSS key, err %s aq_err %s\n",
-				 i40e_stat_str(&pf->hw, ret),
-				 i40e_aq_str(&pf->hw,
-					     pf->hw.aq.asq_last_status));
-			return ret;
-		}
+	/* set up queue assignment tracking */
+	size = sizeof(struct i40e_lump_tracking)
+		+ (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
+	pf->qp_pile = kzalloc(size, GFP_KERNEL);
+	if (!pf->qp_pile) {
+		err = -ENOMEM;
+		goto sw_init_done;
 	}
+	pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
+	pf->qp_pile->search_hint = 0;
 
-	if (lut) {
-		bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
+	pf->tx_timeout_recovery_level = 1;
 
-		ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
-		if (ret) {
-			dev_info(&pf->pdev->dev,
-				 "Cannot get RSS lut, err %s aq_err %s\n",
-				 i40e_stat_str(&pf->hw, ret),
-				 i40e_aq_str(&pf->hw,
-					     pf->hw.aq.asq_last_status));
-			return ret;
-		}
+	if (pf->hw.mac.type != I40E_MAC_X722 &&
+	    i40e_is_total_port_shutdown_enabled(pf)) {
+		/* Link down on close must be on when total port shutdown
+		 * is enabled for a given port
+		 */
+		pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN
+			  | I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED);
+		dev_info(&pf->pdev->dev,
+			 "Total Port Shutdown is enabled, link-down-on-close forced on\n");
 	}
 
-	return ret;
+	/* Add default values for ingress and egress vlan */
+	pf->ingress_vlan = I40E_NO_VF_MIRROR;
+	pf->egress_vlan = I40E_NO_VF_MIRROR;
+
+	mutex_init(&pf->switch_mutex);
+
+sw_init_done:
+	return err;
 }
 
+#ifdef HAVE_NDO_SET_FEATURES
+#endif
 /**
- * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
- * @vsi: VSI structure
+ * i40e_set_ntuple - set the ntuple feature flag and take action
+ * @pf: board private structure
+ * @features: the feature set that the stack is suggesting
+ *
+ * returns a bool to indicate if reset needs to happen
  **/
-static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
+bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
 {
-	u8 seed[I40E_HKEY_ARRAY_SIZE];
-	struct i40e_pf *pf = vsi->back;
-	u8 *lut;
-	int ret;
-
-	if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
-		return 0;
-
-	if (!vsi->rss_size)
-		vsi->rss_size = min_t(int, pf->alloc_rss_size,
-				      vsi->num_queue_pairs);
-	if (!vsi->rss_size)
-		return -EINVAL;
+	bool need_reset = false;
 
-	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
-	if (!lut)
-		return -ENOMEM;
-	/* Use the user configured hash keys and lookup table if there is one,
-	 * otherwise use default
+	/* Check if Flow Director n-tuple support was enabled or disabled.  If
+	 * the state changed, we need to reset.
 	 */
-	if (vsi->rss_lut_user)
-		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
-	else
-		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
-	if (vsi->rss_hkey_user)
-		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
-	else
-		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
-	ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
-	kfree(lut);
-
-	return ret;
+	if (features & NETIF_F_NTUPLE) {
+		/* Enable filters and mark for reset */
+		if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+			need_reset = true;
+		/* enable FD_SB only if there is an MSI-X vector and no cloud
+		 * filters exist
+		 */
+		if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
+			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
+			pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
+		}
+	} else {
+		/* turn off filters, mark for reset and clear SW filter list */
+		if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+			need_reset = true;
+			i40e_fdir_filter_exit(pf);
+			i40e_cloud_filter_exit(pf);
+		}
+		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+		clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
+		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
+		/* reset fd counters */
+		pf->fd_add_err = pf->fd_atr_cnt = 0;
+		/* if ATR was auto disabled it can be re-enabled. */
+		if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
+			if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+			    (I40E_DEBUG_FD & pf->hw.debug_mask))
+				dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
+	}
+	return need_reset;
 }
 
+#ifdef HAVE_NDO_SET_FEATURES
 /**
- * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
- * @vsi: Pointer to vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
- *
- * Returns 0 on success, negative on failure
+ * i40e_clear_rss_lut - clear the rx hash lookup table
+ * @vsi: the VSI being configured
  **/
-static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
-			       const u8 *lut, u16 lut_size)
+static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
 {
 	struct i40e_pf *pf = vsi->back;
 	struct i40e_hw *hw = &pf->hw;
 	u16 vf_id = vsi->vf_id;
 	u8 i;
 
-	/* Fill out hash function seed */
-	if (seed) {
-		u32 *seed_dw = (u32 *)seed;
-
-		if (vsi->type == I40E_VSI_MAIN) {
-			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
-				wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
-		} else if (vsi->type == I40E_VSI_SRIOV) {
-			for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
-				wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
-		} else {
-			dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
-		}
-	}
-
-	if (lut) {
-		u32 *lut_dw = (u32 *)lut;
-
-		if (vsi->type == I40E_VSI_MAIN) {
-			if (lut_size != I40E_HLUT_ARRAY_SIZE)
-				return -EINVAL;
-			for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
-				wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
-		} else if (vsi->type == I40E_VSI_SRIOV) {
-			if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
-				return -EINVAL;
-			for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
-				wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
-		} else {
-			dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
-		}
+	if (vsi->type == I40E_VSI_MAIN) {
+		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
+			wr32(hw, I40E_PFQF_HLUT(i), 0);
+	} else if (vsi->type == I40E_VSI_SRIOV) {
+		for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
+			i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
+	} else {
+		dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
 	}
-	i40e_flush(hw);
-
-	return 0;
 }
 
 /**
- * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
- * @vsi: Pointer to VSI structure
- * @seed: Buffer to store the keys
- * @lut: Buffer to store the lookup table entries
- * @lut_size: Size of buffer to store the lookup table entries
- *
- * Returns 0 on success, negative on failure
- */
-static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
-			    u8 *lut, u16 lut_size)
+ * i40e_set_features - set the netdev feature flags
+ * @netdev: ptr to the netdev being adjusted
+ * @features: the feature set that the stack is suggesting
+ * Note: expects to be called while under rtnl_lock()
+ **/
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+static int i40e_set_features(struct net_device *netdev, u32 features)
+#else
+static int i40e_set_features(struct net_device *netdev,
+			     netdev_features_t features)
+#endif
 {
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
 	struct i40e_pf *pf = vsi->back;
-	struct i40e_hw *hw = &pf->hw;
-	u16 i;
+	bool need_reset;
 
-	if (seed) {
-		u32 *seed_dw = (u32 *)seed;
+	if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
+		i40e_pf_config_rss(pf);
+	else if (!(features & NETIF_F_RXHASH) &&
+		 netdev->features & NETIF_F_RXHASH)
+		i40e_clear_rss_lut(vsi);
 
-		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
-			seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
-	}
-	if (lut) {
-		u32 *lut_dw = (u32 *)lut;
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+#else
+	if (features & NETIF_F_HW_VLAN_RX)
+#endif
+		i40e_vlan_stripping_enable(vsi);
+	else
+		i40e_vlan_stripping_disable(vsi);
 
-		if (lut_size != I40E_HLUT_ARRAY_SIZE)
-			return -EINVAL;
-		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
-			lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
+#ifdef NETIF_F_HW_TC
+	if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
+		dev_err(&pf->pdev->dev,
+			"Offloaded tc filters active, can't turn hw_tc_offload off");
+		return -EINVAL;
 	}
+#endif
+	need_reset = i40e_set_ntuple(pf, features);
+
+	if (need_reset)
+		i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
 
 	return 0;
 }
 
+#endif /* HAVE_NDO_SET_FEATURES */
 /**
- * i40e_config_rss - Configure RSS keys and lut
- * @vsi: Pointer to VSI structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
+ * i40e_get_udp_port_idx - Look up a UDP port possibly offloaded for Rx
+ * @pf: board private structure
+ * @port: The UDP port to look up
  *
- * Returns 0 on success, negative on failure
- */
-int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
+ * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
+ **/
+static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
 {
-	struct i40e_pf *pf = vsi->back;
+	u8 i;
 
-	if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
-		return i40e_config_rss_aq(vsi, seed, lut, lut_size);
-	else
-		return i40e_config_rss_reg(vsi, seed, lut, lut_size);
+	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+		/* Do not report ports with pending deletions as
+		 * being available.
+		 */
+		if (!port && (pf->pending_udp_bitmap & BIT_ULL(i)))
+			continue;
+		if (pf->udp_ports[i].port == port)
+			return i;
+	}
+
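+	/* i reached I40E_MAX_PF_UDP_OFFLOAD_PORTS: port not found */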
+	return i;
 }
 
 /**
- * i40e_get_rss - Get RSS keys and lut
- * @vsi: Pointer to VSI structure
- * @seed: Buffer to store the keys
- * @lut: Buffer to store the lookup table entries
- * lut_size: Size of buffer to store the lookup table entries
- *
- * Returns 0 on success, negative on failure
- */
-int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
+ * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
+ * @netdev: This physical port's netdev
+ * @ti: Tunnel endpoint information
+ **/
+__maybe_unused
+static void i40e_udp_tunnel_add(struct net_device *netdev,
+				struct udp_tunnel_info *ti)
 {
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
 	struct i40e_pf *pf = vsi->back;
+	u16 port = ntohs(ti->port);
+	u8 next_idx;
+	u8 idx;
 
-	if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
-		return i40e_get_rss_aq(vsi, seed, lut, lut_size);
-	else
-		return i40e_get_rss_reg(vsi, seed, lut, lut_size);
-}
+	idx = i40e_get_udp_port_idx(pf, port);
 
-/**
- * i40e_fill_rss_lut - Fill the RSS lookup table with default values
- * @pf: Pointer to board private structure
- * @lut: Lookup table
- * @rss_table_size: Lookup table size
- * @rss_size: Range of queue number for hashing
- */
-void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
-		       u16 rss_table_size, u16 rss_size)
-{
-	u16 i;
+	/* Check if port already exists */
+	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+		netdev_info(netdev, "port %d already offloaded\n", port);
+		return;
+	}
 
-	for (i = 0; i < rss_table_size; i++)
-		lut[i] = i % rss_size;
+	/* Now check if there is space to add the new port */
+	next_idx = i40e_get_udp_port_idx(pf, 0);
+
+	if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+		netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
+			    port);
+		return;
+	}
+
+	switch (ti->type) {
+	case UDP_TUNNEL_TYPE_VXLAN:
+		pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
+		break;
+	case UDP_TUNNEL_TYPE_GENEVE:
+		if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE))
+			return;
+		pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
+		break;
+	default:
+		return;
+	}
+
+	/* New port: add it and mark its index in the bitmap */
+	pf->udp_ports[next_idx].port = port;
+	pf->udp_ports[next_idx].filter_index = I40E_UDP_PORT_INDEX_UNUSED;
+	pf->pending_udp_bitmap |= BIT_ULL(next_idx);
+	set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
 }
 
 /**
- * i40e_pf_config_rss - Prepare for RSS if used
- * @pf: board private structure
+ * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
+ * @netdev: This physical port's netdev
+ * @ti: Tunnel endpoint information
  **/
-static int i40e_pf_config_rss(struct i40e_pf *pf)
+__maybe_unused
+static void i40e_udp_tunnel_del(struct net_device *netdev,
+				struct udp_tunnel_info *ti)
 {
-	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
-	u8 seed[I40E_HKEY_ARRAY_SIZE];
-	u8 *lut;
-	struct i40e_hw *hw = &pf->hw;
-	u32 reg_val;
-	u64 hena;
-	int ret;
-
-	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
-	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
-		((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
-	hena |= i40e_pf_get_default_rss_hena(pf);
-
-	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
-	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	u16 port = ntohs(ti->port);
+	u8 idx;
 
-	/* Determine the RSS table size based on the hardware capabilities */
-	reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
-	reg_val = (pf->rss_table_size == 512) ?
-			(reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
-			(reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
-	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
+	idx = i40e_get_udp_port_idx(pf, port);
 
-	/* Determine the RSS size of the VSI */
-	if (!vsi->rss_size) {
-		u16 qcount;
+	/* Check if the port exists */
+	if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
+		goto not_found;
 
-		qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
-		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
+	switch (ti->type) {
+	case UDP_TUNNEL_TYPE_VXLAN:
+		if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
+			goto not_found;
+		break;
+	case UDP_TUNNEL_TYPE_GENEVE:
+		if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE))
+			return;
+		if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
+			goto not_found;
+		break;
+	default:
+		goto not_found;
 	}
-	if (!vsi->rss_size)
-		return -EINVAL;
-
-	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
-	if (!lut)
-		return -ENOMEM;
 
-	/* Use user configured lut if there is one, otherwise use default */
-	if (vsi->rss_lut_user)
-		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
-	else
-		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
+	/* if port exists, set it to 0 (mark for deletion)
+	 * and make it pending
+	 */
+	pf->udp_ports[idx].port = 0;
 
-	/* Use user configured hash key if there is one, otherwise
-	 * use default.
+	/* Toggle pending bit instead of setting it. This way if we are
+	 * deleting a port that has yet to be added we just clear the pending
+	 * bit and don't have to worry about it.
 	 */
-	if (vsi->rss_hkey_user)
-		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
-	else
-		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
-	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
-	kfree(lut);
+	pf->pending_udp_bitmap ^= BIT_ULL(idx);
+	set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
 
-	return ret;
+	return;
+not_found:
+	netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
+		    port);
 }
 
+#if defined(HAVE_VXLAN_RX_OFFLOAD) && !defined(HAVE_UDP_ENC_RX_OFFLOAD)
+#if IS_ENABLED(CONFIG_VXLAN)
 /**
- * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
- * @pf: board private structure
- * @queue_count: the requested queue count for rss.
- *
- * returns 0 if rss is not enabled, if enabled returns the final rss queue
- * count which may be different from the requested queue count.
- * Note: expects to be called while under rtnl_lock()
+ * i40e_add_vxlan_port - Get notifications about vxlan ports that come up
+ * @netdev: This physical port's netdev
+ * @sa_family: Socket Family that vxlan is notifying us about
+ * @port: New UDP port number that vxlan started listening to
  **/
-int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
+static void i40e_add_vxlan_port(struct net_device *netdev,
+				sa_family_t sa_family, __be16 port)
 {
-	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
-	int new_rss_size;
-
-	if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
-		return 0;
-
-	new_rss_size = min_t(int, queue_count, pf->rss_size_max);
-
-	if (queue_count != vsi->num_queue_pairs) {
-		u16 qcount;
+	struct udp_tunnel_info ti = {
+		.type = UDP_TUNNEL_TYPE_VXLAN,
+		.sa_family = sa_family,
+		.port = port,
+	};
 
-		vsi->req_queue_pairs = queue_count;
-		i40e_prep_for_reset(pf, true);
-
-		pf->alloc_rss_size = new_rss_size;
-
-		i40e_reset_and_rebuild(pf, true, true);
-
-		/* Discard the user configured hash keys and lut, if less
-		 * queues are enabled.
-		 */
-		if (queue_count < vsi->rss_size) {
-			i40e_clear_rss_config_user(vsi);
-			dev_dbg(&pf->pdev->dev,
-				"discard user configured hash keys and lut\n");
-		}
+	i40e_udp_tunnel_add(netdev, &ti);
+}
 
-		/* Reset vsi->rss_size, as number of enabled queues changed */
-		qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
-		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
+/**
+ * i40e_del_vxlan_port - Get notifications about vxlan ports that go away
+ * @netdev: This physical port's netdev
+ * @sa_family: Socket Family that vxlan is notifying us about
+ * @port: UDP port number that vxlan stopped listening to
+ **/
+static void i40e_del_vxlan_port(struct net_device *netdev,
+				sa_family_t sa_family, __be16 port)
+{
+	struct udp_tunnel_info ti = {
+		.type = UDP_TUNNEL_TYPE_VXLAN,
+		.sa_family = sa_family,
+		.port = port,
+	};
 
-		i40e_pf_config_rss(pf);
-	}
-	dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count:  %d/%d\n",
-		 vsi->req_queue_pairs, pf->rss_size_max);
-	return pf->alloc_rss_size;
+	i40e_udp_tunnel_del(netdev, &ti);
 }
-
+#endif /* CONFIG_VXLAN */
+#endif /* HAVE_VXLAN_RX_OFFLOAD && !HAVE_UDP_ENC_RX_OFFLOAD */
+#if defined(HAVE_GENEVE_RX_OFFLOAD) && !defined(HAVE_UDP_ENC_RX_OFFLOAD)
+#if IS_ENABLED(CONFIG_GENEVE)
 /**
- * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
- * @pf: board private structure
+ * i40e_add_geneve_port - Get notifications about GENEVE ports that come up
+ * @netdev: This physical port's netdev
+ * @sa_family: Socket Family that GENEVE is notifying us about
+ * @port: New UDP port number that GENEVE started listening to
  **/
-i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
+static void i40e_add_geneve_port(struct net_device *netdev,
+				 sa_family_t sa_family, __be16 port)
 {
-	i40e_status status;
-	bool min_valid, max_valid;
-	u32 max_bw, min_bw;
+	struct udp_tunnel_info ti = {
+		.type = UDP_TUNNEL_TYPE_GENEVE,
+		.sa_family = sa_family,
+		.port = port,
+	};
 
-	status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
-					   &min_valid, &max_valid);
+	i40e_udp_tunnel_add(netdev, &ti);
+}
 
-	if (!status) {
-		if (min_valid)
-			pf->min_bw = min_bw;
-		if (max_valid)
-			pf->max_bw = max_bw;
-	}
+/**
+ * i40e_del_geneve_port - Get notifications about GENEVE ports that go away
+ * @netdev: This physical port's netdev
+ * @sa_family: Socket Family that GENEVE is notifying us about
+ * @port: UDP port number that GENEVE stopped listening to
+ **/
+static void i40e_del_geneve_port(struct net_device *netdev,
+				 sa_family_t sa_family, __be16 port)
+{
+	struct udp_tunnel_info ti = {
+		.type = UDP_TUNNEL_TYPE_GENEVE,
+		.sa_family = sa_family,
+		.port = port,
+	};
 
-	return status;
+	i40e_udp_tunnel_del(netdev, &ti);
 }
 
-/**
- * i40e_set_partition_bw_setting - Set BW settings for this PF partition
- * @pf: board private structure
- **/
-i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
+#endif /* CONFIG_GENEVE */
+#endif /* HAVE_GENEVE_RX_OFFLOAD  && !HAVE_UDP_ENC_RX_OFFLOAD */
+#ifdef HAVE_NDO_GET_PHYS_PORT_ID
+static int i40e_get_phys_port_id(struct net_device *netdev,
+				 struct netdev_phys_item_id *ppid)
 {
-	struct i40e_aqc_configure_partition_bw_data bw_data;
-	i40e_status status;
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf *pf = np->vsi->back;
+	struct i40e_hw *hw = &pf->hw;
 
-	/* Set the valid bit for this PF */
-	bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
-	bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
-	bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
+	if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
+		return -EOPNOTSUPP;
 
-	/* Set the new bandwidths */
-	status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
+	ppid->id_len = min_t(int, sizeof(hw->mac.port_addr),
+			     sizeof(ppid->id));
+	memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
 
-	return status;
+	return 0;
 }
 
+#endif /* HAVE_NDO_GET_PHYS_PORT_ID */
 /**
- * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
- * @pf: board private structure
- **/
-i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
+ * i40e_ndo_fdb_add - add an entry to the hardware database
+ * @ndm: the input from the stack
+ * @tb: pointer to array of nladdr (unused)
+ * @dev: the net device pointer
+ * @addr: the MAC address entry being added
+ * @vid: VLAN ID
+ * @flags: instructions from stack about fdb operation
+ * @extack: netdev extended ack structure
+ */
+#ifdef HAVE_FDB_OPS
+#if defined(HAVE_NDO_FDB_ADD_EXTACK)
+static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+			    struct net_device *dev, const unsigned char *addr,
+			    u16 vid, u16 flags, struct netlink_ext_ack *extack)
+#elif defined(HAVE_NDO_FDB_ADD_VID)
+static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+			    struct net_device *dev, const unsigned char *addr,
+			    u16 vid, u16 flags)
+#elif defined(HAVE_NDO_FDB_ADD_NLATTR)
+static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+			    struct net_device *dev, const unsigned char *addr,
+			    u16 flags)
+#elif defined(USE_CONST_DEV_UC_CHAR)
+static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct net_device *dev,
+			    const unsigned char *addr, u16 flags)
+#else
+static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct net_device *dev,
+			    unsigned char *addr, u16 flags)
+#endif
 {
-	/* Commit temporary BW setting to permanent NVM image */
-	enum i40e_admin_queue_err last_aq_status;
-	i40e_status ret;
-	u16 nvm_word;
-
-	if (pf->hw.partition_id != 1) {
-		dev_info(&pf->pdev->dev,
-			 "Commit BW only works on partition 1! This is partition %d",
-			 pf->hw.partition_id);
-		ret = I40E_NOT_SUPPORTED;
-		goto bw_commit_out;
-	}
+	struct i40e_netdev_priv *np = netdev_priv(dev);
+	struct i40e_pf *pf = np->vsi->back;
+	int err = 0;
 
-	/* Acquire NVM for read access */
-	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
-	last_aq_status = pf->hw.aq.asq_last_status;
-	if (ret) {
-		dev_info(&pf->pdev->dev,
-			 "Cannot acquire NVM for read access, err %s aq_err %s\n",
-			 i40e_stat_str(&pf->hw, ret),
-			 i40e_aq_str(&pf->hw, last_aq_status));
-		goto bw_commit_out;
-	}
+	if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
+		return -EOPNOTSUPP;
 
-	/* Read word 0x10 of NVM - SW compatibility word 1 */
-	ret = i40e_aq_read_nvm(&pf->hw,
-			       I40E_SR_NVM_CONTROL_WORD,
-			       0x10, sizeof(nvm_word), &nvm_word,
-			       false, NULL);
-	/* Save off last admin queue command status before releasing
-	 * the NVM
+	/* Hardware does not support aging addresses, so if an
+	 * ndm_state is given only allow permanent addresses
 	 */
-	last_aq_status = pf->hw.aq.asq_last_status;
-	i40e_release_nvm(&pf->hw);
-	if (ret) {
-		dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
-			 i40e_stat_str(&pf->hw, ret),
-			 i40e_aq_str(&pf->hw, last_aq_status));
-		goto bw_commit_out;
+	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+		netdev_info(dev, "FDB only supports static addresses\n");
+		return -EINVAL;
 	}
 
-	/* Wait a bit for NVM release to complete */
-	msleep(50);
+	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+		err = dev_uc_add_excl(dev, addr);
+	else if (is_multicast_ether_addr(addr))
+		err = dev_mc_add_excl(dev, addr);
+	else
+		err = -EINVAL;
 
-	/* Acquire NVM for write access */
-	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
-	last_aq_status = pf->hw.aq.asq_last_status;
-	if (ret) {
-		dev_info(&pf->pdev->dev,
-			 "Cannot acquire NVM for write access, err %s aq_err %s\n",
-			 i40e_stat_str(&pf->hw, ret),
-			 i40e_aq_str(&pf->hw, last_aq_status));
-		goto bw_commit_out;
-	}
-	/* Write it back out unchanged to initiate update NVM,
-	 * which will force a write of the shadow (alt) RAM to
-	 * the NVM - thus storing the bandwidth values permanently.
-	 */
-	ret = i40e_aq_update_nvm(&pf->hw,
-				 I40E_SR_NVM_CONTROL_WORD,
-				 0x10, sizeof(nvm_word),
-				 &nvm_word, true, NULL);
-	/* Save off last admin queue command status before releasing
-	 * the NVM
-	 */
-	last_aq_status = pf->hw.aq.asq_last_status;
-	i40e_release_nvm(&pf->hw);
-	if (ret)
-		dev_info(&pf->pdev->dev,
-			 "BW settings NOT SAVED, err %s aq_err %s\n",
-			 i40e_stat_str(&pf->hw, ret),
-			 i40e_aq_str(&pf->hw, last_aq_status));
-bw_commit_out:
+	/* Only return duplicate errors if NLM_F_EXCL is set */
+	if (err == -EEXIST && !(flags & NLM_F_EXCL))
+		err = 0;
 
-	return ret;
+	return err;
 }
 
+#ifdef HAVE_NDO_FEATURES_CHECK
 /**
- * i40e_sw_init - Initialize general software structures (struct i40e_pf)
- * @pf: board private structure to initialize
- *
- * i40e_sw_init initializes the Adapter private data structure.
- * Fields are initialized based on PCI device information and
- * OS network device settings (MTU size).
+ * i40e_features_check - Validate encapsulated packet conforms to limits
+ * @skb: skb buff
+ * @dev: This physical port's netdev
+ * @features: Offload features that the stack believes apply
  **/
-static int i40e_sw_init(struct i40e_pf *pf)
+static netdev_features_t i40e_features_check(struct sk_buff *skb,
+					     struct net_device *dev,
+					     netdev_features_t features)
 {
-	int err = 0;
-	int size;
-
-	/* Set default capability flags */
-	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
-		    I40E_FLAG_MSI_ENABLED     |
-		    I40E_FLAG_MSIX_ENABLED;
-
-	/* Set default ITR */
-	pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
-	pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
+	size_t len;
 
-	/* Depending on PF configurations, it is possible that the RSS
-	 * maximum might end up larger than the available queues
+	/* No point in doing any of this if neither checksum nor GSO is
+	 * being requested for this frame.  We can rule out both by just
+	 * checking for CHECKSUM_PARTIAL
 	 */
-	pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
-	pf->alloc_rss_size = 1;
-	pf->rss_table_size = pf->hw.func_caps.rss_table_size;
-	pf->rss_size_max = min_t(int, pf->rss_size_max,
-				 pf->hw.func_caps.num_tx_qp);
-	if (pf->hw.func_caps.rss) {
-		pf->flags |= I40E_FLAG_RSS_ENABLED;
-		pf->alloc_rss_size = min_t(int, pf->rss_size_max,
-					   num_online_cpus());
-	}
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return features;
 
-	/* MFP mode enabled */
-	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
-		pf->flags |= I40E_FLAG_MFP_ENABLED;
-		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
-		if (i40e_get_partition_bw_setting(pf)) {
-			dev_warn(&pf->pdev->dev,
-				 "Could not get partition bw settings\n");
-		} else {
-			dev_info(&pf->pdev->dev,
-				 "Partition BW Min = %8.8x, Max = %8.8x\n",
-				 pf->min_bw, pf->max_bw);
+	/* We cannot support GSO if the MSS is going to be less than
+	 * 64 bytes.  If it is then we need to drop support for GSO.
+	 */
+	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
+		features &= ~NETIF_F_GSO_MASK;
 
-			/* nudge the Tx scheduler */
-			i40e_set_partition_bw_setting(pf);
-		}
-	}
+	/* MACLEN can support at most 63 words */
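+	/* i.e. the MAC header must be even and no longer than 126 bytes */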
+	len = skb_network_header(skb) - skb->data;
+	if (len & ~(63 * 2))
+		goto out_err;
 
-	if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
-	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
-		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
-		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
-		if (pf->flags & I40E_FLAG_MFP_ENABLED &&
-		    pf->hw.num_partitions > 1)
-			dev_info(&pf->pdev->dev,
-				 "Flow Director Sideband mode Disabled in MFP mode\n");
-		else
-			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
-		pf->fdir_pf_filter_count =
-				 pf->hw.func_caps.fd_filters_guaranteed;
-		pf->hw.fdir_shared_filter_count =
-				 pf->hw.func_caps.fd_filters_best_effort;
-	}
+	/* IPLEN and EIPLEN can support at most 127 dwords */
+	len = skb_transport_header(skb) - skb_network_header(skb);
+	if (len & ~(127 * 4))
+		goto out_err;
 
-	if (pf->hw.mac.type == I40E_MAC_X722) {
-		pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
-				    I40E_HW_128_QP_RSS_CAPABLE |
-				    I40E_HW_ATR_EVICT_CAPABLE |
-				    I40E_HW_WB_ON_ITR_CAPABLE |
-				    I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
-				    I40E_HW_NO_PCI_LINK_CHECK |
-				    I40E_HW_USE_SET_LLDP_MIB |
-				    I40E_HW_GENEVE_OFFLOAD_CAPABLE |
-				    I40E_HW_PTP_L4_CAPABLE |
-				    I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
-				    I40E_HW_OUTER_UDP_CSUM_CAPABLE);
+	if (skb->encapsulation) {
+		/* L4TUNLEN can support 127 words */
+		len = skb_inner_network_header(skb) - skb_transport_header(skb);
+		if (len & ~(127 * 2))
+			goto out_err;
 
-#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
-		if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
-		    I40E_FDEVICT_PCTYPE_DEFAULT) {
-			dev_warn(&pf->pdev->dev,
-				 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
-			pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
-		}
-	} else if ((pf->hw.aq.api_maj_ver > 1) ||
-		   ((pf->hw.aq.api_maj_ver == 1) &&
-		    (pf->hw.aq.api_min_ver > 4))) {
-		/* Supported in FW API version higher than 1.4 */
-		pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
+		/* IPLEN can support at most 127 dwords */
+		len = skb_inner_transport_header(skb) -
+		      skb_inner_network_header(skb);
+		if (len & ~(127 * 4))
+			goto out_err;
 	}
 
-	/* Enable HW ATR eviction if possible */
-	if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
-		pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
-
-	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
-	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
-	    (pf->hw.aq.fw_maj_ver < 4))) {
-		pf->hw_features |= I40E_HW_RESTART_AUTONEG;
-		/* No DCB support  for FW < v4.33 */
-		pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
-	}
+	/* No need to validate L4LEN as TCP is the only protocol with a
+	 * flexible value and we support all possible values supported
+	 * by TCP, which is at most 15 dwords
+	 */
 
-	/* Disable FW LLDP if FW < v4.3 */
-	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
-	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
-	    (pf->hw.aq.fw_maj_ver < 4)))
-		pf->hw_features |= I40E_HW_STOP_FW_LLDP;
+	return features;
+out_err:
+	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+}
 
-	/* Use the FW Set LLDP MIB API if FW > v4.40 */
-	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
-	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
-	    (pf->hw.aq.fw_maj_ver >= 5)))
-		pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
+#ifdef HAVE_XDP_SUPPORT
+/**
+ * i40e_enter_busy_conf - Enters busy config state
+ * @vsi: vsi
+ *
+ * Returns 0 on success, <0 for failure.
+ **/
+static int i40e_enter_busy_conf(struct i40e_vsi *vsi)
+{
+	struct i40e_pf *pf = vsi->back;
+	int timeout = 50;
 
-	if (pf->hw.func_caps.vmdq) {
-		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
-		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
-		pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
+	while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
+		timeout--;
+		if (!timeout)
+			return -EBUSY;
+		usleep_range(1000, 2000);
 	}
 
-	if (pf->hw.func_caps.iwarp) {
-		pf->flags |= I40E_FLAG_IWARP_ENABLED;
-		/* IWARP needs one extra vector for CQP just like MISC.*/
-		pf->num_iwarp_msix = (int)num_online_cpus() + 1;
-	}
+	return 0;
+}
 
-#ifdef CONFIG_PCI_IOV
-	if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
-		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
-		pf->flags |= I40E_FLAG_SRIOV_ENABLED;
-		pf->num_req_vfs = min_t(int,
-					pf->hw.func_caps.num_vfs,
-					I40E_MAX_VF_COUNT);
-	}
-#endif /* CONFIG_PCI_IOV */
-	pf->eeprom_version = 0xDEAD;
-	pf->lan_veb = I40E_NO_VEB;
-	pf->lan_vsi = I40E_NO_VSI;
+/**
+ * i40e_exit_busy_conf - Exits busy config state
+ * @vsi: vsi
+ **/
+static void i40e_exit_busy_conf(struct i40e_vsi *vsi)
+{
+	struct i40e_pf *pf = vsi->back;
 
-	/* By default FW has this off for performance reasons */
-	pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
+	clear_bit(__I40E_CONFIG_BUSY, pf->state);
+}
 
-	/* set up queue assignment tracking */
-	size = sizeof(struct i40e_lump_tracking)
-		+ (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
-	pf->qp_pile = kzalloc(size, GFP_KERNEL);
-	if (!pf->qp_pile) {
-		err = -ENOMEM;
-		goto sw_init_done;
+/**
+ * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair
+ * @vsi: vsi
+ * @queue_pair: queue pair
+ **/
+static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
+{
+	memset(&vsi->rx_rings[queue_pair]->rx_stats, 0,
+	       sizeof(vsi->rx_rings[queue_pair]->rx_stats));
+	memset(&vsi->tx_rings[queue_pair]->stats, 0,
+	       sizeof(vsi->tx_rings[queue_pair]->stats));
+	if (i40e_enabled_xdp_vsi(vsi)) {
+		memset(&vsi->xdp_rings[queue_pair]->stats, 0,
+		       sizeof(vsi->xdp_rings[queue_pair]->stats));
 	}
-	pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
-	pf->qp_pile->search_hint = 0;
+}
 
-	pf->tx_timeout_recovery_level = 1;
+/**
+ * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair
+ * @vsi: vsi
+ * @queue_pair: queue pair
+ **/
+static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
+{
+	i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
+	if (i40e_enabled_xdp_vsi(vsi))
+		i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
+	i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
+}
 
-	mutex_init(&pf->switch_mutex);
+/**
+ * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair
+ * @vsi: vsi
+ * @queue_pair: queue pair
+ * @enable: true for enable, false for disable
+ **/
+static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair,
+					bool enable)
+{
+	struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
+	struct i40e_q_vector *q_vector = rxr->q_vector;
 
-sw_init_done:
-	return err;
+	if (!vsi->netdev)
+		return;
+
+	/* All rings in a qp belong to the same qvector. */
+	if (q_vector->rx.ring || q_vector->tx.ring) {
+		if (enable)
+			napi_enable(&q_vector->napi);
+		else
+			napi_disable(&q_vector->napi);
+	}
 }
 
 /**
- * i40e_set_ntuple - set the ntuple feature flag and take action
- * @pf: board private structure to initialize
- * @features: the feature set that the stack is suggesting
+ * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair
+ * @vsi: vsi
+ * @queue_pair: queue pair
+ * @enable: true for enable, false for disable
  *
- * returns a bool to indicate if reset needs to happen
+ * Returns 0 on success, <0 on failure.
  **/
-bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
+static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair,
+					bool enable)
 {
-	bool need_reset = false;
+	struct i40e_pf *pf = vsi->back;
+	int pf_q, ret = 0;
 
-	/* Check if Flow Director n-tuple support was enabled or disabled.  If
-	 * the state changed, we need to reset.
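+	/* Translate the VSI-relative queue index to the PF-absolute queue */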
+	pf_q = vsi->base_queue + queue_pair;
+	ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q,
+				     false /*is xdp*/, enable);
+	if (ret) {
+		dev_info(&pf->pdev->dev,
+			 "VSI seid %d Tx ring %d %sable timeout\n",
+			 vsi->seid, pf_q, (enable ? "en" : "dis"));
+		return ret;
+	}
+
+	i40e_control_rx_q(pf, pf_q, enable);
+	ret = i40e_pf_rxq_wait(pf, pf_q, enable);
+	if (ret) {
+		dev_info(&pf->pdev->dev,
+			 "VSI seid %d Rx ring %d %sable timeout\n",
+			 vsi->seid, pf_q, (enable ? "en" : "dis"));
+		return ret;
+	}
+
+	/* Due to HW errata, on Rx disable only, the register can
+	 * indicate done before it really is. Needs 50ms to be sure
 	 */
-	if (features & NETIF_F_NTUPLE) {
-		/* Enable filters and mark for reset */
-		if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
-			need_reset = true;
-		/* enable FD_SB only if there is MSI-X vector */
-		if (pf->num_fdsb_msix > 0)
-			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
-	} else {
-		/* turn off filters, mark for reset and clear SW filter list */
-		if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
-			need_reset = true;
-			i40e_fdir_filter_exit(pf);
-		}
-		pf->flags &= ~(I40E_FLAG_FD_SB_ENABLED |
-			       I40E_FLAG_FD_SB_AUTO_DISABLED);
-		/* reset fd counters */
-		pf->fd_add_err = 0;
-		pf->fd_atr_cnt = 0;
-		/* if ATR was auto disabled it can be re-enabled. */
-		if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
-			pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
-			if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
-			    (I40E_DEBUG_FD & pf->hw.debug_mask))
-				dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
-		}
+	if (!enable)
+		mdelay(50);
+
+	if (!i40e_enabled_xdp_vsi(vsi))
+		return ret;
+
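+	/* XDP Tx queues occupy the range just above the regular Tx queues */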
+	ret = i40e_control_wait_tx_q(vsi->seid, pf,
+				     pf_q + vsi->alloc_queue_pairs,
+				     true /*is xdp*/, enable);
+	if (ret) {
+		dev_info(&pf->pdev->dev,
+			 "VSI seid %d XDP Tx ring %d %sable timeout\n",
+			 vsi->seid, pf_q, (enable ? "en" : "dis"));
 	}
-	return need_reset;
+
+	return ret;
 }
 
 /**
- * i40e_clear_rss_lut - clear the rx hash lookup table
- * @vsi: the VSI being configured
+ * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair
+ * @vsi: vsi
+ * @queue_pair: queue_pair
  **/
-static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
+static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair)
 {
+	struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
 	struct i40e_pf *pf = vsi->back;
 	struct i40e_hw *hw = &pf->hw;
-	u16 vf_id = vsi->vf_id;
-	u8 i;
 
-	if (vsi->type == I40E_VSI_MAIN) {
-		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
-			wr32(hw, I40E_PFQF_HLUT(i), 0);
-	} else if (vsi->type == I40E_VSI_SRIOV) {
-		for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
-			i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
-	} else {
-		dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
-	}
+	/* All rings in a qp belong to the same qvector. */
+	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+		i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx);
+	else
+		i40e_irq_dynamic_enable_icr0(pf);
+
+	i40e_flush(hw);
 }
 
 /**
- * i40e_set_features - set the netdev feature flags
- * @netdev: ptr to the netdev being adjusted
- * @features: the feature set that the stack is suggesting
- * Note: expects to be called while under rtnl_lock()
+ * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair
+ * @vsi: vsi
+ * @queue_pair: queue_pair
  **/
-static int i40e_set_features(struct net_device *netdev,
-			     netdev_features_t features)
+static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair)
 {
-	struct i40e_netdev_priv *np = netdev_priv(netdev);
-	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
 	struct i40e_pf *pf = vsi->back;
-	bool need_reset;
-
-	if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
-		i40e_pf_config_rss(pf);
-	else if (!(features & NETIF_F_RXHASH) &&
-		 netdev->features & NETIF_F_RXHASH)
-		i40e_clear_rss_lut(vsi);
-
-	if (features & NETIF_F_HW_VLAN_CTAG_RX)
-		i40e_vlan_stripping_enable(vsi);
-	else
-		i40e_vlan_stripping_disable(vsi);
-
-	need_reset = i40e_set_ntuple(pf, features);
+	struct i40e_hw *hw = &pf->hw;
 
-	if (need_reset)
-		i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true);
+	/* For simplicity, instead of removing the qp interrupt causes
+	 * from the interrupt linked list, we simply disable the interrupt, and
+	 * leave the list intact.
+	 *
+	 * All rings in a qp belong to the same qvector.
+	 */
+	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+		u32 intpf = vsi->base_vector + rxr->q_vector->v_idx;
 
-	return 0;
+		wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0);
+		i40e_flush(hw);
+		synchronize_irq(pf->msix_entries[intpf].vector);
+	} else {
+		/* Legacy and MSI mode - this stops all interrupt handling */
+		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
+		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
+		i40e_flush(hw);
+		synchronize_irq(pf->pdev->irq);
+	}
 }
 
 /**
- * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port
- * @pf: board private structure
- * @port: The UDP port to look up
+ * i40e_queue_pair_disable - Disables a queue pair
+ * @vsi: vsi
+ * @queue_pair: queue pair
  *
- * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
+ * Returns 0 on success, <0 on failure.
  **/
-static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
+int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
 {
-	u8 i;
+	int err;
 
-	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
-		if (pf->udp_ports[i].port == port)
-			return i;
-	}
+	err = i40e_enter_busy_conf(vsi);
+	if (err)
+		return err;
 
-	return i;
+	i40e_queue_pair_disable_irq(vsi, queue_pair);
+	err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
+	i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
+	i40e_queue_pair_clean_rings(vsi, queue_pair);
+	i40e_queue_pair_reset_stats(vsi, queue_pair);
+
+	return err;
 }
 
 /**
- * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
- * @netdev: This physical port's netdev
- * @ti: Tunnel endpoint information
+ * i40e_queue_pair_enable - Enables a queue pair
+ * @vsi: vsi
+ * @queue_pair: queue pair
+ *
+ * Returns 0 on success, <0 on failure.
  **/
-static void i40e_udp_tunnel_add(struct net_device *netdev,
-				struct udp_tunnel_info *ti)
+int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair)
 {
-	struct i40e_netdev_priv *np = netdev_priv(netdev);
-	struct i40e_vsi *vsi = np->vsi;
-	struct i40e_pf *pf = vsi->back;
-	u16 port = ntohs(ti->port);
-	u8 next_idx;
-	u8 idx;
+	int err;
 
-	idx = i40e_get_udp_port_idx(pf, port);
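+	/* Bring-up order mirrors teardown: rings, then NAPI, then IRQs */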
+	err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]);
+	if (err)
+		return err;
 
-	/* Check if port already exists */
-	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
-		netdev_info(netdev, "port %d already offloaded\n", port);
-		return;
+	if (i40e_enabled_xdp_vsi(vsi)) {
+		err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]);
+		if (err)
+			return err;
 	}
 
-	/* Now check if there is space to add the new port */
-	next_idx = i40e_get_udp_port_idx(pf, 0);
+	err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]);
+	if (err)
+		return err;
 
-	if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
-		netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
-			    port);
-		return;
-	}
+	err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */);
+	i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */);
+	i40e_queue_pair_enable_irq(vsi, queue_pair);
 
-	switch (ti->type) {
-	case UDP_TUNNEL_TYPE_VXLAN:
-		pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
-		break;
-	case UDP_TUNNEL_TYPE_GENEVE:
-		if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE))
-			return;
-		pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
-		break;
-	default:
-		return;
-	}
+	i40e_exit_busy_conf(vsi);
 
-	/* New port: add it and mark its index in the bitmap */
-	pf->udp_ports[next_idx].port = port;
-	pf->pending_udp_bitmap |= BIT_ULL(next_idx);
-	pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
+	return err;
 }
 
 /**
- * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
- * @netdev: This physical port's netdev
- * @ti: Tunnel endpoint information
+ * i40e_xdp_setup - add/remove an XDP program
+ * @vsi: VSI to be changed
+ * @prog: XDP program
  **/
-static void i40e_udp_tunnel_del(struct net_device *netdev,
-				struct udp_tunnel_info *ti)
+static int i40e_xdp_setup(struct i40e_vsi *vsi,
+			  struct bpf_prog *prog)
 {
-	struct i40e_netdev_priv *np = netdev_priv(netdev);
-	struct i40e_vsi *vsi = np->vsi;
+	int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
 	struct i40e_pf *pf = vsi->back;
-	u16 port = ntohs(ti->port);
-	u8 idx;
+	struct bpf_prog *old_prog;
+	bool need_reset;
+	int i;
 
-	idx = i40e_get_udp_port_idx(pf, port);
+	/* Don't allow frames that span over multiple buffers */
+	if (frame_size > vsi->rx_buf_len)
+		return -EINVAL;
 
-	/* Check if port already exists */
-	if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
-		goto not_found;
+	if (!i40e_enabled_xdp_vsi(vsi) && !prog)
+		return 0;
 
-	switch (ti->type) {
-	case UDP_TUNNEL_TYPE_VXLAN:
-		if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
-			goto not_found;
-		break;
-	case UDP_TUNNEL_TYPE_GENEVE:
-		if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
-			goto not_found;
-		break;
-	default:
-		goto not_found;
-	}
+	/* When turning XDP on->off/off->on we reset and rebuild the rings. */
+	need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
 
-	/* if port exists, set it to 0 (mark for deletion)
-	 * and make it pending
-	 */
-	pf->udp_ports[idx].port = 0;
-	pf->pending_udp_bitmap |= BIT_ULL(idx);
-	pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
+	if (need_reset)
+		i40e_prep_for_reset(pf, true);
 
-	return;
-not_found:
-	netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
-		    port);
-}
+	old_prog = xchg(&vsi->xdp_prog, prog);
 
-static int i40e_get_phys_port_id(struct net_device *netdev,
-				 struct netdev_phys_item_id *ppid)
-{
-	struct i40e_netdev_priv *np = netdev_priv(netdev);
-	struct i40e_pf *pf = np->vsi->back;
-	struct i40e_hw *hw = &pf->hw;
+	if (need_reset)
+		i40e_reset_and_rebuild(pf, true, true);
 
-	if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
-		return -EOPNOTSUPP;
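+	/* Publish the new program pointer to every Rx ring */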
+	for (i = 0; i < vsi->num_queue_pairs; i++)
+		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
 
-	ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
-	memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
+	if (old_prog)
+		bpf_prog_put(old_prog);
 
 	return 0;
 }
 
 /**
- * i40e_ndo_fdb_add - add an entry to the hardware database
- * @ndm: the input from the stack
- * @tb: pointer to array of nladdr (unused)
- * @dev: the net device pointer
- * @addr: the MAC address entry being added
- * @flags: instructions from stack about fdb operation
- */
-static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
-			    struct net_device *dev,
-			    const unsigned char *addr, u16 vid,
-			    u16 flags)
+ * i40e_xdp - implements ndo_xdp for i40e
+ * @dev: netdevice
+ * @xdp: XDP command
+ **/
+#ifdef HAVE_NDO_BPF
+static int i40e_xdp(struct net_device *dev,
+		    struct netdev_bpf *xdp)
+#else
+static int i40e_xdp(struct net_device *dev,
+		    struct netdev_xdp *xdp)
+#endif
 {
 	struct i40e_netdev_priv *np = netdev_priv(dev);
-	struct i40e_pf *pf = np->vsi->back;
-	int err = 0;
+	struct i40e_vsi *vsi = np->vsi;
 
-	if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
-		return -EOPNOTSUPP;
+	if (vsi->type != I40E_VSI_MAIN)
+		return -EINVAL;
 
-	if (vid) {
-		pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
+	switch (xdp->command) {
+	case XDP_SETUP_PROG:
+		return i40e_xdp_setup(vsi, xdp->prog);
+	case XDP_QUERY_PROG:
+#ifndef NO_NETDEV_BPF_PROG_ATTACHED
+		xdp->prog_attached = i40e_enabled_xdp_vsi(vsi);
+#endif /* !NO_NETDEV_BPF_PROG_ATTACHED */
+		xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
+		return 0;
+	default:
 		return -EINVAL;
 	}
+}
+#endif /* HAVE_XDP_SUPPORT */
+#endif /* HAVE_NDO_FEATURES_CHECK */
+#ifndef USE_DEFAULT_FDB_DEL_DUMP
+#if defined(HAVE_NDO_FDB_ADD_VID)
+static int i40e_ndo_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+			    struct net_device *dev, const unsigned char *addr,
+			    u16 vid)
+#elif defined(HAVE_FDB_DEL_NLATTR)
+static int i40e_ndo_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+			    struct net_device *dev, const unsigned char *addr)
+#elif defined(USE_CONST_DEV_UC_CHAR)
+static int i40e_ndo_fdb_del(struct ndmsg *ndm, struct net_device *dev,
+			    const unsigned char *addr)
+#else
+static int i40e_ndo_fdb_del(struct ndmsg *ndm, struct net_device *dev,
+			    unsigned char *addr)
+#endif /* HAVE_NDO_FDB_ADD_VID */
+{
+	struct i40e_netdev_priv *np = netdev_priv(dev);
+	struct i40e_pf *pf = np->vsi->back;
+	int err = -EOPNOTSUPP;
 
-	/* Hardware does not support aging addresses so if a
-	 * ndm_state is given only allow permanent addresses
-	 */
-	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+	if (ndm->ndm_state & NUD_PERMANENT) {
 		netdev_info(dev, "FDB only supports static addresses\n");
 		return -EINVAL;
 	}
 
-	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
-		err = dev_uc_add_excl(dev, addr);
-	else if (is_multicast_ether_addr(addr))
-		err = dev_mc_add_excl(dev, addr);
-	else
-		err = -EINVAL;
-
-	/* Only return duplicate errors if NLM_F_EXCL is set */
-	if (err == -EEXIST && !(flags & NLM_F_EXCL))
-		err = 0;
+	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+		if (is_unicast_ether_addr(addr))
+			err = dev_uc_del(dev, addr);
+		else if (is_multicast_ether_addr(addr))
+			err = dev_mc_del(dev, addr);
+		else
+			err = -EINVAL;
+	}
 
 	return err;
 }
 
+static int i40e_ndo_fdb_dump(struct sk_buff *skb,
+			      struct netlink_callback *cb,
+			      struct net_device *dev,
+			      int idx)
+{
+	struct i40e_netdev_priv *np = netdev_priv(dev);
+	struct i40e_pf *pf = np->vsi->back;
+
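+	/* Only expose the default FDB dump when SR-IOV is active */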
+	if (pf->flags & I40E_FLAG_SRIOV_ENABLED)
+		idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
+
+	return idx;
+}
+
+#endif /* USE_DEFAULT_FDB_DEL_DUMP */
+#ifdef HAVE_BRIDGE_ATTRIBS
 /**
  * i40e_ndo_bridge_setlink - Set the hardware bridge mode
  * @dev: the netdev being configured
  * @nlh: RTNL message
+ * @flags: bridge flags
+ * @extack: netdev extended ack structure
  *
  * Inserts a new hardware bridge if not already created and
  * enables the bridging mode requested (VEB or VEPA). If the
@@ -9363,9 +13118,19 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
  *
  * Note: expects to be called while under rtnl_lock()
  **/
+#if defined(HAVE_NDO_BRIDGE_SETLINK_EXTACK)
+static int i40e_ndo_bridge_setlink(struct net_device *dev,
+				   struct nlmsghdr *nlh,
+				   u16 flags,
+				   struct netlink_ext_ack *extack)
+#elif defined(HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS)
 static int i40e_ndo_bridge_setlink(struct net_device *dev,
 				   struct nlmsghdr *nlh,
 				   u16 flags)
+#else
+static int i40e_ndo_bridge_setlink(struct net_device *dev,
+				   struct nlmsghdr *nlh)
+#endif
 {
 	struct i40e_netdev_priv *np = netdev_priv(dev);
 	struct i40e_vsi *vsi = np->vsi;
@@ -9412,13 +13177,11 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
 		} else if (mode != veb->bridge_mode) {
 			/* Existing HW bridge but different mode needs reset */
 			veb->bridge_mode = mode;
-			/* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
 			if (mode == BRIDGE_MODE_VEB)
 				pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
 			else
 				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
-			i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED),
-				      true);
+			i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
 			break;
 		}
 	}
@@ -9435,196 +13198,239 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
  * @filter_mask: unused
  * @nlflags: netlink flags passed in
  *
- * Return the mode in which the hardware bridge is operating in
- * i.e VEB or VEPA.
- **/
-static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
-				   struct net_device *dev,
-				   u32 __always_unused filter_mask,
-				   int nlflags)
-{
-	struct i40e_netdev_priv *np = netdev_priv(dev);
-	struct i40e_vsi *vsi = np->vsi;
-	struct i40e_pf *pf = vsi->back;
-	struct i40e_veb *veb = NULL;
-	int i;
-
-	/* Only for PF VSI for now */
-	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
-		return -EOPNOTSUPP;
-
-	/* Find the HW bridge for the PF VSI */
-	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
-		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
-			veb = pf->veb[i];
-	}
-
-	if (!veb)
-		return 0;
-
-	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
-				       0, 0, nlflags, filter_mask, NULL);
-}
-
-/**
- * i40e_features_check - Validate encapsulated packet conforms to limits
- * @skb: skb buff
- * @dev: This physical port's netdev
- * @features: Offload features that the stack believes apply
- **/
-static netdev_features_t i40e_features_check(struct sk_buff *skb,
-					     struct net_device *dev,
-					     netdev_features_t features)
-{
-	size_t len;
-
-	/* No point in doing any of this if neither checksum nor GSO are
-	 * being requested for this frame.  We can rule out both by just
-	 * checking for CHECKSUM_PARTIAL
-	 */
-	if (skb->ip_summed != CHECKSUM_PARTIAL)
-		return features;
-
-	/* We cannot support GSO if the MSS is going to be less than
-	 * 64 bytes.  If it is then we need to drop support for GSO.
-	 */
-	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
-		features &= ~NETIF_F_GSO_MASK;
-
-	/* MACLEN can support at most 63 words */
-	len = skb_network_header(skb) - skb->data;
-	if (len & ~(63 * 2))
-		goto out_err;
-
-	/* IPLEN and EIPLEN can support at most 127 dwords */
-	len = skb_transport_header(skb) - skb_network_header(skb);
-	if (len & ~(127 * 4))
-		goto out_err;
-
-	if (skb->encapsulation) {
-		/* L4TUNLEN can support 127 words */
-		len = skb_inner_network_header(skb) - skb_transport_header(skb);
-		if (len & ~(127 * 2))
-			goto out_err;
-
-		/* IPLEN can support at most 127 dwords */
-		len = skb_inner_transport_header(skb) -
-		      skb_inner_network_header(skb);
-		if (len & ~(127 * 4))
-			goto out_err;
-	}
-
-	/* No need to validate L4LEN as TCP is the only protocol with a
-	 * a flexible value and we support all possible values supported
-	 * by TCP, which is at most 15 dwords
-	 */
-
-	return features;
-out_err:
-	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
-}
-
-/**
- * i40e_xdp_setup - add/remove an XDP program
- * @vsi: VSI to changed
- * @prog: XDP program
- **/
-static int i40e_xdp_setup(struct i40e_vsi *vsi,
-			  struct bpf_prog *prog)
-{
-	int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
-	struct i40e_pf *pf = vsi->back;
-	struct bpf_prog *old_prog;
-	bool need_reset;
-	int i;
-
-	/* Don't allow frames that span over multiple buffers */
-	if (frame_size > vsi->rx_buf_len)
-		return -EINVAL;
-
-	if (!i40e_enabled_xdp_vsi(vsi) && !prog)
-		return 0;
-
-	/* When turning XDP on->off/off->on we reset and rebuild the rings. */
-	need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
-
-	if (need_reset)
-		i40e_prep_for_reset(pf, true);
-
-	old_prog = xchg(&vsi->xdp_prog, prog);
-
-	if (need_reset)
-		i40e_reset_and_rebuild(pf, true, true);
-
-	for (i = 0; i < vsi->num_queue_pairs; i++)
-		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
-
-	if (old_prog)
-		bpf_prog_put(old_prog);
-
-	return 0;
-}
-
-/**
- * i40e_xdp - implements ndo_xdp for i40e
- * @dev: netdevice
- * @xdp: XDP command
+ * Return the mode in which the hardware bridge is operating,
+ * i.e. VEB or VEPA.
  **/
-static int i40e_xdp(struct net_device *dev,
-		    struct netdev_xdp *xdp)
+#ifdef HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
+static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+				   struct net_device *dev,
+				   u32 __always_unused filter_mask,
+				   int nlflags)
+#elif defined(HAVE_BRIDGE_FILTER)
+static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+				   struct net_device *dev,
+				   u32 __always_unused filter_mask)
+#else
+static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+				   struct net_device *dev)
+#endif /* NDO_BRIDGE_STUFF */
 {
 	struct i40e_netdev_priv *np = netdev_priv(dev);
 	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_veb *veb = NULL;
+	int i;
 
-	if (vsi->type != I40E_VSI_MAIN)
-		return -EINVAL;
+	/* Only for PF VSI for now */
+	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
+		return -EOPNOTSUPP;
 
-	switch (xdp->command) {
-	case XDP_SETUP_PROG:
-		return i40e_xdp_setup(vsi, xdp->prog);
-	case XDP_QUERY_PROG:
-		xdp->prog_attached = i40e_enabled_xdp_vsi(vsi);
-		xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
-		return 0;
-	default:
-		return -EINVAL;
+	/* Find the HW bridge for the PF VSI */
+	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
+		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
+			veb = pf->veb[i];
 	}
+
+	if (!veb)
+		return 0;
+
+#ifdef HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT
+	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
+				       0, 0, nlflags, filter_mask, NULL);
+#elif defined(HAVE_NDO_BRIDGE_GETLINK_NLFLAGS)
+	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
+				       0, 0, nlflags);
+#elif defined(HAVE_NDO_FDB_ADD_VID) || \
+	defined NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS
+	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
+				       0, 0);
+#else
+	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode);
+#endif /* HAVE_NDO_BRIDGE_XX */
 }
+#endif /* HAVE_BRIDGE_ATTRIBS */
+#endif /* HAVE_FDB_OPS */
 
+#ifdef HAVE_NET_DEVICE_OPS
 static const struct net_device_ops i40e_netdev_ops = {
 	.ndo_open		= i40e_open,
 	.ndo_stop		= i40e_close,
 	.ndo_start_xmit		= i40e_lan_xmit_frame,
+#if defined(HAVE_NDO_GET_STATS64) || defined(HAVE_VOID_NDO_GET_STATS64)
 	.ndo_get_stats64	= i40e_get_netdev_stats_struct,
+#else
+	.ndo_get_stats		= i40e_get_netdev_stats_struct,
+#endif
 	.ndo_set_rx_mode	= i40e_set_rx_mode,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= i40e_set_mac,
+#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU
+	.extended.ndo_change_mtu = i40e_change_mtu,
+#else
 	.ndo_change_mtu		= i40e_change_mtu,
+#endif /* HAVE_RHEL7_EXTENDED_MIN_MAX_MTU */
+#if defined(HAVE_PTP_1588_CLOCK) || defined(HAVE_I40E_INTELCIM_IOCTL)
 	.ndo_do_ioctl		= i40e_ioctl,
+#endif
 	.ndo_tx_timeout		= i40e_tx_timeout,
+#ifdef HAVE_VLAN_RX_REGISTER
+	.ndo_vlan_rx_register	= i40e_vlan_rx_register,
+#endif
 	.ndo_vlan_rx_add_vid	= i40e_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= i40e_vlan_rx_kill_vid,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= i40e_netpoll,
 #endif
+#ifdef HAVE_SETUP_TC
+#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC
+	.extended.ndo_setup_tc_rh = __i40e_setup_tc,
+#else
+#ifdef NETIF_F_HW_TC
 	.ndo_setup_tc		= __i40e_setup_tc,
-	.ndo_set_features	= i40e_set_features,
+#else
+	.ndo_setup_tc		= i40e_setup_tc,
+#endif /* NETIF_F_HW_TC */
+#endif /* HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC */
+#endif /* HAVE_SETUP_TC */
+#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT
+/* RHEL7 requires this to be defined to enable extended ops.  RHEL7 uses the
+ * function get_ndo_ext to retrieve offsets for extended fields from within
+ * the net_device_ops struct, and ndo_size is checked to determine whether
+ * or not the offset is valid (see the illustrative sketch after this ops
+ * section).
+ */
+	.ndo_size		= sizeof(const struct net_device_ops),
+#endif
+#ifdef IFLA_VF_MAX
 	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
+#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN
+	.extended.ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
+#else
 	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
+#endif
+#ifdef HAVE_VF_STATS
+	.ndo_get_vf_stats	= i40e_get_vf_stats,
+#endif
+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
 	.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,
+#else
+	.ndo_set_vf_tx_rate	= i40e_ndo_set_vf_bw,
+#endif
 	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
-	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
 	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofchk,
+#endif
+#ifdef HAVE_NDO_SET_VF_TRUST
+#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT
+	.extended.ndo_set_vf_trust = i40e_ndo_set_vf_trust,
+#else
 	.ndo_set_vf_trust	= i40e_ndo_set_vf_trust,
+#endif /* HAVE_RHEL7_NET_DEVICE_OPS_EXT */
+#endif /* HAVE_NDO_SET_VF_TRUST */
+#endif /* IFLA_VF_MAX */
+#ifdef HAVE_UDP_ENC_RX_OFFLOAD
+#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_UDP_TUNNEL
+	.extended.ndo_udp_tunnel_add = i40e_udp_tunnel_add,
+	.extended.ndo_udp_tunnel_del = i40e_udp_tunnel_del,
+#else
 	.ndo_udp_tunnel_add	= i40e_udp_tunnel_add,
 	.ndo_udp_tunnel_del	= i40e_udp_tunnel_del,
-	.ndo_get_phys_port_id	= i40e_get_phys_port_id,
+#endif
+#else /* !HAVE_UDP_ENC_RX_OFFLOAD */
+#ifdef HAVE_VXLAN_RX_OFFLOAD
+#if IS_ENABLED(CONFIG_VXLAN)
+	.ndo_add_vxlan_port	= i40e_add_vxlan_port,
+	.ndo_del_vxlan_port	= i40e_del_vxlan_port,
+#endif
+#endif /* HAVE_VXLAN_RX_OFFLOAD */
+#ifdef HAVE_GENEVE_RX_OFFLOAD
+#if IS_ENABLED(CONFIG_GENEVE)
+	.ndo_add_geneve_port	= i40e_add_geneve_port,
+	.ndo_del_geneve_port	= i40e_del_geneve_port,
+#endif
+#endif /* HAVE_GENEVE_RX_OFFLOAD */
+#endif /* HAVE_UDP_ENC_RX_OFFLOAD */
+#ifdef HAVE_NDO_GET_PHYS_PORT_ID
+	.ndo_get_phys_port_id   = i40e_get_phys_port_id,
+#endif /* HAVE_NDO_GET_PHYS_PORT_ID */
+#ifdef HAVE_FDB_OPS
 	.ndo_fdb_add		= i40e_ndo_fdb_add,
+#ifndef USE_DEFAULT_FDB_DEL_DUMP
+	.ndo_fdb_del		= i40e_ndo_fdb_del,
+	.ndo_fdb_dump		= i40e_ndo_fdb_dump,
+#endif
+#ifdef HAVE_NDO_FEATURES_CHECK
 	.ndo_features_check	= i40e_features_check,
+#endif /* HAVE_NDO_FEATURES_CHECK */
+#ifdef HAVE_BRIDGE_ATTRIBS
 	.ndo_bridge_getlink	= i40e_ndo_bridge_getlink,
 	.ndo_bridge_setlink	= i40e_ndo_bridge_setlink,
-	.ndo_xdp		= i40e_xdp,
+#endif /* HAVE_BRIDGE_ATTRIBS */
+#endif /* HAVE_FDB_OPS */
+#ifdef HAVE_XDP_SUPPORT
+#ifdef HAVE_NDO_BPF
+	.ndo_bpf		= i40e_xdp,
+	.ndo_xdp_xmit		= i40e_xdp_xmit,
+#else
+	.ndo_xdp                = i40e_xdp,
+	.ndo_xdp_xmit		= i40e_xdp_xmit,
+	.ndo_xdp_flush		= i40e_xdp_flush,
+#endif /* HAVE_NDO_BPF */
+#endif /* HAVE_XDP_SUPPORT */
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+};
+
+/* RHEL6 keeps these operations in a separate structure */
+static const struct net_device_ops_ext i40e_netdev_ops_ext = {
+	.size			= sizeof(struct net_device_ops_ext),
+#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
+#ifdef HAVE_NDO_SET_FEATURES
+	.ndo_set_features	= i40e_set_features,
+#endif /* HAVE_NDO_SET_FEATURES */
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
+#endif
 };
+#else /* HAVE_NET_DEVICE_OPS */
+/**
+ * i40e_assign_netdev_ops - Initialize netdev operations function pointers
+ * @dev: ptr to the netdev struct
+ **/
+#ifdef HAVE_CONFIG_HOTPLUG
+static void __devinit i40e_assign_netdev_ops(struct net_device *dev)
+#else
+static void i40e_assign_netdev_ops(struct net_device *dev)
+#endif
+{
+	dev->open = i40e_open;
+	dev->stop = i40e_close;
+	dev->hard_start_xmit = i40e_lan_xmit_frame;
+	dev->get_stats = i40e_get_netdev_stats_struct;
+
+#ifdef HAVE_SET_RX_MODE
+	dev->set_rx_mode = i40e_set_rx_mode;
+#endif
+	dev->set_multicast_list = i40e_set_rx_mode;
+	dev->set_mac_address = i40e_set_mac;
+	dev->change_mtu = i40e_change_mtu;
+#if defined(HAVE_PTP_1588_CLOCK) || defined(HAVE_I40E_INTELCIM_IOCTL)
+	dev->do_ioctl = i40e_ioctl;
+#endif
+	dev->tx_timeout = i40e_tx_timeout;
+#ifdef NETIF_F_HW_VLAN_TX
+#ifdef HAVE_VLAN_RX_REGISTER
+	dev->vlan_rx_register = i40e_vlan_rx_register;
+#endif
+	dev->vlan_rx_add_vid = i40e_vlan_rx_add_vid;
+	dev->vlan_rx_kill_vid = i40e_vlan_rx_kill_vid;
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	dev->poll_controller = i40e_netpoll;
+#endif
+#ifdef HAVE_NETDEV_SELECT_QUEUE
+	dev->select_queue = i40e_lan_select_queue;
+#endif /* HAVE_NETDEV_SELECT_QUEUE */
+}
+#endif /* HAVE_NET_DEVICE_OPS */
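+
+/* Illustrative sketch only, not part of this patch: the RHEL7 comment in
+ * the ops table above assumes a get_ndo_ext()-style accessor that compares
+ * .ndo_size against the offset of an extended field to decide whether the
+ * driver was built with that field. All names below except ndo_size are
+ * hypothetical:
+ *
+ *	#define get_ndo_ext(ops, field)				\
+ *		(((ops)->ndo_size >=				\
+ *		  offsetof(struct net_device_ops, field) +	\
+ *		  sizeof((ops)->field)) ? (ops)->field : NULL)
+ *
+ * Callers would NULL-check the result before invoking the extended
+ * callback, so an ops struct compiled without the field stays safe.
+ */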
 
 /**
  * i40e_config_netdev - Setup the netdev flags
@@ -9655,42 +13461,112 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
 
 	hw_enc_features = NETIF_F_SG			|
 			  NETIF_F_IP_CSUM		|
+#ifdef NETIF_F_IPV6_CSUM
 			  NETIF_F_IPV6_CSUM		|
+#endif
 			  NETIF_F_HIGHDMA		|
+#ifdef NETIF_F_SOFT_FEATURES
 			  NETIF_F_SOFT_FEATURES		|
+#endif
 			  NETIF_F_TSO			|
+#ifdef HAVE_ENCAP_TSO_OFFLOAD
 			  NETIF_F_TSO_ECN		|
 			  NETIF_F_TSO6			|
+#ifdef HAVE_GRE_ENCAP_OFFLOAD
 			  NETIF_F_GSO_GRE		|
+#ifdef NETIF_F_GSO_PARTIAL
 			  NETIF_F_GSO_GRE_CSUM		|
 			  NETIF_F_GSO_PARTIAL		|
+#endif
+#ifdef NETIF_F_GSO_IPXIP4
+			  NETIF_F_GSO_IPXIP4		|
+#ifdef NETIF_F_GSO_IPXIP6
+			  NETIF_F_GSO_IPXIP6		|
+#endif
+#else
+#ifdef NETIF_F_GSO_IPIP
+			  NETIF_F_GSO_IPIP		|
+#endif
+#ifdef NETIF_F_GSO_SIT
+			  NETIF_F_GSO_SIT		|
+#endif
+#endif
+#endif
 			  NETIF_F_GSO_UDP_TUNNEL	|
 			  NETIF_F_GSO_UDP_TUNNEL_CSUM	|
+#endif /* HAVE_ENCAP_TSO_OFFLOAD */
 			  NETIF_F_SCTP_CRC		|
+#ifdef NETIF_F_RXHASH
 			  NETIF_F_RXHASH		|
+#endif
+#ifdef HAVE_NDO_SET_FEATURES
 			  NETIF_F_RXCSUM		|
+#endif
 			  0;
 
 	if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
+#ifndef NETIF_F_GSO_PARTIAL
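+		/* XOR clears the bit here on the assumption that
+		 * NETIF_F_GSO_UDP_TUNNEL_CSUM was set in hw_enc_features
+		 * above (under HAVE_ENCAP_TSO_OFFLOAD)
+		 */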
+		hw_enc_features ^= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+#else
 		netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
 
 	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+#endif
 
+#ifdef HAVE_ENCAP_CSUM_OFFLOAD
 	netdev->hw_enc_features |= hw_enc_features;
+#endif
 
+#ifdef HAVE_NETDEV_VLAN_FEATURES
 	/* record features VLANs can make use of */
+#ifdef NETIF_F_GSO_PARTIAL
 	netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
+#else
+	netdev->vlan_features |= hw_enc_features;
+#endif
+#endif
 
-	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
-		netdev->hw_features |= NETIF_F_NTUPLE;
 	hw_features = hw_enc_features		|
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
 		      NETIF_F_HW_VLAN_CTAG_TX	|
 		      NETIF_F_HW_VLAN_CTAG_RX;
+#else /* NETIF_F_HW_VLAN_CTAG_RX */
+		      NETIF_F_HW_VLAN_TX	|
+		      NETIF_F_HW_VLAN_RX;
+#endif /* !NETIF_F_HW_VLAN_CTAG_RX */
+
+#if defined(HAVE_NDO_SET_FEATURES) || defined(ETHTOOL_GRXRINGS)
+	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+#ifdef NETIF_F_HW_TC
+		hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
+#else
+		hw_features |= NETIF_F_NTUPLE;
+#endif
+#endif
 
+#ifdef HAVE_NDO_SET_FEATURES
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+	hw_features |= get_netdev_hw_features(netdev);
+	set_netdev_hw_features(netdev, hw_features);
+#else
 	netdev->hw_features |= hw_features;
+#endif
+#endif /* HAVE_NDO_SET_FEATURES */
 
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
 	netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
+#else
+	netdev->features |= hw_features | NETIF_F_HW_VLAN_FILTER;
+#endif
+#ifdef NETIF_F_GSO_PARTIAL
 	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
+#endif
+
+#ifndef HAVE_NDO_SET_FEATURES
+#ifdef NETIF_F_GRO
+	netdev->features |= NETIF_F_GRO;
+#endif
+#endif
 
 	if (vsi->type == I40E_VSI_MAIN) {
 		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
@@ -9718,7 +13594,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
 		snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
 			 IFNAMSIZ - 4,
 			 pf->vsi[pf->lan_vsi]->netdev->name);
-		random_ether_addr(mac_addr);
+		eth_random_addr(mac_addr);
 
 		spin_lock_bh(&vsi->mac_filter_hash_lock);
 		i40e_add_mac_filter(vsi, mac_addr);
@@ -9744,20 +13620,47 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
 
 	ether_addr_copy(netdev->dev_addr, mac_addr);
+#ifdef ETHTOOL_GPERMADDR
 	ether_addr_copy(netdev->perm_addr, mac_addr);
+#endif
+
+#ifdef HAVE_MPLS_FEATURES
+	if (pf->hw_features & I40E_HW_MPLS_HDR_OFFLOAD_CAPABLE)
+		netdev->mpls_features = NETIF_F_HW_CSUM;
+#endif
 
+#ifdef IFF_UNICAST_FLT
 	netdev->priv_flags |= IFF_UNICAST_FLT;
+#endif
+#ifdef IFF_SUPP_NOFCS
 	netdev->priv_flags |= IFF_SUPP_NOFCS;
+#endif
 	/* Setup netdev TC information */
 	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
 
+#ifdef HAVE_NET_DEVICE_OPS
 	netdev->netdev_ops = &i40e_netdev_ops;
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+	set_netdev_ops_ext(netdev, &i40e_netdev_ops_ext);
+#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
+#else /* HAVE_NET_DEVICE_OPS */
+	i40e_assign_netdev_ops(netdev);
+#endif /* HAVE_NET_DEVICE_OPS */
 	netdev->watchdog_timeo = 5 * HZ;
+#ifdef SIOCETHTOOL
 	i40e_set_ethtool_ops(netdev);
+#endif
 
+#ifdef HAVE_NETDEVICE_MIN_MAX_MTU
 	/* MTU range: 68 - 9706 */
+#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU
+	netdev->extended->min_mtu = ETH_MIN_MTU;
+	netdev->extended->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
+#else
 	netdev->min_mtu = ETH_MIN_MTU;
 	netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
+#endif /* HAVE_RHEL7_EXTENDED_MIN_MAX_MTU */
+#endif /* HAVE_NETDEVICE_MIN_MAX_MTU */
 
 	return 0;
 }
@@ -9789,7 +13692,7 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
 	struct i40e_pf *pf = vsi->back;
 
 	/* Uplink is not a bridge so default to VEB */
-	if (vsi->veb_idx == I40E_NO_VEB)
+	if (vsi->veb_idx >= I40E_MAX_VEB)
 		return 1;
 
 	veb = pf->veb[vsi->veb_idx];
@@ -9799,6 +13702,7 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
 		return -ENOENT;
 	}
 
+#ifdef HAVE_BRIDGE_ATTRIBS
 	/* Uplink is a bridge in VEPA mode */
 	if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
 		return 0;
@@ -9806,6 +13710,10 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
 		/* Uplink is a bridge in VEB mode */
 		return 1;
 	}
+#else
+	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
+		return 1;
+#endif
 
 	/* VEPA is now default bridge, so return 0 */
 	return 0;
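+
+	/* Hypothetical caller sketch (not in this patch), illustrating the
+	 * tri-state return: negative values are errors and must not be
+	 * mistaken for VEPA:
+	 *
+	 *	ret = i40e_is_vsi_uplink_mode_veb(vsi);
+	 *	if (ret < 0)
+	 *		return ret;
+	 *	is_veb = (ret == 1);
+	 */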
@@ -9830,6 +13738,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
 
 	u8 enabled_tc = 0x1; /* TC0 enabled */
 	int f_count = 0;
+	u32 val;
 
 	memset(&ctxt, 0, sizeof(ctxt));
 	switch (vsi->type) {
@@ -9860,6 +13769,31 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
 
 		enabled_tc = i40e_pf_get_tc_map(pf);
 
+		/* Source pruning is enabled by default, so the flag is
+		 * negative logic - if it's set, we need to fiddle with
+		 * the VSI to disable source pruning.
+		 */
+		if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
+			memset(&ctxt, 0, sizeof(ctxt));
+			ctxt.seid = pf->main_vsi_seid;
+			ctxt.pf_num = pf->hw.pf_id;
+			ctxt.vf_num = 0;
+			ctxt.info.valid_sections |=
+			     cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+			ctxt.info.switch_id =
+				   cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
+			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+			if (ret) {
+				dev_info(&pf->pdev->dev,
+					 "update vsi failed, err %s aq_err %s\n",
+					 i40e_stat_str(&pf->hw, ret),
+					 i40e_aq_str(&pf->hw,
+						    pf->hw.aq.asq_last_status));
+				ret = -ENOENT;
+				goto err;
+			}
+		}
+
 		/* MFP mode setup queue map and update VSI */
 		if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
 		    !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
@@ -9967,12 +13901,11 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
 
 		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
-		if (pf->vf[vsi->vf_id].spoofchk) {
+		if (pf->vf[vsi->vf_id].mac_anti_spoof) {
 			ctxt.info.valid_sections |=
 				cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
 			ctxt.info.sec_flags |=
-				(I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
-				 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
+				I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
 		}
 		/* Setup the VSI tx/rx queue map for TC0 only for now */
 		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
@@ -10001,6 +13934,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
 		vsi->info.valid_sections = 0;
 		vsi->seid = ctxt.seid;
 		vsi->id = ctxt.vsi_number;
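+		/* Sanity check: bit 0 of what appears to be a per-VSI
+		 * switch register at 0x208800 + 4 * vsi->id is the
+		 * MACVSIPRUNEENABLE bit; warn if FW did not leave source
+		 * pruning enabled
+		 */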
+		val = rd32(&pf->hw, 0x208800 + (4 * (vsi->id)));
+		if (!(val & 0x1)) /* MACVSIPRUNEENABLE = 1 */
+			dev_warn(&vsi->back->pdev->dev,
+				 "Note: VSI source pruning is not being set correctly by FW\n");
 	}
 
 	vsi->active_filters = 0;
@@ -10015,7 +13952,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
 
 	if (f_count) {
 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
-		pf->flags |= I40E_FLAG_FILTER_SYNC;
+		set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
 	}
 
 	/* Update VSI BW information */
@@ -10062,6 +13999,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
 		return -ENODEV;
 	}
 
+	set_bit(__I40E_VSI_RELEASING, vsi->state);
 	uplink_seid = vsi->uplink_seid;
 	if (vsi->type != I40E_VSI_SRIOV) {
 		if (vsi->netdev_registered) {
@@ -10166,6 +14104,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
 		goto vector_setup_out;
 	}
 
+#if !defined(I40E_LEGACY_INTERRUPT) && !defined(I40E_MSI_INTERRUPT)
 	/* In Legacy mode, we do not have to get any other vector since we
 	 * piggyback on the misc/ICR0 for queue interrupts.
	 */
@@ -10183,6 +14122,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
 		goto vector_setup_out;
 	}
 
+#endif
 vector_setup_out:
 	return ret;
 }
@@ -10280,7 +14220,6 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
 {
 	struct i40e_vsi *vsi = NULL;
 	struct i40e_veb *veb = NULL;
-	u16 alloc_queue_pairs;
 	int ret, i;
 	int v_idx;
 
@@ -10330,6 +14269,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
 					 "New VSI creation error, uplink seid of LAN VSI expected.\n");
 				return NULL;
 			}
+#ifdef HAVE_BRIDGE_ATTRIBS
 			/* We come up by default in VEPA mode if SRIOV is not
 			 * already enabled, in which case we can't force VEPA
 			 * mode.
@@ -10338,6 +14278,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
 				veb->bridge_mode = BRIDGE_MODE_VEPA;
 				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
 			}
+#endif
 			i40e_config_bridge_mode(veb);
 		}
 		for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
@@ -10368,14 +14309,12 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
 	else if (type == I40E_VSI_SRIOV)
 		vsi->vf_id = param1;
 	/* assign it some queues */
-	alloc_queue_pairs = vsi->alloc_queue_pairs *
-			    (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
-
-	ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
+	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
+				vsi->idx);
 	if (ret < 0) {
 		dev_info(&pf->pdev->dev,
 			 "failed to get tracking for %d queues for VSI %d err=%d\n",
-			 alloc_queue_pairs, vsi->seid, ret);
+			 vsi->alloc_queue_pairs, vsi->seid, ret);
 		goto err_vsi;
 	}
 	vsi->base_queue = ret;
@@ -10398,10 +14337,14 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
 			goto err_netdev;
 		vsi->netdev_registered = true;
 		netif_carrier_off(vsi->netdev);
-#ifdef CONFIG_I40E_DCB
+		/* make sure transmit queues start off as stopped */
+		netif_tx_stop_all_queues(vsi->netdev);
+#ifdef CONFIG_DCB
+#ifdef HAVE_DCBNL_IEEE
 		/* Setup DCB netlink interface */
 		i40e_dcbnl_setup(vsi);
-#endif /* CONFIG_I40E_DCB */
+#endif /* HAVE_DCBNL_IEEE */
+#endif /* CONFIG_DCB */
 		/* fall through */
 
 	case I40E_VSI_FDIR:
@@ -10484,16 +14427,16 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
 		goto out;
 	}
 
-	veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
+	veb->bw_limit = LE16_TO_CPU(ets_data.port_bw_limit);
 	veb->bw_max_quanta = ets_data.tc_bw_max;
 	veb->is_abs_credits = bw_data.absolute_credits_enable;
 	veb->enabled_tc = ets_data.tc_valid_bits;
-	tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
-		    (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
+	tc_bw_max = LE16_TO_CPU(bw_data.tc_bw_max[0]) |
+		    (LE16_TO_CPU(bw_data.tc_bw_max[1]) << 16);
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
 		veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
 		veb->bw_tc_limit_credits[i] =
-					le16_to_cpu(bw_data.tc_bw_limits[i]);
+					LE16_TO_CPU(bw_data.tc_bw_limits[i]);
 		veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
 	}
 
@@ -10745,7 +14688,7 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
 	for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
 		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
 			break;
-	if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
+	if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
 		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
 			 vsi_seid);
 		return NULL;
@@ -10822,7 +14765,7 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
 		/* Main VEB? */
 		if (uplink_seid != pf->mac_seid)
 			break;
-		if (pf->lan_veb == I40E_NO_VEB) {
+		if (pf->lan_veb >= I40E_MAX_VEB) {
 			int v;
 
 			/* find existing or else empty VEB */
@@ -10832,13 +14775,15 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
 					break;
 				}
 			}
-			if (pf->lan_veb == I40E_NO_VEB) {
+			if (pf->lan_veb >= I40E_MAX_VEB) {
 				v = i40e_veb_mem_alloc(pf);
 				if (v < 0)
 					break;
 				pf->lan_veb = v;
 			}
 		}
+		if (pf->lan_veb >= I40E_MAX_VEB)
+			break;
 
 		pf->veb[pf->lan_veb]->seid = seid;
 		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
@@ -10962,14 +14907,16 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
 	*/
 
 	if ((pf->hw.pf_id == 0) &&
-	    !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
+	    !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
 		flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+		pf->last_sw_conf_flags = flags;
+	}
 
 	if (pf->hw.pf_id == 0) {
 		u16 valid_flags;
 
 		valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
-		ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags,
+		ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
 						NULL);
 		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
 			dev_info(&pf->pdev->dev,
@@ -10979,6 +14926,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
 					     pf->hw.aq.asq_last_status));
 			/* not a fatal problem, just keep going */
 		}
+		pf->last_sw_conf_valid_flags = valid_flags;
 	}
 
 	/* first time setup */
@@ -10989,7 +14937,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
 		/* Set up the PF VSI associated with the PF's main VSI
 		 * that is already in the HW switch
 		 */
-		if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
+		if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
 			uplink_seid = pf->veb[pf->lan_veb]->seid;
 		else
 			uplink_seid = pf->mac_seid;
@@ -10999,6 +14947,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
 			vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
 		if (!vsi) {
 			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
+			i40e_cloud_filter_exit(pf);
 			i40e_fdir_teardown(pf);
 			return -EAGAIN;
 		}
@@ -11035,11 +14984,17 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
 	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
 				  I40E_AQ_AN_COMPLETED) ? true : false);
 
+#ifdef HAVE_PTP_1588_CLOCK
 	i40e_ptp_init(pf);
 
+#endif /* HAVE_PTP_1588_CLOCK */
+#if defined(HAVE_VXLAN_RX_OFFLOAD) || defined(HAVE_UDP_ENC_RX_OFFLOAD)
+#if defined(HAVE_UDP_ENC_TUNNEL) || defined(HAVE_UDP_ENC_RX_OFFLOAD)
 	/* repopulate tunnel port filters */
 	i40e_sync_udp_filters(pf);
 
+#endif /* HAVE_UDP_ENC_TUNNEL || HAVE_UDP_ENC_RX_OFFLOAD */
+#endif /* HAVE_VXLAN_RX_OFFLOAD || HAVE_UDP_ENC_RX_OFFLOAD */
 	return ret;
 }
 
@@ -11074,6 +15029,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
 			       I40E_FLAG_DCB_ENABLED	|
 			       I40E_FLAG_SRIOV_ENABLED	|
 			       I40E_FLAG_VMDQ_ENABLED);
+		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
 	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
 				  I40E_FLAG_FD_SB_ENABLED |
 				  I40E_FLAG_FD_ATR_ENABLED |
@@ -11088,8 +15044,11 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
 			       I40E_FLAG_FD_ATR_ENABLED	|
 			       I40E_FLAG_DCB_ENABLED	|
 			       I40E_FLAG_VMDQ_ENABLED);
+		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
 	} else {
 		/* Not enough queues for all TCs */
+		bool is_max_n_of_queues_required;
+
 		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
 		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
 			pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
@@ -11098,8 +15057,21 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
 		}
 		pf->num_lan_qps = max_t(int, pf->rss_size_max,
 					num_online_cpus());
-		pf->num_lan_qps = min_t(int, pf->num_lan_qps,
-					pf->hw.func_caps.num_tx_qp);
+		is_max_n_of_queues_required = pf->hw.func_caps.num_tx_qp <
+			pf->num_req_vfs * pf->num_vf_qps + pf->num_vf_qps;
+		if (is_max_n_of_queues_required)
+			dev_warn(&pf->pdev->dev, "not enough %u queues for PF and %u VFs. Using maximum available queues for PF.\n",
+				 pf->hw.func_caps.num_tx_qp, pf->num_req_vfs);
+		if (pf->hw.func_caps.npar_enable ||
+		    is_max_n_of_queues_required)
+			pf->num_lan_qps = min_t(int, pf->num_lan_qps,
+						pf->hw.func_caps.num_tx_qp);
+		else
+			pf->num_lan_qps = min_t(int, pf->num_lan_qps,
+						pf->hw.func_caps.num_tx_qp -
+						pf->num_req_vfs *
+						pf->num_vf_qps);
 
 		queues_left -= pf->num_lan_qps;
 	}
@@ -11109,6 +15081,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
 			queues_left -= 1; /* save 1 queue for FD */
 		} else {
 			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
 			dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
 		}
 	}
@@ -11195,36 +15168,271 @@ static void i40e_print_features(struct i40e_pf *pf)
 		i += snprintf(&buf[i], REMAIN(i), " FD_SB");
 		i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
 	}
+	i += snprintf(&buf[i], REMAIN(i), " CloudF");
 	if (pf->flags & I40E_FLAG_DCB_CAPABLE)
 		i += snprintf(&buf[i], REMAIN(i), " DCB");
+#if IS_ENABLED(CONFIG_VXLAN)
 	i += snprintf(&buf[i], REMAIN(i), " VxLAN");
+#endif
+#if IS_ENABLED(CONFIG_GENEVE)
 	i += snprintf(&buf[i], REMAIN(i), " Geneve");
+#endif
+#ifdef HAVE_GRE_ENCAP_OFFLOAD
+	i += snprintf(&buf[i], REMAIN(i), " NVGRE");
+#endif
+#ifdef HAVE_PTP_1588_CLOCK
 	if (pf->flags & I40E_FLAG_PTP)
 		i += snprintf(&buf[i], REMAIN(i), " PTP");
+#endif
 	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
 		i += snprintf(&buf[i], REMAIN(i), " VEB");
 	else
 		i += snprintf(&buf[i], REMAIN(i), " VEPA");
 
-	dev_info(&pf->pdev->dev, "%s\n", buf);
-	kfree(buf);
-	WARN_ON(i > INFO_STRING_LEN);
+	dev_info(&pf->pdev->dev, "%s\n", buf);
+	kfree(buf);
+	WARN_ON(i > INFO_STRING_LEN);
+}
+
+/**
+ * i40e_get_platform_mac_addr - get platform-specific MAC address
+ * @pdev: PCI device information struct
+ * @pf: board private structure
+ *
+ * Look up the MAC address for the device. First we'll try
+ * eth_platform_get_mac_address, which will check Open Firmware, or arch
+ * specific fallback. Otherwise, we'll default to the stored value in
+ * firmware.
+ **/
+static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
+{
+	if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
+		i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
+}
+
+/**
+ * i40e_check_recovery_mode - check if we are running transition firmware
+ * @pf: board private structure
+ *
+ * Check registers indicating the firmware runs in recovery mode. Sets the
+ * appropriate driver state.
+ *
+ * Returns true if the recovery mode was detected, false otherwise
+ **/
+static bool i40e_check_recovery_mode(struct i40e_pf *pf)
+{
+	u32 val = rd32(&pf->hw, I40E_GL_FWSTS);
+
+	if (val & I40E_GL_FWSTS_FWS1B_MASK) {
+		dev_crit(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
+		dev_crit(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
+		set_bit(__I40E_RECOVERY_MODE, pf->state);
+
+		return true;
+	}
+	if (test_bit(__I40E_RECOVERY_MODE, pf->state))
+		dev_info(&pf->pdev->dev, "Please do Power-On Reset to initialize adapter in normal mode with full functionality.\n");
+
+	return false;
+}
+
+/**
+ * i40e_pf_loop_reset - perform reset in a loop.
+ * @pf: board private structure
+ *
+ * This function is useful when a NIC is about to enter recovery mode.
+ * When a NIC's internal data structures are corrupted, its firmware is
+ * going to enter recovery mode. Right after a POR it takes about
+ * 7 minutes for the firmware to enter recovery mode. Until that time
+ * the NIC is in some kind of intermediate state. After that period the
+ * NIC almost surely enters recovery mode. The only way for a driver to
+ * detect the intermediate state is to issue a series of PF resets and
+ * check the return value. If a PF reset returns success then the
+ * firmware could be in recovery mode, so the caller of this code needs
+ * to check for recovery mode if this function returns success. There is
+ * a small chance that the firmware will hang in the intermediate state
+ * forever. Since waiting 7 minutes is quite a lot of time, this function
+ * waits 10 seconds and then gives up by returning an error.
+ *
+ * Return 0 on success, negative on failure.
+ **/
+static i40e_status i40e_pf_loop_reset(struct i40e_pf * const pf)
+{
+	/* wait max 10 seconds for PF reset to succeed */
+	const unsigned long time_end = jiffies + 10 * HZ;
+
+	struct i40e_hw *hw = &pf->hw;
+	i40e_status ret;
+
+	ret = i40e_pf_reset(hw);
+	while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) {
+		usleep_range(10000, 20000);
+		ret = i40e_pf_reset(hw);
+	}
+
+	if (ret == I40E_SUCCESS)
+		pf->pfr_count++;
+	else
+		dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);
+
+	return ret;
+}
+
+/**
+ * i40e_check_fw_empr - check if FW issued unexpected EMP Reset
+ * @pf: board private structure
+ *
+ * Check FW registers to determine if FW issued unexpected EMP Reset.
+ * Every time an unexpected EMP Reset occurs, the FW increments a
+ * counter of unexpected EMP Resets. When the counter reaches 10, the
+ * FW should enter Recovery mode.
+ *
+ * Returns true if FW issued unexpected EMP Reset
+ **/
+static inline bool i40e_check_fw_empr(struct i40e_pf * const pf)
+{
+	const u32 fw_sts = rd32(&pf->hw, I40E_GL_FWSTS) &
+			   I40E_GL_FWSTS_FWS1B_MASK;
+	const bool is_empr = (fw_sts > I40E_GL_FWSTS_FWS1B_EMPR_0) &&
+			     (fw_sts <= I40E_GL_FWSTS_FWS1B_EMPR_10);
+
+	return is_empr;
+}
+
+/**
+ * i40e_handle_resets - handle EMP resets and PF resets
+ * @pf: board private structure
+ *
+ * Handle both EMP resets and PF resets and conclude whether there are
+ * any issues with these resets. If there are any issues, generate a
+ * log entry.
+ *
+ * Return 0 if the NIC is healthy, or a negative value when there are
+ * issues with resets
+ **/
+static inline i40e_status i40e_handle_resets(struct i40e_pf * const pf)
+{
+	const i40e_status pfr = i40e_pf_loop_reset(pf);
+	const bool is_empr = i40e_check_fw_empr(pf);
+
+	if (is_empr || pfr != I40E_SUCCESS)
+		dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
+
+	return is_empr ? I40E_ERR_RESET_FAILED : pfr;
+}
+
+/**
+ * i40e_init_recovery_mode - initialize subsystems needed in recovery mode
+ * @pf: board private structure
+ * @hw: ptr to the hardware info
+ *
+ * This function does a minimal setup of all subsystems needed for running
+ * recovery mode.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
+{
+	struct i40e_vsi *vsi;
+	int err;
+	int v_idx;
+
+#ifdef HAVE_PCI_ERS
+	pci_save_state(pf->pdev);
+#endif
+
+	/* set up periodic task facility */
+	timer_setup(&pf->service_timer, i40e_service_timer, 0);
+	pf->service_timer_period = HZ;
+
+	INIT_WORK(&pf->service_task, i40e_service_task);
+	clear_bit(__I40E_SERVICE_SCHED, pf->state);
+
+	err = i40e_init_interrupt_scheme(pf);
+	if (err)
+		goto err_switch_setup;
+
+	/* The number of VSIs reported by the FW is the minimum guaranteed
+	 * to us; HW supports far more and we share the remaining pool with
+	 * the other PFs. We allocate space for more than the guarantee with
+	 * the understanding that we might not get them all later.
+	 */
+	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
+		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
+	else
+		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
+
+	/* Set up the vsi struct and our local tracking of the MAIN PF vsi. */
+	pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
+			  GFP_KERNEL);
+	if (!pf->vsi) {
+		err = -ENOMEM;
+		goto err_switch_setup;
+	}
+
+	/* We allocate one VSI, which is needed as the absolute minimum
+	 * in order to register the netdev
+	 */
+	v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
+	if (v_idx < 0)
+		goto err_switch_setup;
+	pf->lan_vsi = v_idx;
+	vsi = pf->vsi[v_idx];
+	if (!vsi)
+		goto err_switch_setup;
+	vsi->alloc_queue_pairs = 1;
+	err = i40e_config_netdev(vsi);
+	if (err)
+		goto err_switch_setup;
+	err = register_netdev(vsi->netdev);
+	if (err)
+		goto err_switch_setup;
+	vsi->netdev_registered = true;
+	i40e_dbg_pf_init(pf);
+
+	err = i40e_setup_misc_vector_for_recovery_mode(pf);
+	if (err)
+		goto err_switch_setup;
+
+	/* tell the firmware that we're starting */
+	i40e_send_version(pf);
+
+	/* since everything's happy, start the service_task timer */
+	mod_timer(&pf->service_timer,
+		  round_jiffies(jiffies + pf->service_timer_period));
+
+	return 0;
+
+err_switch_setup:
+	i40e_reset_interrupt_capability(pf);
+	del_timer_sync(&pf->service_timer);
+	dev_warn(&pf->pdev->dev, "previous errors forcing module to load in debug mode\n");
+	i40e_dbg_pf_init(pf);
+	set_bit(__I40E_DEBUG_MODE, pf->state);
+	return 0;
 }
 
 /**
- * i40e_get_platform_mac_addr - get platform-specific MAC address
- * @pdev: PCI device information struct
- * @pf: board private structure
- *
- * Look up the MAC address for the device. First we'll try
- * eth_platform_get_mac_address, which will check Open Firmware, or arch
- * specific fallback. Otherwise, we'll default to the stored value in
- * firmware.
+ * i40e_set_fec_in_flags - helper function for setting FEC options in flags
+ * @fec_cfg: FEC option to set in flags
+ * @flags: ptr to flags in which we set FEC option
  **/
-static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
+void i40e_set_fec_in_flags(u8 fec_cfg, u64 *flags)
 {
-	if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
-		i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
+	if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
+		*flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC;
+	else if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
+		 (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
+		*flags |= I40E_FLAG_RS_FEC;
+		*flags &= ~I40E_FLAG_BASE_R_FEC;
+	} else if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
+		   (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
+		*flags |= I40E_FLAG_BASE_R_FEC;
+		*flags &= ~I40E_FLAG_RS_FEC;
+	}
+	if (fec_cfg == 0)
+		*flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
 }
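+
+/* Worked example (illustrative only, hypothetical caller): with
+ * fec_cfg = I40E_AQ_SET_FEC_REQUEST_RS this sets I40E_FLAG_RS_FEC and
+ * clears I40E_FLAG_BASE_R_FEC in *flags, while fec_cfg = 0 clears both
+ * flags, fully disabling FEC:
+ *
+ *	u64 flags = pf->flags;
+ *
+ *	i40e_set_fec_in_flags(I40E_AQ_SET_FEC_REQUEST_RS, &flags);
+ */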
 
 /**
@@ -11238,15 +15446,24 @@ static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
  *
  * Returns 0 on success, negative on failure
  **/
+#ifdef HAVE_CONFIG_HOTPLUG
+static int __devinit i40e_probe(struct pci_dev *pdev,
+				const struct pci_device_id *ent)
+#else
 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+#endif
 {
 	struct i40e_aq_get_phy_abilities_resp abilities;
+#ifdef CONFIG_DCB
+	enum i40e_get_fw_lldp_status_resp lldp_status;
+	i40e_status status;
+#endif /* CONFIG_DCB */
 	struct i40e_pf *pf;
 	struct i40e_hw *hw;
 	static u16 pfs_found;
 	u16 wol_nvm_bits;
 	u16 link_status;
-	int err;
+	int err = 0;
 	u32 val;
 	u32 i;
 	u8 set_fc_aq_fail;
@@ -11270,7 +15487,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	err = pci_request_mem_regions(pdev, i40e_driver_name);
 	if (err) {
 		dev_info(&pdev->dev,
-			 "pci_request_selected_regions failed %d\n", err);
+			 "pci_request_mem_regions failed %d\n", err);
 		goto err_pci_reg;
 	}
 
@@ -11289,6 +15506,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 	pf->next_vsi = 0;
 	pf->pdev = pdev;
+	pci_set_drvdata(pdev, pf);
 	set_bit(__I40E_DOWN, pf->state);
 
 	hw = &pf->hw;
@@ -11296,7 +15514,17 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
 				I40E_MAX_CSR_SPACE);
-
+	/* We believe that the highest register to read is
+	 * I40E_GLGEN_STAT_CLEAR, so we check that the BAR size is at
+	 * least that large before mapping, to prevent a kernel panic.
+	 */
+	if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) {
+		dev_err(&pdev->dev, "Cannot map registers, bar size 0x%X too small, aborting\n",
+			pf->ioremap_len);
+		err = -ENOMEM;
+		goto err_ioremap;
+	}
 	hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
 	if (!hw->hw_addr) {
 		err = -EIO;
@@ -11315,41 +15543,42 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	hw->bus.bus_id = pdev->bus->number;
 	pf->instance = pfs_found;
 
+	/* Select something other than the 802.1ad ethertype for the
+	 * switch to use internally and drop on ingress.
+	 */
+	hw->switch_tag = 0xffff;
+	hw->first_tag = ETH_P_8021AD;
+	hw->second_tag = ETH_P_8021Q;
+
 	INIT_LIST_HEAD(&pf->l3_flex_pit_list);
 	INIT_LIST_HEAD(&pf->l4_flex_pit_list);
+	INIT_LIST_HEAD(&pf->ddp_old_prof);
 
-	/* set up the locks for the AQ, do this only once in probe
+	/* set up the spinlocks for the AQ, do this only once in probe
 	 * and destroy them only once in remove
 	 */
-	mutex_init(&hw->aq.asq_mutex);
-	mutex_init(&hw->aq.arq_mutex);
-
-	pf->msg_enable = netif_msg_init(debug,
-					NETIF_MSG_DRV |
-					NETIF_MSG_PROBE |
-					NETIF_MSG_LINK);
-	if (debug < -1)
-		pf->hw.debug_mask = debug;
-
-	/* do a special CORER for clearing PXE mode once at init */
-	if (hw->revision_id == 0 &&
-	    (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
-		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
-		i40e_flush(hw);
-		msleep(200);
-		pf->corer_count++;
+	i40e_init_spinlock_d(&hw->aq.asq_spinlock);
+	i40e_init_spinlock_d(&hw->aq.arq_spinlock);
 
-		i40e_clear_pxe_mode(hw);
-	}
+	if (debug != -1)
+		pf->msg_enable = pf->hw.debug_mask = debug;
 
 	/* Reset here to make sure all is clean and to define PF 'n' */
+	/* have to do the PF reset first to "graceful abort" all queues */
 	i40e_clear_hw(hw);
-	err = i40e_pf_reset(hw);
+
+	err = i40e_set_mac_type(hw);
 	if (err) {
-		dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
+		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
+			 err);
 		goto err_pf_reset;
 	}
-	pf->pfr_count++;
+
+	err = i40e_handle_resets(pf);
+	if (err)
+		goto err_pf_reset;
+
+	i40e_check_recovery_mode(pf);
 
 	hw->aq.num_arq_entries = I40E_AQ_LEN;
 	hw->aq.num_asq_entries = I40E_AQ_LEN;
@@ -11375,7 +15604,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (err) {
 		if (err == I40E_ERR_FIRMWARE_API_VERSION)
 			dev_info(&pdev->dev,
-				 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
+				 "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n",
+				 hw->aq.api_maj_ver,
+				 hw->aq.api_min_ver,
+				 I40E_FW_API_VERSION_MAJOR,
+				 I40E_FW_MINOR_VERSION(hw));
 		else
 			dev_info(&pdev->dev,
 				 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
@@ -11391,13 +15624,20 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		 i40e_nvm_version_str(hw));
 
 	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
-	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
+	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
 		dev_info(&pdev->dev,
-			 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
-	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
-		 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
+			 "The driver for the device detected a newer version of the NVM image v%u.%u than expected v%u.%u. Please install the most recent version of the network driver.\n",
+			 hw->aq.api_maj_ver,
+			 hw->aq.api_min_ver,
+			 I40E_FW_API_VERSION_MAJOR,
+			 I40E_FW_MINOR_VERSION(hw));
+	else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
 		dev_info(&pdev->dev,
-			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+			 "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n",
+			 hw->aq.api_maj_ver,
+			 hw->aq.api_min_ver,
+			 I40E_FW_API_VERSION_MAJOR,
+			 I40E_FW_MINOR_VERSION(hw));
 
 	i40e_verify_eeprom(pf);
 
@@ -11406,7 +15646,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
 
 	i40e_clear_pxe_mode(hw);
-	err = i40e_get_capabilities(pf);
+
+	err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
 	if (err)
 		goto err_adminq_setup;
 
@@ -11416,6 +15657,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_sw_init;
 	}
 
+	if (test_bit(__I40E_RECOVERY_MODE, pf->state))
+		return i40e_init_recovery_mode(pf, hw);
+
 	err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
 				hw->func_caps.num_rx_qp, 0, 0);
 	if (err) {
@@ -11436,7 +15680,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 */
 	if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
 		dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
-		i40e_aq_stop_lldp(hw, true, NULL);
+		i40e_aq_stop_lldp(hw, true, false, NULL);
 	}
 
 	/* allow a platform config to override the HW addr */
@@ -11452,20 +15696,37 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
 	if (is_valid_ether_addr(hw->mac.port_addr))
 		pf->hw_features |= I40E_HW_PORT_ID_VALID;
+#ifdef HAVE_PTP_1588_CLOCK
+	i40e_ptp_alloc_pins(pf);
+#endif /* HAVE_PTP_1588_CLOCK */
 
-	pci_set_drvdata(pdev, pf);
+#ifdef HAVE_PCI_ERS
 	pci_save_state(pdev);
-#ifdef CONFIG_I40E_DCB
+#endif
+#ifdef CONFIG_DCB
+	status = i40e_get_fw_lldp_status(&pf->hw, &lldp_status);
+	if (status == I40E_SUCCESS &&
+	    lldp_status == I40E_GET_FW_LLDP_STATUS_ENABLED)
+		pf->flags &= ~I40E_FLAG_DISABLE_FW_LLDP;
+	else
+		pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
+	dev_info(&pdev->dev,
+		 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ?
+			"FW LLDP is disabled\n" :
+			"FW LLDP is enabled\n");
+
+	/* Enable FW to write default DCB config on link-up */
+	i40e_aq_set_dcb_parameters(hw, true, NULL);
+
 	err = i40e_init_pf_dcb(pf);
 	if (err) {
 		dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
 		pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
 		/* Continue without DCB enabled */
 	}
-#endif /* CONFIG_I40E_DCB */
+#endif /* CONFIG_DCB */
 
 	/* set up periodic task facility */
-	setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
+	timer_setup(&pf->service_timer, i40e_service_timer, 0);
 	pf->service_timer_period = HZ;
 
 	INIT_WORK(&pf->service_task, i40e_service_task);
@@ -11473,7 +15734,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	/* NVM bit on means WoL disabled for the port */
 	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
-	if (BIT (hw->port) & wol_nvm_bits || hw->partition_id != 1)
+	if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
 		pf->wol_en = false;
 	else
 		pf->wol_en = true;
@@ -11510,6 +15771,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
 		if (pci_num_vf(pdev))
 			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+#if !defined(HAVE_SRIOV_CONFIGURE) && !defined(HAVE_RHEL6_SRIOV_CONFIGURE)
+		else if (pf->num_req_vfs)
+			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+#endif
 	}
 #endif
 	err = i40e_setup_pf_switch(pf, false);
@@ -11517,6 +15782,20 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
 		goto err_vsis;
 	}
+	if (i40e_is_l4mode_enabled() && hw->pf_id == 0) {
+		u8 l4type = I40E_AQ_SET_SWITCH_L4_TYPE_BOTH;
+
+		switch (l4mode) {
+		case L4_MODE_UDP:
+			l4type = I40E_AQ_SET_SWITCH_L4_TYPE_UDP;
+			break;
+		case L4_MODE_TCP:
+			l4type = I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
+			break;
+		}
+		i40e_set_switch_mode(pf, l4type);
+	}
+	INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
 
 	/* Make sure flow control is set according to current settings */
 	err = i40e_set_fc(hw, &set_fc_aq_fail, true);
@@ -11615,6 +15894,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 				dev_info(&pdev->dev,
 					 "Error %d allocating resources for existing VFs\n",
 					 err);
+#if !defined(HAVE_SRIOV_CONFIGURE) && !defined(HAVE_RHEL6_SRIOV_CONFIGURE)
+		} else if (pf->num_req_vfs) {
+			err = i40e_alloc_vfs(pf, pf->num_req_vfs);
+			if (err) {
+				pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
+				dev_info(&pdev->dev,
+					 "failed to alloc vfs: %d\n", err);
+			}
+#endif /* HAVE_SRIOV_CONFIGURE */
 		}
 	}
 #endif /* CONFIG_PCI_IOV */
@@ -11631,6 +15919,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		}
 	}
 
+	pfs_found++;
 	i40e_dbg_pf_init(pf);
 
 	/* tell the firmware that we're starting */
@@ -11647,7 +15936,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
 				 err);
 	}
-
 #define PCI_SPEED_SIZE 8
 #define PCI_WIDTH_SIZE 8
 	/* Devices on the IOSF bus do not have this information
@@ -11668,23 +15956,23 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 		switch (hw->bus.speed) {
 		case i40e_bus_speed_8000:
-			strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
+			strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
 		case i40e_bus_speed_5000:
-			strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
+			strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
 		case i40e_bus_speed_2500:
-			strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
+			strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
 		default:
 			break;
 		}
 		switch (hw->bus.width) {
 		case i40e_bus_width_pcie_x8:
-			strncpy(width, "8", PCI_WIDTH_SIZE); break;
+			strlcpy(width, "8", PCI_WIDTH_SIZE); break;
 		case i40e_bus_width_pcie_x4:
-			strncpy(width, "4", PCI_WIDTH_SIZE); break;
+			strlcpy(width, "4", PCI_WIDTH_SIZE); break;
 		case i40e_bus_width_pcie_x2:
-			strncpy(width, "2", PCI_WIDTH_SIZE); break;
+			strlcpy(width, "2", PCI_WIDTH_SIZE); break;
 		case i40e_bus_width_pcie_x1:
-			strncpy(width, "1", PCI_WIDTH_SIZE); break;
+			strlcpy(width, "1", PCI_WIDTH_SIZE); break;
 		default:
 			break;
 		}
@@ -11707,6 +15995,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
 
+	/* set the FEC config due to the board capabilities */
+	i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);
+
 	/* get the supported phy types from the fw */
 	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
 	if (err)
@@ -11724,7 +16015,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 						       pf->main_vsi_seid);
 
 	if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
-		(pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
+	    (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
 		pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
 	if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
 		pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
@@ -11770,7 +16061,11 @@ err_dma:
  * Hot-Plug event, or because the driver is going to be removed from
  * memory.
  **/
+#ifdef HAVE_CONFIG_HOTPLUG
+static void __devexit i40e_remove(struct pci_dev *pdev)
+#else
 static void i40e_remove(struct pci_dev *pdev)
+#endif
 {
 	struct i40e_pf *pf = pci_get_drvdata(pdev);
 	struct i40e_hw *hw = &pf->hw;
@@ -11778,25 +16073,40 @@ static void i40e_remove(struct pci_dev *pdev)
 	int i;
 
 	i40e_dbg_pf_exit(pf);
-
+#ifdef HAVE_PTP_1588_CLOCK
 	i40e_ptp_stop(pf);
 
 	/* Disable RSS in hw */
 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
 
+#endif /* HAVE_PTP_1588_CLOCK */
 	/* no more scheduling of any task */
 	set_bit(__I40E_SUSPENDED, pf->state);
 	set_bit(__I40E_DOWN, pf->state);
-	if (pf->service_timer.data)
+	if (pf->service_timer.function)
 		del_timer_sync(&pf->service_timer);
 	if (pf->service_task.func)
 		cancel_work_sync(&pf->service_task);
-
 	/* Client close must be called explicitly here because the timer
 	 * has been stopped.
 	 */
 	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+	if (test_bit(__I40E_DEBUG_MODE, pf->state))
+		goto debug_mode_clear;
+	if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
+		struct i40e_vsi *vsi = pf->vsi[0];
+
+		/* We know that we have allocated only one VSI for this PF;
+		 * it was just for registering the netdevice, so that the
+		 * interface is visible in the 'ifconfig' output
+		 */
+		unregister_netdev(vsi->netdev);
+		free_netdev(vsi->netdev);
+		vsi->netdev = NULL;
+
+		goto unmap;
+	}
 
 	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
 		i40e_free_vfs(pf);
@@ -11823,6 +16133,8 @@ static void i40e_remove(struct pci_dev *pdev)
 	if (pf->vsi[pf->lan_vsi])
 		i40e_vsi_release(pf->vsi[pf->lan_vsi]);
 
+	i40e_cloud_filter_exit(pf);
+
 	/* remove attached clients */
 	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
 		ret_code = i40e_lan_del_device(pf);
@@ -11840,22 +16152,37 @@ static void i40e_remove(struct pci_dev *pdev)
 				 ret_code);
 	}
 
-	/* shutdown the adminq */
-	i40e_shutdown_adminq(hw);
-
-	/* destroy the locks only once, here */
-	mutex_destroy(&hw->aq.arq_mutex);
-	mutex_destroy(&hw->aq.asq_mutex);
+unmap:
+	/* Free MSI/legacy interrupt 0 when in recovery mode.
+	 * This is normally done in i40e_vsi_free_irq on VSI close,
+	 * but since recovery mode doesn't allow bringing up an
+	 * interface and we do not allocate all Rx/Tx resources for
+	 * it, we'll just do it here
+	 */
+	if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
+	    !(pf->flags & I40E_FLAG_MSIX_ENABLED))
+		free_irq(pf->pdev->irq, pf);
 
 	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
+	rtnl_lock();
 	i40e_clear_interrupt_scheme(pf);
 	for (i = 0; i < pf->num_alloc_vsi; i++) {
 		if (pf->vsi[i]) {
-			i40e_vsi_clear_rings(pf->vsi[i]);
+			if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
+				i40e_vsi_clear_rings(pf->vsi[i]);
 			i40e_vsi_clear(pf->vsi[i]);
 			pf->vsi[i] = NULL;
 		}
 	}
+	rtnl_unlock();
+
+debug_mode_clear:
+	/* shutdown the adminq */
+	i40e_shutdown_adminq(hw);
+
+	/* destroy the locks only once, here */
+	i40e_destroy_spinlock_d(&hw->aq.arq_spinlock);
+	i40e_destroy_spinlock_d(&hw->aq.asq_spinlock);
 
 	for (i = 0; i < I40E_MAX_VEB; i++) {
 		kfree(pf->veb[i]);
@@ -11873,9 +16200,11 @@ static void i40e_remove(struct pci_dev *pdev)
 	pci_disable_device(pdev);
 }
 
+#ifdef HAVE_PCI_ERS
 /**
  * i40e_pci_error_detected - warning that something funky happened in PCI land
  * @pdev: PCI device information struct
+ * @error: the type of PCI error
  *
  * Called to warn that something happened and the error handling steps
  * are in progress.  Allows the driver to quiesce things, be ready for
@@ -11890,7 +16219,7 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
 
 	if (!pf) {
 		dev_info(&pdev->dev,
-			 "Cannot recover - error happened during device probe\n");
+			 "Cannot recover -error happened during device probe\n");
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
 
@@ -11947,6 +16276,49 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
 	return result;
 }
 
+#if defined(HAVE_PCI_ERROR_HANDLER_RESET_PREPARE) || defined(HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY) || defined(HAVE_RHEL7_PCI_RESET_NOTIFY)
+/**
+ * i40e_pci_error_reset_prepare - prepare device driver for pci reset
+ * @pdev: PCI device information struct
+ */
+static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+	i40e_prep_for_reset(pf, false);
+}
+
+/**
+ * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
+ * @pdev: PCI device information struct
+ */
+static void i40e_pci_error_reset_done(struct pci_dev *pdev)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+	i40e_reset_and_rebuild(pf, false, false);
+}
+
+#endif
+#if defined(HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY) || defined(HAVE_RHEL7_PCI_RESET_NOTIFY)
+/**
+ * i40e_pci_error_reset_notify - notify device driver of pci reset
+ * @pdev: PCI device information struct
+ * @prepare: true if device is about to be reset; false if reset attempt
+ * completed
+ *
+ * Called to perform pf reset when a pci function level reset is triggered
+ **/
+static void i40e_pci_error_reset_notify(struct pci_dev *pdev, bool prepare)
+{
+	dev_dbg(&pdev->dev, "%s\n", __func__);
+	if (prepare)
+		i40e_pci_error_reset_prepare(pdev);
+	else
+		i40e_pci_error_reset_done(pdev);
+}
+
+#endif
 /**
  * i40e_pci_error_resume - restart operations after PCI error recovery
  * @pdev: PCI device information struct
@@ -12012,6 +16384,11 @@ static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
 			"Failed to enable Multicast Magic Packet wake up\n");
 }
 
+/* FW state indicating on X722 that we need to disable WoL to
+ * allow the adapter to shut down completely
+ */
+#define I40E_GL_FWSTS_FWS0B_STAGE_FW_UPDATE_POR_REQUIRED 0x0F
+
 /**
  * i40e_shutdown - PCI callback for shutting down
  * @pdev: PCI device information struct
@@ -12020,18 +16397,17 @@ static void i40e_shutdown(struct pci_dev *pdev)
 {
 	struct i40e_pf *pf = pci_get_drvdata(pdev);
 	struct i40e_hw *hw = &pf->hw;
+	u32 val = 0;
 
 	set_bit(__I40E_SUSPENDED, pf->state);
 	set_bit(__I40E_DOWN, pf->state);
-	rtnl_lock();
-	i40e_prep_for_reset(pf, true);
-	rtnl_unlock();
 
-	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
-	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+	if (test_bit(__I40E_DEBUG_MODE, pf->state))
+		goto debug_mode;
 
 	del_timer_sync(&pf->service_timer);
 	cancel_work_sync(&pf->service_task);
+	i40e_cloud_filter_exit(pf);
 	i40e_fdir_teardown(pf);
 
 	/* Client close must be called explicitly here because the timer
@@ -12039,55 +16415,186 @@ static void i40e_shutdown(struct pci_dev *pdev)
 	 */
 	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
 
-	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
-		i40e_enable_mc_magic_wake(pf);
+	val = rd32(hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS0B_MASK;
 
-	i40e_prep_for_reset(pf, false);
+	if (pf->hw.mac.type == I40E_MAC_X722) {
+		/* We check here if we need to disable the WoL to allow the
+		 * adapter to shut down completely after a FW update
+		 */
+		if (val != I40E_GL_FWSTS_FWS0B_STAGE_FW_UPDATE_POR_REQUIRED &&
+		    pf->wol_en) {
+			if (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE)
+				i40e_enable_mc_magic_wake(pf);
+
+			i40e_prep_for_reset(pf, false);
+
+			wr32(hw, I40E_PFPM_APM, I40E_PFPM_APM_APME_MASK);
+			wr32(hw, I40E_PFPM_WUFC, I40E_PFPM_WUFC_MAG_MASK);
+		} else {
+			i40e_prep_for_reset(pf, false);
+
+			wr32(hw, I40E_PFPM_APM, 0);
+			wr32(hw, I40E_PFPM_WUFC, 0);
+		}
+	} else {
+		i40e_prep_for_reset(pf, false);
+
+		wr32(hw, I40E_PFPM_APM,
+		     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+		wr32(hw, I40E_PFPM_WUFC,
+		     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+	}
 
-	wr32(hw, I40E_PFPM_APM,
-	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
-	wr32(hw, I40E_PFPM_WUFC,
-	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+	/* Free MSI/legacy interrupt 0 when in recovery mode.
+	 * This is normally done in i40e_vsi_free_irq on VSI close,
+	 * but since recovery mode doesn't allow bringing up an
+	 * interface and we do not allocate all Rx/Tx resources for
+	 * it, we'll just do it here
+	 */
+	if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
+	    !(pf->flags & I40E_FLAG_MSIX_ENABLED))
+		free_irq(pf->pdev->irq, pf);
 
+	/* Since we're going to destroy queues during the
+	 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
+	 * whole section
+	 */
+	rtnl_lock();
 	i40e_clear_interrupt_scheme(pf);
+	rtnl_unlock();
 
-	if (system_state == SYSTEM_POWER_OFF) {
+debug_mode:
+
+	if (pf->hw.mac.type == I40E_MAC_X722 &&
+	    val == I40E_GL_FWSTS_FWS0B_STAGE_FW_UPDATE_POR_REQUIRED) {
+		pci_wake_from_d3(pdev, false);
+		device_set_wakeup_enable(&pdev->dev, false);
+	} else if (system_state == SYSTEM_POWER_OFF) {
 		pci_wake_from_d3(pdev, pf->wol_en);
-		pci_set_power_state(pdev, PCI_D3hot);
 	}
+	pci_set_power_state(pdev, PCI_D3hot);
 }
 
 #ifdef CONFIG_PM
 /**
- * i40e_suspend - PCI callback for moving to D3
- * @pdev: PCI device information struct
+ * i40e_suspend - PM callback for moving to D3
+ * @dev: generic device information structure
  **/
-static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
+static int i40e_suspend(struct device *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev);
 	struct i40e_pf *pf = pci_get_drvdata(pdev);
 	struct i40e_hw *hw = &pf->hw;
-	int retval = 0;
 
-	set_bit(__I40E_SUSPENDED, pf->state);
+	/* If we're already suspended, then there is nothing to do */
+	if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
+		return 0;
+
 	set_bit(__I40E_DOWN, pf->state);
 
+	/* Ensure service task will not be running */
+	del_timer_sync(&pf->service_timer);
+	cancel_work_sync(&pf->service_task);
+
+	/* Client close must be called explicitly here because the timer
+	 * has been stopped.
+	 */
+	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+
+	if (test_bit(__I40E_DEBUG_MODE, pf->state))
+		return 0;
+
 	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
 		i40e_enable_mc_magic_wake(pf);
 
-	i40e_prep_for_reset(pf, false);
+	/* Since we're going to destroy queues during
+	 * i40e_clear_interrupt_scheme(), we should hold the RTNL lock
+	 * for this whole section.
+	 */
+	rtnl_lock();
+
+	i40e_prep_for_reset(pf, true);
 
 	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
 	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
 
-	i40e_stop_misc_vector(pf);
-	if (pf->msix_entries) {
-		synchronize_irq(pf->msix_entries[0].vector);
-		free_irq(pf->msix_entries[0].vector, pf);
+	/* Clear the interrupt scheme and release our IRQs so that the system
+	 * can safely hibernate even when there are a large number of CPUs.
+	 * Otherwise hibernation might fail when mapping all the vectors back
+	 * to CPU0.
+	 */
+	i40e_clear_interrupt_scheme(pf);
+
+	rtnl_unlock();
+
+	return 0;
+}
+
+/**
+ * i40e_resume - PM callback for waking up from D3
+ * @dev: generic device information structure
+ **/
+static int i40e_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	int err;
+
+	/* If we're not suspended, then there is nothing to do */
+	if (!test_bit(__I40E_SUSPENDED, pf->state))
+		return 0;
+
+	/* We need to hold the RTNL lock prior to restoring interrupt schemes,
+	 * since we're going to be restoring queues
+	 */
+	rtnl_lock();
+
+	/* We cleared the interrupt scheme when we suspended, so we need to
+	 * restore it now to resume device functionality.
+	 */
+	err = i40e_restore_interrupt_scheme(pf);
+	if (err) {
+		dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n",
+			err);
 	}
+
+	clear_bit(__I40E_DOWN, pf->state);
+	i40e_reset_and_rebuild(pf, false, true);
+
+	rtnl_unlock();
+
+	/* Clear suspended state last after everything is recovered */
+	clear_bit(__I40E_SUSPENDED, pf->state);
+
+	/* Restart the service task */
+	mod_timer(&pf->service_timer,
+		  round_jiffies(jiffies + pf->service_timer_period));
+
+	return 0;
+}
+
+#ifdef USE_LEGACY_PM_SUPPORT
+/**
+ * i40e_legacy_suspend - PCI callback for moving to D3
+ * @pdev: PCI device information struct
+ * @state: PCI power state
+ *
+ * Legacy suspend handler for older kernels which do not support the newer
+ * generic callbacks
+ **/
+static int i40e_legacy_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	int retval = i40e_suspend(&pdev->dev);
+
+	if (retval)
+		return retval;
+
+	/* Some older kernels may not handle state correctly for legacy power
+	 * management, so we'll handle it here ourselves
+	 */
 	retval = pci_save_state(pdev);
 	if (retval)
 		return retval;
 
+	pci_disable_device(pdev);
 	pci_wake_from_d3(pdev, pf->wol_en);
 	pci_set_power_state(pdev, PCI_D3hot);
 
@@ -12095,12 +16602,14 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
 }
 
 /**
- * i40e_resume - PCI callback for waking up from D3
+ * i40e_legacy_resume - PCI callback for waking up from D3
  * @pdev: PCI device information struct
+ *
+ * Legacy resume handler for kernels which do not support the newer generic
+ * callbacks.
  **/
-static int i40e_resume(struct pci_dev *pdev)
+static int i40e_legacy_resume(struct pci_dev *pdev)
 {
-	struct i40e_pf *pf = pci_get_drvdata(pdev);
 	u32 err;
 
 	pci_set_power_state(pdev, PCI_D0);
@@ -12112,7 +16621,7 @@ static int i40e_resume(struct pci_dev *pdev)
 
 	err = pci_enable_device_mem(pdev);
 	if (err) {
-		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
+		dev_err(pci_dev_to_dev(pdev), "Cannot enable PCI device from suspend\n");
 		return err;
 	}
 	pci_set_master(pdev);
@@ -12120,43 +16629,75 @@ static int i40e_resume(struct pci_dev *pdev)
 	/* no wakeup events while running */
 	pci_wake_from_d3(pdev, false);
 
-	/* handling the reset will rebuild the device state */
-	if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) {
-		clear_bit(__I40E_DOWN, pf->state);
-		if (pf->msix_entries) {
-			err = request_irq(pf->msix_entries[0].vector,
-					  i40e_intr, 0, pf->int_name, pf);
-			if (err) {
-				dev_err(&pf->pdev->dev,
-					"request_irq for %s failed: %d\n",
-					pf->int_name, err);
-			}
-		}
-		i40e_reset_and_rebuild(pf, false, false);
-	}
-
-	return 0;
+	return i40e_resume(&pdev->dev);
 }
+#endif /* USE_LEGACY_PM_SUPPORT */
+#endif /* CONFIG_PM */
 
-#endif
+#ifdef HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS
 static const struct pci_error_handlers i40e_err_handler = {
+#else
+static struct pci_error_handlers i40e_err_handler = {
+#endif
 	.error_detected = i40e_pci_error_detected,
 	.slot_reset = i40e_pci_error_slot_reset,
+#ifdef HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY
+	.reset_notify = i40e_pci_error_reset_notify,
+#endif
+#ifdef HAVE_PCI_ERROR_HANDLER_RESET_PREPARE
+	.reset_prepare = i40e_pci_error_reset_prepare,
+	.reset_done = i40e_pci_error_reset_done,
+#endif
 	.resume = i40e_pci_error_resume,
 };
 
+#if defined(HAVE_RHEL6_SRIOV_CONFIGURE) || defined(HAVE_RHEL7_PCI_DRIVER_RH)
+static struct pci_driver_rh i40e_driver_rh = {
+#ifdef HAVE_RHEL6_SRIOV_CONFIGURE
+	.sriov_configure = i40e_pci_sriov_configure,
+#elif defined(HAVE_RHEL7_PCI_RESET_NOTIFY)
+	.reset_notify = i40e_pci_error_reset_notify,
+#endif
+};
+
+#endif
+#endif /* HAVE_PCI_ERS */
+#if defined(CONFIG_PM) && !defined(USE_LEGACY_PM_SUPPORT)
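+/* SIMPLE_DEV_PM_OPS wires the same callback pair to suspend/resume,
+ * freeze/thaw and poweroff/restore, so hibernation is covered as well.
+ */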
+static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
+#endif /* CONFIG_PM && !USE_LEGACY_PM_SUPPORT */
+
 static struct pci_driver i40e_driver = {
 	.name     = i40e_driver_name,
 	.id_table = i40e_pci_tbl,
 	.probe    = i40e_probe,
+#ifdef HAVE_CONFIG_HOTPLUG
+	.remove   = __devexit_p(i40e_remove),
+#else
 	.remove   = i40e_remove,
-#ifdef CONFIG_PM
-	.suspend  = i40e_suspend,
-	.resume   = i40e_resume,
 #endif
+#ifdef CONFIG_PM
+#ifdef USE_LEGACY_PM_SUPPORT
+	.suspend  = i40e_legacy_suspend,
+	.resume   = i40e_legacy_resume,
+#else /* USE_LEGACY_PM_SUPPORT */
+	.driver   = {
+		.pm = &i40e_pm_ops,
+	},
+#endif /* !USE_LEGACY_PM_SUPPORT */
+#endif /* CONFIG_PM */
 	.shutdown = i40e_shutdown,
+#ifdef HAVE_PCI_ERS
 	.err_handler = &i40e_err_handler,
+#endif
+#ifdef HAVE_SRIOV_CONFIGURE
 	.sriov_configure = i40e_pci_sriov_configure,
+#endif
+#ifdef HAVE_RHEL6_SRIOV_CONFIGURE
+	.rh_reserved = &i40e_driver_rh,
+#endif
+#ifdef HAVE_RHEL7_PCI_DRIVER_RH
+	.pci_driver_rh = &i40e_driver_rh,
+#endif
 };
 
 /**
@@ -12170,11 +16711,6 @@ static int __init i40e_init_module(void)
 	pr_info("%s: %s - version %s\n", i40e_driver_name,
 		i40e_driver_string, i40e_driver_version_str);
 	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
-#if defined (CONFIG_ARM64)
-	mark_tech_preview(i40e_driver.name, THIS_MODULE);
-#elif !defined (CONFIG_ARM64) && !defined (CONFIG_PPC)
-	mark_driver_unsupported(i40e_driver.name);
-#endif
 
 	/* There is no need to throttle the number of active tasks because
 	 * each device limits its own task using a state bit for scheduling
@@ -12188,7 +16724,13 @@ static int __init i40e_init_module(void)
 		pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
 		return -ENOMEM;
 	}
+#ifdef HAVE_RHEL7_PCI_DRIVER_RH
+	/* The size member must be initialized in the driver via a call to
+	 * set_pci_driver_rh_size before pci_register_driver is called
+	 */
+	set_pci_driver_rh_size(i40e_driver_rh);
 
+#endif
 	i40e_dbg_init();
 	return pci_register_driver(&i40e_driver);
 }
@@ -12205,5 +16747,8 @@ static void __exit i40e_exit_module(void)
 	pci_unregister_driver(&i40e_driver);
 	destroy_workqueue(i40e_wq);
 	i40e_dbg_exit();
+#ifdef HAVE_KFREE_RCU_BARRIER
+	rcu_barrier();
+#endif
 }
 module_exit(i40e_exit_module);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index d591b3e6b..04fe349dd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -1,28 +1,5 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
 #include "i40e_prototype.h"
 
@@ -34,12 +11,12 @@
  * once per NVM initialization, e.g. inside the i40e_init_shared_code().
  * Please notice that the NVM term is used here (& in all methods covered
  * in this file) as an equivalent of the FLASH part mapped into the SR.
- * We are accessing FLASH always thru the Shadow RAM.
+ * We always access the FLASH through the Shadow RAM.
  **/
 i40e_status i40e_init_nvm(struct i40e_hw *hw)
 {
 	struct i40e_nvm_info *nvm = &hw->nvm;
-	i40e_status ret_code = 0;
+	i40e_status ret_code = I40E_SUCCESS;
 	u32 fla, gens;
 	u8 sr_size;
 
@@ -78,7 +55,7 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)
 i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
 				       enum i40e_aq_resource_access_type access)
 {
-	i40e_status ret_code = 0;
+	i40e_status ret_code = I40E_SUCCESS;
 	u64 gtime, timeout;
 	u64 time_left = 0;
 
@@ -108,13 +85,13 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
 							I40E_NVM_RESOURCE_ID,
 							access, 0, &time_left,
 							NULL);
-			if (!ret_code) {
+			if (ret_code == I40E_SUCCESS) {
 				hw->nvm.hw_semaphore_timeout =
 					    I40E_MS_TO_GTIME(time_left) + gtime;
 				break;
 			}
 		}
-		if (ret_code) {
+		if (ret_code != I40E_SUCCESS) {
 			hw->nvm.hw_semaphore_timeout = 0;
 			i40e_debug(hw, I40E_DEBUG_NVM,
 				   "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
@@ -147,11 +124,10 @@ void i40e_release_nvm(struct i40e_hw *hw)
 	 */
 	while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
 	       (total_delay < hw->aq.asq_cmd_timeout)) {
-		usleep_range(1000, 2000);
-		ret_code = i40e_aq_release_resource(hw,
-						    I40E_NVM_RESOURCE_ID,
-						    0, NULL);
-		total_delay++;
+		usleep_range(1000, 2000);
+		ret_code = i40e_aq_release_resource(hw,
+					I40E_NVM_RESOURCE_ID, 0, NULL);
+		total_delay++;
 	}
 }
 
@@ -170,7 +146,7 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
 	for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
 		srctl = rd32(hw, I40E_GLNVM_SRCTL);
 		if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
-			ret_code = 0;
+			ret_code = I40E_SUCCESS;
 			break;
 		}
 		udelay(5);
@@ -188,15 +164,15 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
  *
  * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
  **/
-static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
-					    u16 *data)
+static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw,
+						      u16 offset, u16 *data)
 {
 	i40e_status ret_code = I40E_ERR_TIMEOUT;
 	u32 sr_reg;
 
 	if (offset >= hw->nvm.sr_size) {
 		i40e_debug(hw, I40E_DEBUG_NVM,
-			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
+			   "NVM read error: Offset %d beyond Shadow RAM limit %d\n",
 			   offset, hw->nvm.sr_size);
 		ret_code = I40E_ERR_PARAM;
 		goto read_nvm_exit;
@@ -204,7 +180,7 @@ static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
 
 	/* Poll the done bit first */
 	ret_code = i40e_poll_sr_srctl_done_bit(hw);
-	if (!ret_code) {
+	if (ret_code == I40E_SUCCESS) {
 		/* Write the address and start reading */
 		sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
 			 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
@@ -212,14 +188,14 @@ static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
 
 		/* Poll I40E_GLNVM_SRCTL until the done bit is set */
 		ret_code = i40e_poll_sr_srctl_done_bit(hw);
-		if (!ret_code) {
+		if (ret_code == I40E_SUCCESS) {
 			sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
 			*data = (u16)((sr_reg &
 				       I40E_GLNVM_SRDATA_RDDATA_MASK)
 				    >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
 		}
 	}
-	if (ret_code)
+	if (ret_code != I40E_SUCCESS)
 		i40e_debug(hw, I40E_DEBUG_NVM,
 			   "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
 			   offset);
@@ -239,9 +215,10 @@ read_nvm_exit:
  *
  * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
  **/
-static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
-				    u32 offset, u16 words, void *data,
-				    bool last_command)
+static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
+					      u8 module_pointer, u32 offset,
+					      u16 words, void *data,
+					      bool last_command)
 {
 	i40e_status ret_code = I40E_ERR_NVM;
 	struct i40e_asq_cmd_details cmd_details;
@@ -287,18 +264,18 @@ static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
  * Reads one 16 bit word from the Shadow RAM using the AdminQ
  **/
 static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
-					 u16 *data)
+						   u16 *data)
 {
 	i40e_status ret_code = I40E_ERR_TIMEOUT;
 
 	ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
-	*data = le16_to_cpu(*(__le16 *)data);
+	*data = LE16_TO_CPU(*(__le16 *)data);
 
 	return ret_code;
 }
 
 /**
- * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
+ * __i40e_read_nvm_word - Reads NVM word, assumes caller does the locking
  * @hw: pointer to the HW structure
  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
  * @data: word read from the Shadow RAM
@@ -309,19 +286,17 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
  * taken via i40e_acquire_nvm().
  **/
 static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
-					u16 offset, u16 *data)
+						  u16 offset, u16 *data)
 {
-	i40e_status ret_code = 0;
 
 	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
-		ret_code = i40e_read_nvm_word_aq(hw, offset, data);
-	else
-		ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
-	return ret_code;
+		return i40e_read_nvm_word_aq(hw, offset, data);
+
+	return i40e_read_nvm_word_srctl(hw, offset, data);
 }
 
 /**
- * i40e_read_nvm_word - Reads nvm word and acquire lock if necessary
+ * i40e_read_nvm_word - Reads NVM word, acquires lock if necessary
  * @hw: pointer to the HW structure
  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
  * @data: word read from the Shadow RAM
@@ -329,21 +304,93 @@ static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
  * Reads one 16 bit word from the Shadow RAM.
  **/
 i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
-			       u16 *data)
+					 u16 *data)
 {
-	i40e_status ret_code = 0;
+	i40e_status ret_code = I40E_SUCCESS;
+
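+	/* Some firmware requires the NVM semaphore to be held around Shadow
+	 * RAM reads; the flag is set at init time when that applies, and it
+	 * gates the acquire/release pair below.
+	 */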
+	if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
 
-	ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
 	if (ret_code)
 		return ret_code;
-
 	ret_code = __i40e_read_nvm_word(hw, offset, data);
 
-	i40e_release_nvm(hw);
-
+	if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+		i40e_release_nvm(hw);
 	return ret_code;
 }
 
+/**
+ * i40e_read_nvm_module_data - Reads NVM Buffer to specified memory location
+ * @hw: Pointer to the HW structure
+ * @module_ptr: Pointer to module in words with respect to NVM beginning
+ * @module_offset: Offset in words from module start
+ * @data_offset: Offset in words from reading data area start
+ * @words_data_size: Words to read from NVM
+ * @data_ptr: Pointer to memory location where resulting buffer will be stored
+ **/
+enum i40e_status_code
+i40e_read_nvm_module_data(struct i40e_hw *hw, u8 module_ptr, u16 module_offset,
+			  u16 data_offset, u16 words_data_size, u16 *data_ptr)
+{
+	i40e_status status;
+	u16 specific_ptr = 0;
+	u16 ptr_value = 0;
+	u16 offset = 0;
+
+	if (module_ptr != 0) {
+		status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
+		if (status != I40E_SUCCESS) {
+			i40e_debug(hw, I40E_DEBUG_ALL,
+				   "Reading nvm word failed.Error code: %d.\n",
+				   status);
+			return I40E_ERR_NVM;
+		}
+	}
+#define I40E_NVM_INVALID_PTR_VAL 0x7FFF
+#define I40E_NVM_INVALID_VAL 0xFFFF
+
+	/* Pointer not initialized */
+	if (ptr_value == I40E_NVM_INVALID_PTR_VAL ||
+	    ptr_value == I40E_NVM_INVALID_VAL) {
+		i40e_debug(hw, I40E_DEBUG_ALL, "Pointer not initialized.\n");
+		return I40E_ERR_BAD_PTR;
+	}
+
+	/* Check whether the module is in SR mapped area or outside */
+	if (ptr_value & I40E_PTR_TYPE) {
+		/* Pointer points outside of the Shadow RAM mapped area */
+		i40e_debug(hw, I40E_DEBUG_ALL,
+			   "Reading nvm data failed. Pointer points outside of the Shadow RAM mapped area.\n");
+
+		return I40E_ERR_PARAM;
+	} else {
+		/* Read from the Shadow RAM */
+
+		status = i40e_read_nvm_word(hw, ptr_value + module_offset,
+					    &specific_ptr);
+		if (status != I40E_SUCCESS) {
+			i40e_debug(hw, I40E_DEBUG_ALL,
+				   "Reading nvm word failed.Error code: %d.\n",
+				   status);
+			return I40E_ERR_NVM;
+		}
+
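+		/* Resolve the double indirection: the module pointer gives
+		 * the module base, specific_ptr the data area inside it, and
+		 * data_offset the caller's position within that area.
+		 */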
+		offset = ptr_value + module_offset + specific_ptr +
+			data_offset;
+
+		status = i40e_read_nvm_buffer(hw, offset, &words_data_size,
+					      data_ptr);
+		if (status != I40E_SUCCESS) {
+			i40e_debug(hw, I40E_DEBUG_ALL,
+				   "Reading nvm buffer failed.Error code: %d.\n",
+				   status);
+		}
+	}
+
+	return status;
+}
+
 /**
  * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
  * @hw: pointer to the HW structure
@@ -356,16 +403,16 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
  * and followed by the release.
  **/
 static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
-					      u16 *words, u16 *data)
+							u16 *words, u16 *data)
 {
-	i40e_status ret_code = 0;
+	i40e_status ret_code = I40E_SUCCESS;
 	u16 index, word;
 
-	/* Loop thru the selected region */
+	/* Loop through the selected region */
 	for (word = 0; word < *words; word++) {
 		index = offset + word;
 		ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
-		if (ret_code)
+		if (ret_code != I40E_SUCCESS)
 			break;
 	}
 
@@ -387,7 +434,7 @@ static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
  * and followed by the release.
  **/
 static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
-					   u16 *words, u16 *data)
+						     u16 *words, u16 *data)
 {
 	i40e_status ret_code;
 	u16 read_size = *words;
@@ -414,7 +461,7 @@ static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
 
 		ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
 					    data + words_read, last_cmd);
-		if (ret_code)
+		if (ret_code != I40E_SUCCESS)
 			goto read_nvm_buffer_aq_exit;
 
 		/* Increment counter for words already read and move offset to
@@ -425,7 +472,7 @@ static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
 	} while (words_read < *words);
 
 	for (i = 0; i < *words; i++)
-		data[i] = le16_to_cpu(((__le16 *)data)[i]);
+		data[i] = LE16_TO_CPU(((__le16 *)data)[i]);
 
 read_nvm_buffer_aq_exit:
 	*words = words_read;
@@ -433,7 +480,7 @@ read_nvm_buffer_aq_exit:
 }
 
 /**
- * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock
+ * __i40e_read_nvm_buffer - Reads NVM buffer, caller must acquire lock
  * @hw: pointer to the HW structure
  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
  * @words: (in) number of words to read; (out) number of words actually read
@@ -443,15 +490,42 @@ read_nvm_buffer_aq_exit:
  * method.
  **/
 static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
-					  u16 offset, u16 *words,
-					  u16 *data)
+						    u16 offset, u16 *words,
+						    u16 *data)
 {
-	i40e_status ret_code = 0;
-
 	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
-		ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data);
-	else
+		return i40e_read_nvm_buffer_aq(hw, offset, words, data);
+
+	return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+}
+
+/**
+ * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquires lock if necessary
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
+ * method. The buffer read is preceded by taking NVM ownership and followed
+ * by its release.
+ **/
+i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+					   u16 *words, u16 *data)
+{
+	i40e_status ret_code = I40E_SUCCESS;
+
+	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+		if (!ret_code) {
+			ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
+							 data);
+			i40e_release_nvm(hw);
+		}
+	} else {
 		ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+	}
+
 	return ret_code;
 }
 
@@ -467,8 +541,8 @@ static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
  * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
  **/
 static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
-				     u32 offset, u16 words, void *data,
-				     bool last_command)
+					       u32 offset, u16 words, void *data,
+					       bool last_command)
 {
 	i40e_status ret_code = I40E_ERR_NVM;
 	struct i40e_asq_cmd_details cmd_details;
@@ -482,25 +556,20 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
 	 * Firmware will check the module-based model.
 	 */
 	if ((offset + words) > hw->nvm.sr_size)
-		i40e_debug(hw, I40E_DEBUG_NVM,
-			   "NVM write error: offset %d beyond Shadow RAM limit %d\n",
-			   (offset + words), hw->nvm.sr_size);
+		hw_dbg(hw, "NVM write error: offset beyond Shadow RAM limit.\n");
 	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
 		/* We can write only up to 4KB (one sector), in one AQ write */
-		i40e_debug(hw, I40E_DEBUG_NVM,
-			   "NVM write fail error: tried to write %d words, limit is %d.\n",
-			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
+		hw_dbg(hw, "NVM write fail error: cannot write more than 4KB in a single write.\n");
 	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
 		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
 		/* A single write cannot spread over two sectors */
-		i40e_debug(hw, I40E_DEBUG_NVM,
-			   "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
-			   offset, words);
+		hw_dbg(hw, "NVM write error: cannot spread over two sectors in a single write.\n");
 	else
 		ret_code = i40e_aq_update_nvm(hw, module_pointer,
 					      2 * offset,  /*bytes*/
 					      2 * words,   /*bytes*/
-					      data, last_command, &cmd_details);
+					      data, last_command, 0,
+					      &cmd_details);
 
 	return ret_code;
 }
@@ -518,7 +587,7 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
 static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
 						    u16 *checksum)
 {
-	i40e_status ret_code;
+	i40e_status ret_code = I40E_SUCCESS;
 	struct i40e_virt_mem vmem;
 	u16 pcie_alt_module = 0;
 	u16 checksum_local = 0;
@@ -534,7 +603,7 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
 
 	/* read pointer to VPD area */
 	ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
-	if (ret_code) {
+	if (ret_code != I40E_SUCCESS) {
 		ret_code = I40E_ERR_NVM_CHECKSUM;
 		goto i40e_calc_nvm_checksum_exit;
 	}
@@ -542,7 +611,7 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
 	/* read pointer to PCIe Alt Auto-load module */
 	ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
 					&pcie_alt_module);
-	if (ret_code) {
+	if (ret_code != I40E_SUCCESS) {
 		ret_code = I40E_ERR_NVM_CHECKSUM;
 		goto i40e_calc_nvm_checksum_exit;
 	}
@@ -556,7 +625,7 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
 			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
 
 			ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
-			if (ret_code) {
+			if (ret_code != I40E_SUCCESS) {
 				ret_code = I40E_ERR_NVM_CHECKSUM;
 				goto i40e_calc_nvm_checksum_exit;
 			}
@@ -598,16 +667,15 @@ i40e_calc_nvm_checksum_exit:
  **/
 i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
 {
-	i40e_status ret_code;
+	i40e_status ret_code = I40E_SUCCESS;
 	u16 checksum;
 	__le16 le_sum;
 
 	ret_code = i40e_calc_nvm_checksum(hw, &checksum);
-	if (!ret_code) {
-		le_sum = cpu_to_le16(checksum);
+	le_sum = CPU_TO_LE16(checksum);
+	if (ret_code == I40E_SUCCESS)
 		ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
 					     1, &le_sum, true);
-	}
 
 	return ret_code;
 }
@@ -623,7 +691,7 @@ i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
 i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
 						 u16 *checksum)
 {
-	i40e_status ret_code = 0;
+	i40e_status ret_code = I40E_SUCCESS;
 	u16 checksum_sr = 0;
 	u16 checksum_local = 0;
 
@@ -655,42 +723,51 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
 }
 
 static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
-					  struct i40e_nvm_access *cmd,
-					  u8 *bytes, int *perrno);
+						    struct i40e_nvm_access *cmd,
+						    u8 *bytes, int *perrno);
 static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
-					     struct i40e_nvm_access *cmd,
-					     u8 *bytes, int *perrno);
+						    struct i40e_nvm_access *cmd,
+						    u8 *bytes, int *perrno);
 static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
-					     struct i40e_nvm_access *cmd,
-					     u8 *bytes, int *errno);
+						    struct i40e_nvm_access *cmd,
+						    u8 *bytes, int *perrno);
 static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
-						struct i40e_nvm_access *cmd,
-						int *perrno);
+						    struct i40e_nvm_access *cmd,
+						    int *perrno);
 static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
-					 struct i40e_nvm_access *cmd,
-					 int *perrno);
+						   struct i40e_nvm_access *cmd,
+						   int *perrno);
 static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
-					 struct i40e_nvm_access *cmd,
-					 u8 *bytes, int *perrno);
+						   struct i40e_nvm_access *cmd,
+						   u8 *bytes, int *perrno);
 static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
-					struct i40e_nvm_access *cmd,
-					u8 *bytes, int *perrno);
+						  struct i40e_nvm_access *cmd,
+						  u8 *bytes, int *perrno);
 static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
-				       struct i40e_nvm_access *cmd,
-				       u8 *bytes, int *perrno);
+						 struct i40e_nvm_access *cmd,
+						 u8 *bytes, int *perrno);
 static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
-					     struct i40e_nvm_access *cmd,
-					     u8 *bytes, int *perrno);
-static inline u8 i40e_nvmupd_get_module(u32 val)
+						    struct i40e_nvm_access *cmd,
+						    u8 *bytes, int *perrno);
+static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+						    struct i40e_nvm_access *cmd,
+						    u8 *bytes, int *perrno);
+static INLINE u8 i40e_nvmupd_get_module(u32 val)
 {
 	return (u8)(val & I40E_NVM_MOD_PNT_MASK);
 }
-static inline u8 i40e_nvmupd_get_transaction(u32 val)
+static INLINE u8 i40e_nvmupd_get_transaction(u32 val)
 {
 	return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
 }
 
-static const char * const i40e_nvm_update_state_str[] = {
+static INLINE u8 i40e_nvmupd_get_preservation_flags(u32 val)
+{
+	return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
+		    I40E_NVM_PRESERVATION_FLAGS_SHIFT);
+}
+
+static const char * const i40e_nvm_update_state_str[] = {
 	"I40E_NVMUPD_INVALID",
 	"I40E_NVMUPD_READ_CON",
 	"I40E_NVMUPD_READ_SNT",
@@ -707,6 +784,8 @@ static const char * const i40e_nvm_update_state_str[] = {
 	"I40E_NVMUPD_STATUS",
 	"I40E_NVMUPD_EXEC_AQ",
 	"I40E_NVMUPD_GET_AQ_RESULT",
+	"I40E_NVMUPD_GET_AQ_EVENT",
+	"I40E_NVMUPD_GET_FEATURES",
 };
 
 /**
@@ -719,8 +798,8 @@ static const char * const i40e_nvm_update_state_str[] = {
  * Dispatches command depending on what update state is current
  **/
 i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
-				struct i40e_nvm_access *cmd,
-				u8 *bytes, int *perrno)
+					  struct i40e_nvm_access *cmd,
+					  u8 *bytes, int *perrno)
 {
 	i40e_status status;
 	enum i40e_nvmupd_cmd upd_cmd;
@@ -764,7 +843,32 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
 		if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
 			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
 
-		return 0;
+		return I40E_SUCCESS;
+	}
+
+	/* A supported features request returns immediately rather than
+	 * going into the state machine.
+	 */
+	if (upd_cmd == I40E_NVMUPD_FEATURES) {
+		if (cmd->data_size < hw->nvmupd_features.size) {
+			*perrno = -EFAULT;
+			return I40E_ERR_BUF_TOO_SHORT;
+		}
+
+		/* If the buffer is bigger than the i40e_nvmupd_features
+		 * structure, make sure the trailing bytes are set to 0x0.
+		 */
+		if (cmd->data_size > hw->nvmupd_features.size)
+			i40e_memset(bytes + hw->nvmupd_features.size, 0x0,
+				    cmd->data_size - hw->nvmupd_features.size,
+				    I40E_NONDMA_MEM);
+
+		i40e_memcpy(bytes, &hw->nvmupd_features,
+			    hw->nvmupd_features.size, I40E_NONDMA_MEM);
+
+		return I40E_SUCCESS;
 	}
 
 	/* Clear status even it is not read and log */
@@ -782,7 +886,7 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
 	 * ~5ms for most commands. However lock is held for ~60ms for
 	 * NVMUPD_CSUM_LCB command.
 	 */
-	mutex_lock(&hw->aq.arq_mutex);
+	i40e_acquire_spinlock(&hw->aq.arq_spinlock);
 	switch (hw->nvmupd_state) {
 	case I40E_NVMUPD_STATE_INIT:
 		status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
@@ -802,9 +906,9 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
 		 * the wait info and return before doing anything else
 		 */
 		if (cmd->offset == 0xffff) {
-			i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
-			status = 0;
-			goto exit;
+			i40e_nvmupd_clear_wait_state(hw);
+			status = I40E_SUCCESS;
+			break;
 		}
 
 		status = I40E_ERR_NOT_READY;
@@ -819,8 +923,8 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
 		*perrno = -ESRCH;
 		break;
 	}
-exit:
-	mutex_unlock(&hw->aq.arq_mutex);
+
+	i40e_release_spinlock(&hw->aq.arq_spinlock);
 	return status;
 }
 
@@ -835,10 +939,10 @@ exit:
  * state. Reject all other commands.
  **/
 static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
-					  struct i40e_nvm_access *cmd,
-					  u8 *bytes, int *perrno)
+						    struct i40e_nvm_access *cmd,
+						    u8 *bytes, int *perrno)
 {
-	i40e_status status = 0;
+	i40e_status status = I40E_SUCCESS;
 	enum i40e_nvmupd_cmd upd_cmd;
 
 	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
@@ -948,6 +1052,10 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
 		status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
 		break;
 
+	case I40E_NVMUPD_GET_AQ_EVENT:
+		status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
+		break;
+
 	default:
 		i40e_debug(hw, I40E_DEBUG_NVM,
 			   "NVMUPD: bad cmd %s in init state\n",
@@ -970,10 +1078,10 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
  * change in state; reject all other commands.
  **/
 static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
-					     struct i40e_nvm_access *cmd,
-					     u8 *bytes, int *perrno)
+						    struct i40e_nvm_access *cmd,
+						    u8 *bytes, int *perrno)
 {
-	i40e_status status = 0;
+	i40e_status status = I40E_SUCCESS;
 	enum i40e_nvmupd_cmd upd_cmd;
 
 	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
@@ -1012,10 +1120,10 @@ static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
  * change in state; reject all other commands
  **/
 static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
-					     struct i40e_nvm_access *cmd,
-					     u8 *bytes, int *perrno)
+						    struct i40e_nvm_access *cmd,
+						    u8 *bytes, int *perrno)
 {
-	i40e_status status = 0;
+	i40e_status status = I40E_SUCCESS;
 	enum i40e_nvmupd_cmd upd_cmd;
 	bool retry_attempt = false;
 
@@ -1122,38 +1230,55 @@ retry:
 }
 
 /**
- * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * i40e_nvmupd_clear_wait_state - clear wait state on hw
  * @hw: pointer to the hardware structure
- * @opcode: the event that just happened
  **/
-void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
+void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
 {
-	if (opcode == hw->nvm_wait_opcode) {
-		i40e_debug(hw, I40E_DEBUG_NVM,
-			   "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
-		if (hw->nvm_release_on_done) {
-			i40e_release_nvm(hw);
-			hw->nvm_release_on_done = false;
-		}
-		hw->nvm_wait_opcode = 0;
+	i40e_debug(hw, I40E_DEBUG_NVM,
+		   "NVMUPD: clearing wait on opcode 0x%04x\n",
+		   hw->nvm_wait_opcode);
 
-		if (hw->aq.arq_last_status) {
-			hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
-			return;
-		}
+	if (hw->nvm_release_on_done) {
+		i40e_release_nvm(hw);
+		hw->nvm_release_on_done = false;
+	}
+	hw->nvm_wait_opcode = 0;
 
-		switch (hw->nvmupd_state) {
-		case I40E_NVMUPD_STATE_INIT_WAIT:
-			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
-			break;
+	if (hw->aq.arq_last_status) {
+		hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
+		return;
+	}
 
-		case I40E_NVMUPD_STATE_WRITE_WAIT:
-			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
-			break;
+	switch (hw->nvmupd_state) {
+	case I40E_NVMUPD_STATE_INIT_WAIT:
+		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+		break;
 
-		default:
-			break;
-		}
+	case I40E_NVMUPD_STATE_WRITE_WAIT:
+		hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+		break;
+
+	default:
+		break;
+	}
+}
+
+/**
+ * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * @hw: pointer to the hardware structure
+ * @opcode: the event that just happened
+ * @desc: AdminQ descriptor
+ **/
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
+				  struct i40e_aq_desc *desc)
+{
+	u32 aq_desc_len = sizeof(struct i40e_aq_desc);
+
+	if (opcode == hw->nvm_wait_opcode) {
+		i40e_memcpy(&hw->nvm_aq_event_desc, desc,
+			    aq_desc_len, I40E_NONDMA_TO_NONDMA);
+		i40e_nvmupd_clear_wait_state(hw);
 	}
 }
 
@@ -1166,8 +1291,8 @@ void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
  * Return one of the valid command types or I40E_NVMUPD_INVALID
  **/
 static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
-						 struct i40e_nvm_access *cmd,
-						 int *perrno)
+						    struct i40e_nvm_access *cmd,
+						    int *perrno)
 {
 	enum i40e_nvmupd_cmd upd_cmd;
 	u8 module, transaction;
@@ -1204,10 +1329,23 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
 			upd_cmd = I40E_NVMUPD_READ_SA;
 			break;
 		case I40E_NVM_EXEC:
-			if (module == 0xf)
-				upd_cmd = I40E_NVMUPD_STATUS;
-			else if (module == 0)
+			switch (module) {
+			case I40E_NVM_EXEC_GET_AQ_RESULT:
 				upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
+				break;
+			case I40E_NVM_EXEC_FEATURES:
+				upd_cmd = I40E_NVMUPD_FEATURES;
+				break;
+			case I40E_NVM_EXEC_STATUS:
+				upd_cmd = I40E_NVMUPD_STATUS;
+				break;
+			default:
+				*perrno = -EFAULT;
+				return I40E_NVMUPD_INVALID;
+			}
+			break;
+		case I40E_NVM_AQE:
+			upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
 			break;
 		}
 		break;
@@ -1259,8 +1397,8 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
  * cmd structure contains identifiers and data buffer
  **/
 static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
-				       struct i40e_nvm_access *cmd,
-				       u8 *bytes, int *perrno)
+						 struct i40e_nvm_access *cmd,
+						 u8 *bytes, int *perrno)
 {
 	struct i40e_asq_cmd_details cmd_details;
 	i40e_status status;
@@ -1271,6 +1409,9 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
 	u32 aq_data_len;
 
 	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+	if (cmd->offset == 0xffff)
+		return I40E_SUCCESS;
+
 	memset(&cmd_details, 0, sizeof(cmd_details));
 	cmd_details.wb_desc = &hw->nvm_wb_desc;
 
@@ -1289,7 +1430,7 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
 
 	/* if data buffer needed, make sure it's ready */
 	aq_data_len = cmd->data_size - aq_desc_len;
-	buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen));
+	buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen));
 	if (buff_size) {
 		if (!hw->nvm_buff.va) {
 			status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
@@ -1302,10 +1443,14 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
 
 		if (hw->nvm_buff.va) {
 			buff = hw->nvm_buff.va;
-			memcpy(buff, &bytes[aq_desc_len], aq_data_len);
+			i40e_memcpy(buff, &bytes[aq_desc_len], aq_data_len,
+				I40E_NONDMA_TO_NONDMA);
 		}
 	}
 
+	if (cmd->offset)
+		memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);
+
 	/* and away we go! */
 	status = i40e_asq_send_command(hw, aq_desc, buff,
 				       buff_size, &cmd_details);
@@ -1315,6 +1460,7 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
 			   i40e_stat_str(hw, status),
 			   i40e_aq_str(hw, hw->aq.asq_last_status));
 		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+		return status;
 	}
 
 	/* should we wait for a followup event? */
@@ -1336,8 +1482,8 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
  * cmd structure contains identifiers and data buffer
  **/
 static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
-					     struct i40e_nvm_access *cmd,
-					     u8 *bytes, int *perrno)
+						    struct i40e_nvm_access *cmd,
+						    u8 *bytes, int *perrno)
 {
 	u32 aq_total_len;
 	u32 aq_desc_len;
@@ -1347,7 +1493,7 @@ static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
 	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
 
 	aq_desc_len = sizeof(struct i40e_aq_desc);
-	aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);
+	aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen);
 
 	/* check offset range */
 	if (cmd->offset > aq_total_len) {
@@ -1375,13 +1521,13 @@ static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
 			   __func__, cmd->offset, cmd->offset + len);
 
 		buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
-		memcpy(bytes, buff, len);
+		i40e_memcpy(bytes, buff, len, I40E_NONDMA_TO_NONDMA);
 
 		bytes += len;
 		remainder -= len;
 		buff = hw->nvm_buff.va;
 	} else {
-		buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
+		buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len);
 	}
 
 	if (remainder > 0) {
@@ -1389,10 +1535,45 @@ static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
 
 		i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
 			   __func__, start_byte, start_byte + remainder);
-		memcpy(bytes, buff, remainder);
+		i40e_memcpy(bytes, buff, remainder, I40E_NONDMA_TO_NONDMA);
 	}
 
-	return 0;
+	return I40E_SUCCESS;
+}
+
+/**
+ * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+						    struct i40e_nvm_access *cmd,
+						    u8 *bytes, int *perrno)
+{
+	u32 aq_total_len;
+	u32 aq_desc_len;
+
+	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+
+	aq_desc_len = sizeof(struct i40e_aq_desc);
+	aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_aq_event_desc.datalen);
+
+	/* check copylength range */
+	if (cmd->data_size > aq_total_len) {
+		i40e_debug(hw, I40E_DEBUG_NVM,
+			   "%s: copy length %d too big, trimming to %d\n",
+			   __func__, cmd->data_size, aq_total_len);
+		cmd->data_size = aq_total_len;
+	}
+
+	i40e_memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size,
+		    I40E_NONDMA_TO_NONDMA);
+
+	return I40E_SUCCESS;
 }
 
 /**
@@ -1405,8 +1586,8 @@ static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
  * cmd structure contains identifiers and data buffer
  **/
 static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
-					struct i40e_nvm_access *cmd,
-					u8 *bytes, int *perrno)
+						  struct i40e_nvm_access *cmd,
+						  u8 *bytes, int *perrno)
 {
 	struct i40e_asq_cmd_details cmd_details;
 	i40e_status status;
@@ -1444,10 +1625,10 @@ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
  * module, offset, data_size and data are in cmd structure
  **/
 static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
-					 struct i40e_nvm_access *cmd,
-					 int *perrno)
+						   struct i40e_nvm_access *cmd,
+						   int *perrno)
 {
-	i40e_status status = 0;
+	i40e_status status = I40E_SUCCESS;
 	struct i40e_asq_cmd_details cmd_details;
 	u8 module, transaction;
 	bool last;
@@ -1484,24 +1665,26 @@ static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
  * module, offset, data_size and data are in cmd structure
  **/
 static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
-					 struct i40e_nvm_access *cmd,
-					 u8 *bytes, int *perrno)
+						   struct i40e_nvm_access *cmd,
+						   u8 *bytes, int *perrno)
 {
-	i40e_status status = 0;
+	i40e_status status = I40E_SUCCESS;
 	struct i40e_asq_cmd_details cmd_details;
 	u8 module, transaction;
+	u8 preservation_flags;
 	bool last;
 
 	transaction = i40e_nvmupd_get_transaction(cmd->config);
 	module = i40e_nvmupd_get_module(cmd->config);
 	last = (transaction & I40E_NVM_LCB);
+	preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
 
 	memset(&cmd_details, 0, sizeof(cmd_details));
 	cmd_details.wb_desc = &hw->nvm_wb_desc;
 
 	status = i40e_aq_update_nvm(hw, module, cmd->offset,
 				    (u16)cmd->data_size, bytes, last,
-				    &cmd_details);
+				    preservation_flags, &cmd_details);
 	if (status) {
 		i40e_debug(hw, I40E_DEBUG_NVM,
 			   "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
index 80e66da6b..f6ac4b2bd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -1,28 +1,5 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
 #ifndef _I40E_OSDEP_H_
 #define _I40E_OSDEP_H_
@@ -34,14 +11,41 @@
 #include <linux/pci.h>
 #include <linux/highuid.h>
 
-/* get readq/writeq support for 32 bit kernels, use the low-first version */
-#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/io.h>
+#include <asm-generic/int-ll64.h>
+
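+/* Fallbacks for kernels that do not provide readq/writeq on 32-bit builds:
+ * two 32-bit MMIO accesses, low word first. This is non-atomic, matching
+ * the low-first semantics of <linux/io-64-nonatomic-lo-hi.h>.
+ */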
+#ifndef readq
+static inline __u64 readq(const volatile void __iomem *addr)
+{
+	const volatile u32 __iomem *p = addr;
+	u32 low, high;
+
+	low = readl(p);
+	high = readl(p + 1);
+
+	return low + ((u64)high << 32);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(__u64 val, volatile void __iomem *addr)
+{
+	writel(val, addr);
+	writel(val >> 32, addr + 4);
+}
+#endif
+#include "kcompat.h"
 
 /* File to be the magic between shared code and
  * actual OS primitives
  */
 
-#define hw_dbg(hw, S, A...)	do {} while (0)
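+/* Shared-code debug output: printed via pr_debug() and prefixed with the
+ * adapter's PCI bus/device/function.
+ */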
+#define hw_dbg(h, s, ...) do {					\
+		pr_debug("i40e %02x:%02x.%x " s,		\
+			(h)->bus.bus_id, (h)->bus.device,	\
+			(h)->bus.func, ##__VA_ARGS__);		\
+} while (0)
 
 #define wr32(a, reg, value)	writel((value), ((a)->hw_addr + (reg)))
 #define rd32(a, reg)		readl((a)->hw_addr + (reg))
@@ -49,7 +53,6 @@
 #define wr64(a, reg, value)	writeq((value), ((a)->hw_addr + (reg)))
 #define rd64(a, reg)		readq((a)->hw_addr + (reg))
 #define i40e_flush(a)		readl((a)->hw_addr + I40E_GLGEN_STAT)
-
 /* memory allocation tracking */
 struct i40e_dma_mem {
 	void *va;
@@ -58,7 +61,8 @@ struct i40e_dma_mem {
 };
 
 #define i40e_allocate_dma_mem(h, m, unused, s, a) \
-			i40e_allocate_dma_mem_d(h, m, s, a)
+			i40e_allocate_dma_mem_d(h, m, unused, s, a)
+
 #define i40e_free_dma_mem(h, m) i40e_free_dma_mem_d(h, m)
 
 struct i40e_virt_mem {
@@ -77,5 +81,40 @@ do {								\
 			(h)->bus.func, ##__VA_ARGS__);		\
 } while (0)
 
+/* these things are all directly replaced with sed during the kernel build */
+#define INLINE inline
+
+#define CPU_TO_LE16(o) cpu_to_le16(o)
+#define CPU_TO_LE32(s) cpu_to_le32(s)
+#define CPU_TO_LE64(h) cpu_to_le64(h)
+#define LE16_TO_CPU(a) le16_to_cpu(a)
+#define LE32_TO_CPU(c) le32_to_cpu(c)
+#define LE64_TO_CPU(k) le64_to_cpu(k)
+
+/* SW spinlock */
+struct i40e_spinlock {
+	struct mutex spinlock;
+};
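+/* Despite the name, this wraps a sleeping mutex on Linux; the shared code
+ * only takes it in process context (e.g. around AdminQ transactions).
+ */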
+
+static inline void i40e_no_action(struct i40e_spinlock *sp)
+{
+	/* nothing */
+}
+
+/* the locks are initialized in _probe and destroyed in _remove
+ * so make sure NOT to implement init/destroy here, as to
+ * avoid the i40e_init_adminq code trying to reinitialize
+ * the persistent lock memory
+ */
+#define i40e_init_spinlock(_sp)    i40e_no_action(_sp)
+#define i40e_acquire_spinlock(_sp) i40e_acquire_spinlock_d(_sp)
+#define i40e_release_spinlock(_sp) i40e_release_spinlock_d(_sp)
+#define i40e_destroy_spinlock(_sp) i40e_no_action(_sp)
+
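+/* The trailing memory-type argument (I40E_NONDMA_MEM etc.) exists for other
+ * OS ports of the shared code and is deliberately ignored on Linux.
+ */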
+#define i40e_memset(a, b, c, d)  memset((a), (b), (c))
+#define i40e_memcpy(a, b, c, d)  memcpy((a), (b), (c))
+
 typedef enum i40e_status_code i40e_status;
 #endif /* _I40E_OSDEP_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index a39b13197..0f6120609 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -1,35 +1,12 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
 #ifndef _I40E_PROTOTYPE_H_
 #define _I40E_PROTOTYPE_H_
 
 #include "i40e_type.h"
 #include "i40e_alloc.h"
-#include <linux/avf/virtchnl.h>
+#include "virtchnl.h"
 
 /* Prototypes for shared code functions that are not in
  * the standard function pointer structures.  These are
@@ -45,6 +22,13 @@ void i40e_adminq_init_ring_data(struct i40e_hw *hw);
 i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
 					     struct i40e_arq_event_info *e,
 					     u16 *events_pending);
+enum i40e_status_code
+i40e_asq_send_command_atomic(struct i40e_hw *hw,
+			     struct i40e_aq_desc *desc,
+			     void *buff, /* can be NULL */
+			     u16  buff_size,
+			     struct i40e_asq_cmd_details *cmd_details,
+			     bool is_atomic_context);
 i40e_status i40e_asq_send_command(struct i40e_hw *hw,
 				struct i40e_aq_desc *desc,
 				void *buff, /* can be NULL */
@@ -58,29 +42,39 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
 void i40e_idle_aq(struct i40e_hw *hw);
 bool i40e_check_asq_alive(struct i40e_hw *hw);
 i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
-const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
-const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err);
 
 i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
-				bool pf_lut, u8 *lut, u16 lut_size);
+					  bool pf_lut, u8 *lut, u16 lut_size);
 i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
-				bool pf_lut, u8 *lut, u16 lut_size);
+					  bool pf_lut, u8 *lut, u16 lut_size);
 i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
-				u16 seid,
-				struct i40e_aqc_get_set_rss_key_data *key);
+				     u16 seid,
+				     struct i40e_aqc_get_set_rss_key_data *key);
 i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
-				u16 seid,
-				struct i40e_aqc_get_set_rss_key_data *key);
+				     u16 seid,
+				     struct i40e_aqc_get_set_rss_key_data *key);
+const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err);
 
 u32 i40e_led_get(struct i40e_hw *hw);
 void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
 i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
-			     u16 led_addr, u32 mode);
+				       u16 led_addr, u32 mode);
 i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
-			     u16 *val);
+				       u16 *val);
 i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
-				    u32 time, u32 interval);
-
+					      u32 time, u32 interval);
+i40e_status i40e_get_phy_lpi_status(struct i40e_hw *hw,
+					      struct i40e_hw_port_stats *stats);
+i40e_status i40e_get_lpi_counters(struct i40e_hw *hw, u32 *tx_counter,
+					    u32 *rx_counter, bool *is_clear);
+i40e_status i40e_lpi_stat_update(struct i40e_hw *hw,
+					   bool offset_loaded, u64 *tx_offset,
+					   u64 *tx_stat, u64 *rx_offset,
+					   u64 *rx_stat);
+i40e_status i40e_get_lpi_duration(struct i40e_hw *hw,
+					    struct i40e_hw_port_stats *stat,
+					    u64 *tx_duration, u64 *rx_duration);
 /* admin send queue commands */
 
 i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
@@ -89,8 +83,8 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
 				u16 *api_major_version, u16 *api_minor_version,
 				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
-					u32 reg_addr, u64 reg_val,
-					struct i40e_asq_cmd_details *cmd_details);
+				u32 reg_addr, u64 reg_val,
+				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
 				u32  reg_addr, u64 *reg_val,
 				struct i40e_asq_cmd_details *cmd_details);
@@ -99,23 +93,22 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
 i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
 				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id,
-				      struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
 			bool qualified_modules, bool report_init,
 			struct i40e_aq_get_phy_abilities_resp *abilities,
 			struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
+i40e_status i40e_aq_set_phy_config(struct i40e_hw *hw,
 				struct i40e_aq_set_phy_config *config,
 				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+i40e_status i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
 				  bool atomic_reset);
 i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
-				     struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
 				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+			struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
-					bool enable_link,
-					struct i40e_asq_cmd_details *cmd_details);
+		bool enable_link, struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
 				bool enable_lse, struct i40e_link_status *link,
 				struct i40e_asq_cmd_details *cmd_details);
@@ -136,13 +129,14 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
 		bool rx_only_promisc);
 i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
 		u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
-							 u16 seid, bool enable,
-							 u16 vid,
+i40e_status i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw,
+				u16 seid, bool set,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+				u16 seid, bool enable, u16 vid,
 				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
-							 u16 seid, bool enable,
-							 u16 vid,
+i40e_status i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+				u16 seid, bool enable, u16 vid,
 				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
 				u16 seid, bool enable, u16 vid,
@@ -188,9 +182,8 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
 				struct i40e_aqc_get_switch_config_resp *buf,
 				u16 buf_size, u16 *start_seid,
 				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
-						u16 flags,
-						u16 valid_flags,
+i40e_status i40e_aq_set_switch_config(struct i40e_hw *hw,
+				u16 flags, u16 valid_flags, u8 mode,
 				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
 				enum i40e_aq_resources_ids resource,
@@ -206,36 +199,72 @@ i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
 				bool last_command,
 				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
-			      u32 offset, u16 length, bool last_command,
-			      struct i40e_asq_cmd_details *cmd_details);
+				u32 offset, u16 length, bool last_command,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_read_nvm_config(struct i40e_hw *hw,
+				u8 cmd_flags, u32 field_id, void *data,
+				u16 buf_size, u16 *element_count,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_write_nvm_config(struct i40e_hw *hw,
+				u8 cmd_flags, void *data, u16 buf_size,
+				u16 element_count,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_oem_post_update(struct i40e_hw *hw,
+				void *buff, u16 buff_size,
+				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
 				void *buff, u16 buff_size, u16 *data_size,
 				enum i40e_admin_queue_opc list_type_opc,
 				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
 				u32 offset, u16 length, void *data,
-				bool last_command,
+				bool last_command, u8 preservation_flags,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
+				u8 rearrange_nvm,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_nvm_progress(struct i40e_hw *hw, u8 *progress,
 				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
 				u8 mib_type, void *buff, u16 buff_size,
 				u16 *local_len, u16 *remote_len,
 				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_lldp_mib(struct i40e_hw *hw,
+				u8 mib_type, void *buff, u16 buff_size,
+				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
 				bool enable_update,
 				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code
+i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
+		     struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+				bool persist,
 				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
+				bool dcb_enable,
+				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
+				bool persist,
 				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
-				       void *buff, u16 buff_size,
-				       struct i40e_asq_cmd_details *cmd_details);
+				void *buff, u16 buff_size,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_start_stop_dcbx(struct i40e_hw *hw,
+				bool start_agent,
+				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
 				u16 udp_port, u8 protocol_index,
 				u8 *filter_index,
 				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
 				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw,
+			u8 *num_entries,
+			struct i40e_aqc_switch_resource_alloc_element_resp *buf,
+			u16 count,
+			struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
 				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
@@ -282,9 +311,31 @@ i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
 		struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
 		struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
-				   struct i40e_asq_cmd_details *cmd_details);
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code
+i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+			     struct i40e_aqc_cloud_filters_element_bb *filters,
+			     u8 filter_count);
+enum i40e_status_code
+i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 vsi,
+			  struct i40e_aqc_cloud_filters_element_data *filters,
+			  u8 filter_count);
+enum i40e_status_code
+i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 vsi,
+			  struct i40e_aqc_cloud_filters_element_data *filters,
+			  u8 filter_count);
+enum i40e_status_code
+i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+			     struct i40e_aqc_cloud_filters_element_bb *filters,
+			     u8 filter_count);
 i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
-			       struct i40e_lldp_variables *lldp_cfg);
+					struct i40e_lldp_variables *lldp_cfg);
+i40e_status i40e_aq_replace_cloud_filters(struct i40e_hw *hw,
+		struct i40e_aqc_replace_cloud_filters_cmd *filters,
+		struct i40e_aqc_replace_cloud_filters_cmd_buf *cmd_buf);
+i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
+				u32 reg_addr0, u32 *reg_val0,
+				u32 reg_addr1, u32 *reg_val1);
 /* i40e_common */
 i40e_status i40e_init_shared_code(struct i40e_hw *hw);
 i40e_status i40e_pf_reset(struct i40e_hw *hw);
@@ -294,15 +345,13 @@ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up);
 i40e_status i40e_update_link_info(struct i40e_hw *hw);
 i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
 i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
-				      u32 *max_bw, u32 *min_bw, bool *min_valid,
-				      bool *max_valid);
+		u32 *max_bw, u32 *min_bw, bool *min_valid, bool *max_valid);
 i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
 			struct i40e_aqc_configure_partition_bw_data *bw_data,
 			struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
 i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
-				 u32 pba_num_size);
-i40e_status i40e_validate_mac_addr(u8 *mac_addr);
+					    u32 pba_num_size);
 void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
 /* prototype for functions used for NVM access */
 i40e_status i40e_init_nvm(struct i40e_hw *hw);
@@ -311,23 +360,70 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
 void i40e_release_nvm(struct i40e_hw *hw);
 i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
 					 u16 *data);
+enum i40e_status_code
+i40e_read_nvm_module_data(struct i40e_hw *hw, u8 module_ptr, u16 module_offset,
+			  u16 data_offset, u16 words_data_size, u16 *data_ptr);
+i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+					   u16 *words, u16 *data);
 i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw);
 i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
 						 u16 *checksum);
 i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
-				struct i40e_nvm_access *cmd,
-				u8 *bytes, int *);
-void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode);
+					  struct i40e_nvm_access *cmd,
+					  u8 *bytes, int *);
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
+				  struct i40e_aq_desc *desc);
+void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw);
 void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
 
+i40e_status i40e_set_mac_type(struct i40e_hw *hw);
+
 extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
 
-static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
+static INLINE struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
 {
 	return i40e_ptype_lookup[ptype];
 }
 
-/* prototype for functions used for SW locks */
+/**
+ * i40e_virtchnl_link_speed - Convert AdminQ link_speed to virtchnl definition
+ * @link_speed: the speed to convert
+ *
+ * Returns the link_speed in terms of the virtchnl interface, for use in
+ * converting link_speed as reported by the AdminQ into the format used for
+ * talking to virtchnl devices. If we can't represent the link speed properly,
+ * report LINK_SPEED_UNKNOWN.
+ **/
+static INLINE enum virtchnl_link_speed
+i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed)
+{
+	switch (link_speed) {
+	case I40E_LINK_SPEED_100MB:
+		return VIRTCHNL_LINK_SPEED_100MB;
+	case I40E_LINK_SPEED_1GB:
+		return VIRTCHNL_LINK_SPEED_1GB;
+	case I40E_LINK_SPEED_2_5GB:
+		return VIRTCHNL_LINK_SPEED_2_5GB;
+	case I40E_LINK_SPEED_5GB:
+		return VIRTCHNL_LINK_SPEED_5GB;
+	case I40E_LINK_SPEED_10GB:
+		return VIRTCHNL_LINK_SPEED_10GB;
+	case I40E_LINK_SPEED_40GB:
+		return VIRTCHNL_LINK_SPEED_40GB;
+	case I40E_LINK_SPEED_20GB:
+		return VIRTCHNL_LINK_SPEED_20GB;
+	case I40E_LINK_SPEED_25GB:
+		return VIRTCHNL_LINK_SPEED_25GB;
+	case I40E_LINK_SPEED_UNKNOWN:
+	default:
+		return VIRTCHNL_LINK_SPEED_UNKNOWN;
+	}
+}
+/* prototype for functions used for SW spinlocks */
+void i40e_init_spinlock(struct i40e_spinlock *sp);
+void i40e_acquire_spinlock(struct i40e_spinlock *sp);
+void i40e_release_spinlock(struct i40e_spinlock *sp);
+void i40e_destroy_spinlock(struct i40e_spinlock *sp);
 
 /* i40e_common for VF drivers*/
 void i40e_vf_parse_hw_config(struct i40e_hw *hw,
@@ -346,10 +442,10 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
 				struct i40e_control_filter_stats *stats,
 				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
-			       u8 table_id, u32 start_index, u16 buff_size,
-			       void *buff, u16 *ret_buff_size,
-			       u8 *ret_next_table, u32 *ret_next_index,
-			       struct i40e_asq_cmd_details *cmd_details);
+				u8 table_id, u32 start_index, u16 buff_size,
+				void *buff, u16 *ret_buff_size,
+				u8 *ret_next_table, u32 *ret_next_index,
+				struct i40e_asq_cmd_details *cmd_details);
 void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
 						    u16 vsi_seid);
 i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
@@ -360,35 +456,84 @@ i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
 				u32 reg_addr, u32 reg_val,
 				struct i40e_asq_cmd_details *cmd_details);
 void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
+enum i40e_status_code
+i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
+			     u8 phy_select, u8 dev_addr, bool page_change,
+			     bool set_mdio, u8 mdio_num,
+			     u32 reg_addr, u32 reg_val,
+			     struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code
+i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
+			     u8 phy_select, u8 dev_addr, bool page_change,
+			     bool set_mdio, u8 mdio_num,
+			     u32 reg_addr, u32 *reg_val,
+			     struct i40e_asq_cmd_details *cmd_details);
+
+/* Convenience wrappers for most common use case */
+#define i40e_aq_set_phy_register(hw, ps, da, pc, ra, rv, cd) \
+	i40e_aq_set_phy_register_ext(hw, ps, da, pc, false, 0, ra, rv, cd)
+#define i40e_aq_get_phy_register(hw, ps, da, pc, ra, rv, cd) \
+	i40e_aq_get_phy_register_ext(hw, ps, da, pc, false, 0, ra, rv, cd)
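+/* (both wrappers pass set_mdio = false and mdio_num = 0) */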
+
+enum i40e_status_code
+i40e_aq_run_phy_activity(struct i40e_hw *hw, u16 activity_id, u32 opcode,
+			 u32 *cmd_status, u32 *data0, u32 *data1,
+			 struct i40e_asq_cmd_details *cmd_details);
+
+i40e_status i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
+			struct i40e_aqc_arp_proxy_data *proxy_config,
+			struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw,
+			struct i40e_aqc_ns_proxy_data *ns_proxy_table_entry,
+			struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
+			u8 filter_index,
+			struct i40e_aqc_set_wol_filter_data *filter,
+			bool set_filter, bool no_wol_tco,
+			bool filter_valid, bool no_wol_tco_valid,
+			struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
+			u16 *wake_reason,
+			struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_clear_all_wol_filters(struct i40e_hw *hw,
+			struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
-					    u16 reg, u8 phy_addr, u16 *value);
+					u16 reg, u8 phy_addr, u16 *value);
 i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
-					     u16 reg, u8 phy_addr, u16 value);
+					u16 reg, u8 phy_addr, u16 value);
 i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
 				u8 page, u16 reg, u8 phy_addr, u16 *value);
 i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
 				u8 page, u16 reg, u8 phy_addr, u16 value);
-i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
-				   u8 phy_addr, u16 *value);
-i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
-				    u8 phy_addr, u16 value);
+i40e_status i40e_read_phy_register(struct i40e_hw *hw,
+				u8 page, u16 reg, u8 phy_addr, u16 *value);
+i40e_status i40e_write_phy_register(struct i40e_hw *hw,
+				u8 page, u16 reg, u8 phy_addr, u16 value);
 u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
 i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
-				    u32 time, u32 interval);
-i40e_status i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
-			      u16 buff_size, u32 track_id,
-			      u32 *error_offset, u32 *error_info,
-			      struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
-				 u16 buff_size, u8 flags,
-				 struct i40e_asq_cmd_details *cmd_details);
+					      u32 time, u32 interval);
+i40e_status i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
+					u16 buff_size, u32 track_id,
+					u32 *error_offset, u32 *error_info,
+					struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
+					   u16 buff_size, u8 flags,
+					   struct i40e_asq_cmd_details *cmd_details);
 struct i40e_generic_seg_header *
 i40e_find_segment_in_package(u32 segment_type,
 			     struct i40e_package_header *pkg_header);
+struct i40e_profile_section_header *
+i40e_find_section_in_profile(u32 section_type,
+			     struct i40e_profile_segment *profile);
 enum i40e_status_code
 i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
 		   u32 track_id);
 enum i40e_status_code
+i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
+		      u32 track_id);
+enum i40e_status_code
 i40e_add_pinfo_to_list(struct i40e_hw *hw,
 		       struct i40e_profile_segment *profile,
 		       u8 *profile_info_sec, u32 track_id);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index d8456c381..44373fb7a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -1,31 +1,15 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
+/* this lets the macros that return timespec64 or structs compile cleanly with
+ * W=2
+ */
+#pragma GCC diagnostic ignored "-Waggregate-return"
 #include "i40e.h"
+#ifdef HAVE_PTP_1588_CLOCK
 #include <linux/ptp_classify.h>
+#include <linux/posix-clock.h>
 
 /* The XL710 timesync is very much like Intel's 82599 design when it comes to
  * the fundamental clock design. However, the clock operations are much simpler
@@ -39,14 +23,210 @@
  * At 1Gb link, the period is multiplied by 20. (32ns)
  * 1588 functionality is not supported at 100Mbps.
  */
-#define I40E_PTP_40GB_INCVAL 0x0199999999ULL
-#define I40E_PTP_10GB_INCVAL 0x0333333333ULL
-#define I40E_PTP_1GB_INCVAL  0x2000000000ULL
+#define I40E_PTP_40GB_INCVAL		0x0199999999ULL
+#define I40E_PTP_10GB_INCVAL_MULT	2
+#define I40E_PTP_1GB_INCVAL_MULT	20
+#define I40E_ISGN			0x80000000
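+/* Worked arithmetic for the values above: 0x0199999999 / 2^32 ~= 1.6,
+ * i.e. the clock advances ~1.6ns per cycle at 40Gb; the x2 and x20
+ * multipliers then give the 3.2ns (10Gb) and 32ns (1Gb) periods.
+ */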
 
 #define I40E_PRTTSYN_CTL1_TSYNTYPE_V1  BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
 #define I40E_PRTTSYN_CTL1_TSYNTYPE_V2  (2 << \
 					I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_SUBDEV_ID_25G_PTP_PIN	0xB
+#define to_dev(obj) container_of(obj, struct device, kobj)
+
+enum i40e_ptp_pin {
+	SDP3_2 = 0,
+	SDP3_3,
+	GPIO_4
+};
+
+static struct ptp_pin_desc sdp_desc[] = {
+/*	  name  idx   func      chan */
+	{"SDP3_2", SDP3_2, PTP_PF_NONE, 0},
+	{"SDP3_3", SDP3_3, PTP_PF_NONE, 1},
+	{"GPIO_4", GPIO_4, PTP_PF_NONE, 1},
+};
+
+#ifndef HAVE_PTP_1588_CLOCK_PINS
+static ssize_t i40e_sysfs_ptp_pins_read(struct kobject *kobj,
+					struct kobj_attribute *attr,
+					char *buf);
+
+static ssize_t i40e_sysfs_ptp_pins_write(struct kobject *kobj,
+					 struct kobj_attribute *attr,
+					 const char *buf, size_t count);
+
+static struct kobj_attribute ptp_pins_attribute = __ATTR(pins, 0660,
+	       i40e_sysfs_ptp_pins_read, i40e_sysfs_ptp_pins_write);
+#endif /* HAVE_PTP_1588_CLOCK_PINS */
+
+enum i40e_ptp_gpio_pin_state {
+	end = -2,
+	invalid,
+	off,
+	in_A,
+	in_B,
+	out_A,
+	out_B,
+};
+
+static const char * const i40e_ptp_gpio_pin_state2str[] = {
+	"off", "in_A", "in_B", "out_A", "out_B"
+};
+
+enum i40e_ptp_led_pin_state {
+	low = 0,
+	high,
+};
+
+struct i40e_ptp_pins_settings {
+	enum i40e_ptp_gpio_pin_state sdp3_2;
+	enum i40e_ptp_gpio_pin_state sdp3_3;
+	enum i40e_ptp_gpio_pin_state gpio_4;
+	enum i40e_ptp_led_pin_state led2_0;
+	enum i40e_ptp_led_pin_state led2_1;
+	enum i40e_ptp_led_pin_state led3_0;
+	enum i40e_ptp_led_pin_state led3_1;
+};
+
+static const struct i40e_ptp_pins_settings i40e_ptp_pin_led_allowed_states[] = {
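+	/* sdp3_2  sdp3_3  gpio_4       led2_0  led2_1  led3_0  led3_1 */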
+	{off,	off,	off,		high,	high,	high,	high},
+	{off,	in_A,	off,		high,	high,	high,	low},
+	{off,	out_A,	off,		high,	low,	high,	high},
+	{off,	in_B,	off,		high,	high,	high,	low},
+	{off,	out_B,	off,		high,	low,	high,	high},
+	{in_A,	off,	off,		high,	high,	high,	low},
+	{in_A,	in_B,	off,		high,	high,	high,	low},
+	{in_A,	out_B,	off,		high,	low,	high,	high},
+	{out_A,	off,	off,		high,	low,	high,	high},
+	{out_A,	in_B,	off,		high,	low,	high,	high},
+	{in_B,	off,	off,		high,	high,	high,	low},
+	{in_B,	in_A,	off,		high,	high,	high,	low},
+	{in_B,	out_A,	off,		high,	low,	high,	high},
+	{out_B,	off,	off,		high,	low,	high,	high},
+	{out_B,	in_A,	off,		high,	low,	high,	high},
+	{off,	off,	in_A,		high,	high,	low,	high},
+	{off,	out_A,	in_A,		high,	low,	low,	high},
+	{off,	in_B,	in_A,		high,	high,	low,	low},
+	{off,	out_B,	in_A,		high,	low,	low,	high},
+	{out_A,	off,	in_A,		high,	low,	low,	high},
+	{out_A,	in_B,	in_A,		high,	low,	low,	high},
+	{in_B,	off,	in_A,		high,	high,	low,	low},
+	{in_B,	out_A,	in_A,		high,	low,	low,	high},
+	{out_B,	off,	in_A,		high,	low,	low,	high},
+	{off,	off,	out_A,		low,	high,	high,	high},
+	{off,	in_A,	out_A,		low,	high,	high,	low},
+	{off,	in_B,	out_A,		low,	high,	high,	low},
+	{off,	out_B,	out_A,		low,	low,	high,	high},
+	{in_A,	off,	out_A,		low,	high,	high,	low},
+	{in_A,	in_B,	out_A,		low,	high,	high,	low},
+	{in_A,	out_B,	out_A,		low,	low,	high,	high},
+	{in_B,	off,	out_A,		low,	high,	high,	low},
+	{in_B,	in_A,	out_A,		low,	high,	high,	low},
+	{out_B,	off,	out_A,		low,	low,	high,	high},
+	{out_B,	in_A,	out_A,		low,	low,	high,	high},
+	{off,	off,	in_B,		high,	high,	low,	high},
+	{off,	in_A,	in_B,		high,	high,	low,	low},
+	{off,	out_A,	in_B,		high,	low,	low,	high},
+	{off,	out_B,	in_B,		high,	low,	low,	high},
+	{in_A,	off,	in_B,		high,	high,	low,	low},
+	{in_A,	out_B,	in_B,		high,	low,	low,	high},
+	{out_A,	off,	in_B,		high,	low,	low,	high},
+	{out_B,	off,	in_B,		high,	low,	low,	high},
+	{out_B,	in_A,	in_B,		high,	low,	low,	high},
+	{off,	off,	out_B,		low,	high,	high,	high},
+	{off,	in_A,	out_B,		low,	high,	high,	low},
+	{off,	out_A,	out_B,		low,	low,	high,	high},
+	{off,	in_B,	out_B,		low,	high,	high,	low},
+	{in_A,	off,	out_B,		low,	high,	high,	low},
+	{in_A,	in_B,	out_B,		low,	high,	high,	low},
+	{out_A,	off,	out_B,		low,	low,	high,	high},
+	{out_A,	in_B,	out_B,		low,	low,	high,	high},
+	{in_B,	off,	out_B,		low,	high,	high,	low},
+	{in_B,	in_A,	out_B,		low,	high,	high,	low},
+	{in_B,	out_A,	out_B,		low,	low,	high,	high},
+	{end,	end,	end,		end,	end,	end,	end}
+};
+
+static int i40e_ptp_set_pins(struct i40e_pf *pf,
+			     struct i40e_ptp_pins_settings *pins);
 
+/**
+ * i40e_ptp_extts0_work - workqueue task function
+ * @work: workqueue task structure
+ *
+ * Service for PTP external clock event
+ **/
+void i40e_ptp_extts0_work(struct work_struct *work)
+{
+	struct i40e_pf *pf = container_of(work, struct i40e_pf,
+					  ptp_extts0_work);
+	struct i40e_hw *hw = &pf->hw;
+	struct ptp_clock_event event;
+	u32 hi, lo;
+
+	/* Event time is captured by one of the two matched registers
+	 *      PRTTSYN_EVNT_L: 32 LSB of sampled time event
+	 *      PRTTSYN_EVNT_H: 32 MSB of sampled time event
+	 * Event is defined in PRTTSYN_EVNT_0 register
+	 */
+	lo = rd32(hw, I40E_PRTTSYN_EVNT_L(0));
+	hi = rd32(hw, I40E_PRTTSYN_EVNT_H(0));
+
+	event.timestamp = (((u64)hi) << 32) | lo;
+
+	event.type = PTP_CLOCK_EXTTS;
+	event.index = 0;
+
+	/* fire event */
+	ptp_clock_event(pf->ptp_clock, &event);
+}
+
+/**
+ * i40e_is_ptp_pin_dev - check if device supports PTP pins
+ * @hw: pointer to the hardware structure
+ *
+ * Return true if device supports PTP pins, false otherwise.
+ */
+static bool i40e_is_ptp_pin_dev(const struct i40e_hw *const hw)
+{
+	return I40E_DEV_ID_25G_SFP28 == hw->device_id &&
+	       I40E_SUBDEV_ID_25G_PTP_PIN == hw->subsystem_device_id;
+}
+
+/**
+ * i40_ptp_reset_timing_events - Reset PTP timing events
+ * @pf: Board private structure
+ *
+ * This function resets timing events for pf.
+ **/
+static void i40_ptp_reset_timing_events(struct i40e_pf *pf)
+{
+	u32 i;
+
+	spin_lock_bh(&pf->ptp_rx_lock);
+	for (i = 0; i <= I40E_PRTTSYN_RXTIME_L_MAX_INDEX; i++) {
+		/* reading and automatically clearing timing events registers */
+		rd32(&pf->hw, I40E_PRTTSYN_RXTIME_L(i));
+		rd32(&pf->hw, I40E_PRTTSYN_RXTIME_H(i));
+		pf->latch_events[i] = 0;
+	}
+	/* reading and automatically clearing timing events registers */
+	rd32(&pf->hw, I40E_PRTTSYN_TXTIME_L);
+	rd32(&pf->hw, I40E_PRTTSYN_TXTIME_H);
+
+	pf->tx_hwtstamp_timeouts = 0;
+	pf->tx_hwtstamp_skipped = 0;
+	pf->rx_hwtstamp_cleared = 0;
+	pf->latch_event_flags = 0;
+	spin_unlock_bh(&pf->ptp_rx_lock);
+}
+
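+/**
+ * i40e_ptp_verify - verify a requested pin/function assignment
+ * @ptp: The PTP clock structure
+ * @pin: pin index to verify
+ * @func: requested pin function
+ * @chan: channel for the function
+ *
+ * Any assignment is accepted here; the combination is validated later
+ * against the allowed pin/LED state table in i40e_ptp_set_pins().
+ **/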
+int i40e_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+		    enum ptp_pin_function func, unsigned int chan)
+{
+	return 0;
+}
+
 /**
  * i40e_ptp_read - Read the PHC time from the device
  * @pf: Board private structure
@@ -88,8 +268,8 @@ static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec64 *ts)
 	/* The timer will not update until the high register is written, so
 	 * write the low register first.
 	 */
-	wr32(hw, I40E_PRTTSYN_TIME_L, ns & 0xFFFFFFFF);
-	wr32(hw, I40E_PRTTSYN_TIME_H, ns >> 32);
+	wr32(hw, I40E_PRTTSYN_TIME_L, (u32)ns);
+	wr32(hw, I40E_PRTTSYN_TIME_H, (u32)(ns >> 32));
 }
 
 /**
@@ -129,42 +309,103 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
 		ppb = -ppb;
 	}
 
-	smp_mb(); /* Force any pending update before accessing. */
-	adj = ACCESS_ONCE(pf->ptp_base_adj);
-
-	freq = adj;
+	freq = I40E_PTP_40GB_INCVAL;
 	freq *= ppb;
 	diff = div_u64(freq, 1000000000ULL);
 
 	if (neg_adj)
-		adj -= diff;
+		adj = I40E_PTP_40GB_INCVAL - diff;
 	else
-		adj += diff;
+		adj = I40E_PTP_40GB_INCVAL + diff;
+
+	/* At some link speeds, the base incval is so large that directly
+	 * multiplying by ppb would result in arithmetic overflow even when
+	 * using a u64. Avoid this by instead calculating the new incval
+	 * always in terms of the 40GbE clock rate and then multiplying by the
+	 * link speed factor afterwards. This does result in slightly lower
+	 * precision at lower link speeds, but it is fairly minor.
+	 */
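+	/* e.g. ppb = 100: diff = 6871947673 * 100 / 10^9 = 687, so adj is
+	 * the 40GbE base incval +/- 687 before the multiplier is applied.
+	 */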
+	smp_mb(); /* Force any pending update before accessing. */
+	adj *= READ_ONCE(pf->ptp_adj_mult);
 
-	wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF);
-	wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32);
+	wr32(hw, I40E_PRTTSYN_INC_L, (u32)adj);
+	wr32(hw, I40E_PRTTSYN_INC_H, (u32)(adj >> 32));
 
 	return 0;
 }
 
+/**
+ * i40e_ptp_set_1pps_signal_hw - configure 1PPS PTP signal for pins
+ * @pf: the PF private data structure
+ *
+ * Configure 1PPS signal used for PTP pins
+ **/
+static void i40e_ptp_set_1pps_signal_hw(struct i40e_pf *pf)
+{
+	struct i40e_hw *hw = &pf->hw;
+	struct timespec64 now;
+	u64 ns;
+
+	wr32(hw, I40E_PRTTSYN_AUX_0(1), 0);
+	wr32(hw, I40E_PRTTSYN_AUX_1(1),
+	     I40E_PRTTSYN_AUX_1_MAX_INDEX);
+	wr32(hw, I40E_PRTTSYN_AUX_0(1),
+	     I40E_PRTTSYN_AUX_0_MAX_INDEX);
+
+	i40e_ptp_read(pf, &now);
+	now.tv_sec += I40E_PTP_2_SEC_DELAY;
+	now.tv_nsec = 0;
+	ns = timespec64_to_ns(&now);
+
+	/* I40E_PRTTSYN_TGT_L(1) */
+	wr32(hw, I40E_PRTTSYN_TGT_L(1), ns & 0xFFFFFFFF);
+	/* I40E_PRTTSYN_TGT_H(1) */
+	wr32(hw, I40E_PRTTSYN_TGT_H(1), ns >> 32);
+	wr32(hw, I40E_PRTTSYN_CLKO(1), I40E_PTP_HALF_SECOND);
+	wr32(hw, I40E_PRTTSYN_AUX_1(1),
+	     I40E_PRTTSYN_AUX_1_MAX_INDEX);
+	wr32(hw, I40E_PRTTSYN_AUX_0(1), 7);
+}
+
 /**
  * i40e_ptp_adjtime - Adjust the PHC time
  * @ptp: The PTP clock structure
  * @delta: Offset in nanoseconds to adjust the PHC time by
  *
- * Adjust the frequency of the PHC by the indicated parts per billion from the
- * base frequency.
+ * Adjust the current clock time by a delta specified in nanoseconds.
  **/
 static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 {
 	struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
-	struct timespec64 now;
+	struct i40e_hw *hw = &pf->hw;
 
 	mutex_lock(&pf->tmreg_lock);
 
-	i40e_ptp_read(pf, &now);
-	timespec64_add_ns(&now, delta);
-	i40e_ptp_write(pf, (const struct timespec64 *)&now);
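+	/* Two cases below: deltas within ~1s are applied atomically via
+	 * the PRTTSYN_ADJ register (the value is masked to 30 bits,
+	 * 0x3FFFFFFF, and negative adjustments are flagged with I40E_ISGN);
+	 * larger deltas fall back to a read-modify-write of the clock time.
+	 */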
+	if (delta > -999999900LL && delta < 999999900LL) {
+		int neg_adj = 0;
+		u32 timadj;
+		u64 tohw;
+
+		if (delta < 0) {
+			neg_adj = 1;
+			tohw = -delta;
+		} else {
+			tohw = delta;
+		}
+
+		timadj = tohw & 0x3FFFFFFF;
+		if (neg_adj)
+			timadj |= I40E_ISGN;
+		wr32(hw, I40E_PRTTSYN_ADJ, timadj);
+	} else {
+		struct timespec64 then, now;
+
+		then = ns_to_timespec64(delta);
+		i40e_ptp_read(pf, &now);
+		now = timespec64_add(now, then);
+		i40e_ptp_write(pf, (const struct timespec64 *)&now);
+		i40e_ptp_set_1pps_signal_hw(pf);
+	}
 
 	mutex_unlock(&pf->tmreg_lock);
 
@@ -174,7 +415,7 @@ static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 /**
  * i40e_ptp_gettime - Get the time of the PHC
  * @ptp: The PTP clock structure
- * @ts: timespec structure to hold the current time value
+ * @ts: timespec64 structure to hold the current time value
  *
 * Read the device clock and return the correct value in ns, after converting it
  * into a timespec struct.
@@ -186,14 +427,13 @@ static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
 	mutex_lock(&pf->tmreg_lock);
 	i40e_ptp_read(pf, ts);
 	mutex_unlock(&pf->tmreg_lock);
-
 	return 0;
 }
 
 /**
  * i40e_ptp_settime - Set the time of the PHC
  * @ptp: The PTP clock structure
- * @ts: timespec structure that holds the new time value
+ * @ts: timespec64 structure that holds the new time value
  *
  * Set the device clock to the user input value. The conversion from timespec
  * to ns happens in the write function.
@@ -206,23 +446,155 @@ static int i40e_ptp_settime(struct ptp_clock_info *ptp,
 	mutex_lock(&pf->tmreg_lock);
 	i40e_ptp_write(pf, ts);
 	mutex_unlock(&pf->tmreg_lock);
+	return 0;
+}
+
+#ifndef HAVE_PTP_CLOCK_INFO_GETTIME64
+/**
+ * i40e_ptp_gettime32 - Get the time of the PHC
+ * @ptp: The PTP clock structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * Read the device clock and return the correct value in ns, after converting it
+ * into a timespec struct.
+ **/
+static int i40e_ptp_gettime32(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+	struct timespec64 ts64;
+	int err;
 
+	err = i40e_ptp_gettime(ptp, &ts64);
+	if (err)
+		return err;
+
+	*ts = timespec64_to_timespec(ts64);
 	return 0;
 }
 
 /**
- * i40e_ptp_feature_enable - Enable/disable ancillary features of the PHC subsystem
+ * i40e_ptp_settime32 - Set the time of the PHC
  * @ptp: The PTP clock structure
- * @rq: The requested feature to change
- * @on: Enable/disable flag
+ * @ts: timespec structure that holds the new time value
  *
- * The XL710 does not support any of the ancillary features of the PHC
- * subsystem, so this function may just return.
+ * Set the device clock to the user input value. The conversion from timespec
+ * to ns happens in the write function.
+ **/
+static int i40e_ptp_settime32(struct ptp_clock_info *ptp,
+			      const struct timespec *ts)
+{
+	struct timespec64 ts64 = timespec_to_timespec64(*ts);
+
+	return i40e_ptp_settime(ptp, &ts64);
+}
+#endif
+
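+/**
+ * i40e_pps_configure - configure PPS events
+ * @ptp: The PTP clock structure
+ * @rq: The PTP clock request structure
+ * @on: whether PPS events are being enabled or disabled
+ *
+ * PPS requests are simply accepted; no additional hardware setup is
+ * done here.
+ **/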
+static int i40e_pps_configure(struct ptp_clock_info *ptp,
+			      struct ptp_clock_request *rq,
+			      int on)
+{
+	return 0;
+}
+
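+/**
+ * i40e_pin_state - map a channel/function pair to a GPIO pin state
+ * @index: channel index (0 selects the _A states, 1 the _B states)
+ * @func: requested PTP pin function
+ *
+ * Returns the matching input or output state, or off if the pair
+ * matches neither EXTTS nor PEROUT.
+ **/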
+enum i40e_ptp_gpio_pin_state i40e_pin_state(int index, int func)
+{
+	enum i40e_ptp_gpio_pin_state state = off;
+
+	if (index == 0 && func == PTP_PF_EXTTS)
+		state = in_A;
+	if (index == 1 && func == PTP_PF_EXTTS)
+		state = in_B;
+	if (index == 0 && func == PTP_PF_PEROUT)
+		state = out_A;
+	if (index == 1 && func == PTP_PF_PEROUT)
+		state = out_B;
+
+	return state;
+}
+
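+/**
+ * i40e_ptp_enable_pin - enable or disable a GPIO pin for a PTP function
+ * @pf: Board private structure
+ * @chan: channel of the requested function
+ * @func: PTP pin function to assign
+ * @on: whether the function is being enabled or disabled
+ *
+ * Returns 0 on success, -EBUSY if no pin provides the function, or
+ * -EINVAL if the resulting pin configuration is not allowed.
+ **/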
+int i40e_ptp_enable_pin(struct i40e_pf *pf, unsigned int chan,
+			enum ptp_pin_function func, int on)
+{
+	struct i40e_ptp_pins_settings pins;
+	enum i40e_ptp_gpio_pin_state *pin = NULL;
+	int pin_index;
+
+	/* Preserve previous state of pins that we don't touch */
+	pins.sdp3_2 = pf->ptp_pins->sdp3_2;
+	pins.sdp3_3 = pf->ptp_pins->sdp3_3;
+	pins.gpio_4 = pf->ptp_pins->gpio_4;
+
+	/* If we want to turn on the pin - find the corresponding one based on
+	 * the given index. If we want to turn the function off - we need to
+	 * find which pin had it assigned. We can't use ptp_find_pin here
+	 * because it tries to lock the pincfg_mux, which is already held by
+	 * ptp_pin_store(), our caller.
+	 */
+	if (on) {
+		pin_index = ptp_find_pin(pf->ptp_clock, func, chan);
+		if (pin_index < 0)
+			return -EBUSY;
+
+		switch (pin_index) {
+		case SDP3_2:
+			pin = &pins.sdp3_2;
+			break;
+		case SDP3_3:
+			pin = &pins.sdp3_3;
+			break;
+		case GPIO_4:
+			pin = &pins.gpio_4;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		*pin = i40e_pin_state(chan, func);
+	} else {
+		if (pins.sdp3_2 == i40e_pin_state(chan, func))
+			pins.sdp3_2 = off;
+		if (pins.sdp3_3 == i40e_pin_state(chan, func))
+			pins.sdp3_3 = off;
+		if (pins.gpio_4 == i40e_pin_state(chan, func))
+			pins.gpio_4 = off;
+	}
+
+	return i40e_ptp_set_pins(pf, &pins) ? -EINVAL : 0;
+}
+
+/**
+ * i40e_ptp_feature_enable - Enable external clock pins
+ * @ptp: The PTP clock structure
+ * @rq: The PTP clock request structure
+ * @on: To turn feature on/off
+ *
+ * Enable or disable the requested PTP pin feature.
  **/
 static int i40e_ptp_feature_enable(struct ptp_clock_info *ptp,
-				   struct ptp_clock_request *rq, int on)
+			   struct ptp_clock_request *rq,
+			   int on)
 {
-	return -EOPNOTSUPP;
+	struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+
+	enum ptp_pin_function func;
+	unsigned int chan;
+
+	/* TODO: Implement flags handling for EXTTS and PEROUT */
+	switch (rq->type) {
+	case PTP_CLK_REQ_EXTTS:
+		func = PTP_PF_EXTTS;
+		chan = rq->extts.index;
+		break;
+	case PTP_CLK_REQ_PEROUT:
+		func = PTP_PF_PEROUT;
+		chan = rq->perout.index;
+		break;
+	case PTP_CLK_REQ_PPS:
+		return i40e_pps_configure(ptp, rq, on);
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return i40e_ptp_enable_pin(pf, chan, func, on);
 }
 
 /**
@@ -269,7 +641,6 @@ static u32 i40e_ptp_get_rx_events(struct i40e_pf *pf)
 /**
  * i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung
  * @pf: The PF private data structure
- * @vsi: The VSI with the rings relevant to 1588
  *
  * This watchdog task is scheduled to detect error case where hardware has
  * dropped an Rx packet that was timestamped when the ring is full. The
@@ -333,10 +704,12 @@ void i40e_ptp_rx_hang(struct i40e_pf *pf)
  * This watchdog task is run periodically to make sure that we clear the Tx
  * timestamp logic if we don't obtain a timestamp in a reasonable amount of
  * time. It is unexpected in the normal case but if it occurs it results in
- * permanently prevent timestamps of future packets
+ * permanently preventing timestamps of future packets.
  **/
 void i40e_ptp_tx_hang(struct i40e_pf *pf)
 {
+	struct sk_buff *skb;
+
 	if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx)
 		return;
 
@@ -349,9 +722,12 @@ void i40e_ptp_tx_hang(struct i40e_pf *pf)
 	 * within a second it is reasonable to assume that we never will.
 	 */
 	if (time_is_before_jiffies(pf->ptp_tx_start + HZ)) {
-		dev_kfree_skb_any(pf->ptp_tx_skb);
+		skb = pf->ptp_tx_skb;
 		pf->ptp_tx_skb = NULL;
 		clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
+
+		/* Free the skb after we clear the bitlock */
+		dev_kfree_skb_any(skb);
 		pf->tx_hwtstamp_timeouts++;
 	}
 }
@@ -461,6 +837,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
 	struct i40e_link_status *hw_link_info;
 	struct i40e_hw *hw = &pf->hw;
 	u64 incval;
+	u32 mult;
 
 	hw_link_info = &hw->phy.link_info;
 
@@ -468,10 +845,10 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
 
 	switch (hw_link_info->link_speed) {
 	case I40E_LINK_SPEED_10GB:
-		incval = I40E_PTP_10GB_INCVAL;
+		mult = I40E_PTP_10GB_INCVAL_MULT;
 		break;
 	case I40E_LINK_SPEED_1GB:
-		incval = I40E_PTP_1GB_INCVAL;
+		mult = I40E_PTP_1GB_INCVAL_MULT;
 		break;
 	case I40E_LINK_SPEED_100MB:
 	{
@@ -482,31 +859,36 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
 				 "1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n");
 			warn_once++;
 		}
-		incval = 0;
+		mult = 0;
 		break;
 	}
 	case I40E_LINK_SPEED_40GB:
 	default:
-		incval = I40E_PTP_40GB_INCVAL;
+		mult = 1;
 		break;
 	}
 
+	/* The increment value is calculated by taking the base 40GbE incvalue
+	 * and multiplying it by a factor based on the link speed.
+	 */
+	incval = I40E_PTP_40GB_INCVAL * mult;
+
 	/* Write the new increment value into the increment register. The
 	 * hardware will not update the clock until both registers have been
 	 * written.
 	 */
-	wr32(hw, I40E_PRTTSYN_INC_L, incval & 0xFFFFFFFF);
-	wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
+	wr32(hw, I40E_PRTTSYN_INC_L, (u32)incval);
+	wr32(hw, I40E_PRTTSYN_INC_H, (u32)(incval >> 32));
 
 	/* Update the base adjustment value. */
-	ACCESS_ONCE(pf->ptp_base_adj) = incval;
+	WRITE_ONCE(pf->ptp_adj_mult, mult);
 	smp_mb(); /* Force the above update. */
 }
 
 /**
  * i40e_ptp_get_ts_config - ioctl interface to read the HW timestamping
  * @pf: Board private structure
- * @ifreq: ioctl data
+ * @ifr: ioctl data
  *
 * Obtain the current hardware timestamping settings as requested. To do this,
  * keep a shadow copy of the timestamp settings rather than attempting to
@@ -523,6 +905,296 @@ int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
 		-EFAULT : 0;
 }
 
+/**
+ * i40e_ptp_free_pins - free memory used by PTP pins
+ * @pf: Board private structure
+ *
+ * Release memory allocated for PTP pins.
+ **/
+void i40e_ptp_free_pins(struct i40e_pf *pf)
+{
+	if (pf->hw.pf_id == 0 && i40e_is_ptp_pin_dev(&pf->hw)) {
+		kfree(pf->ptp_pins);
+		kfree(pf->ptp_caps.pin_config);
+		pf->ptp_pins = NULL;
+		kobject_put(pf->ptp_kobj);
+	}
+}
+
+/**
+ * i40e_ptp_set_pin_out_hw - set HW GPIO pin, handling only outputs
+ * @pf: Board private structure
+ * @pin: GPIO pin index
+ * @state: requested pin state
+ *
+ * This function programs a GPIO pin for PTP, acting only on the
+ * output states.
+ **/
+static void i40e_ptp_set_pin_out_hw(struct i40e_pf *pf,
+				   unsigned int pin,
+				   enum i40e_ptp_gpio_pin_state state)
+{
+	struct i40e_hw *hw = &pf->hw;
+
+	switch (state) {
+	case out_A:
+		wr32(hw, I40E_GLGEN_GPIO_CTL(pin),
+		     I40E_PORT_0_OUT_HIGH_TIMESYNC_0);
+		break;
+	case out_B:
+		wr32(hw, I40E_GLGEN_GPIO_CTL(pin),
+		     I40E_PORT_1_OUT_HIGH_TIMESYNC_1);
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * i40e_ptp_set_pin_in_hw - set HW GPIO pin, handling only inputs
+ * @pf: Board private structure
+ * @pin: GPIO pin index
+ * @state: requested pin state
+ *
+ * This function programs a GPIO pin for PTP, acting only on the
+ * input states.
+ **/
+static void i40e_ptp_set_pin_in_hw(struct i40e_pf *pf,
+				   unsigned int pin,
+				   enum i40e_ptp_gpio_pin_state state)
+{
+	struct i40e_hw *hw = &pf->hw;
+
+	switch (state) {
+	case off:
+		wr32(hw, I40E_GLGEN_GPIO_CTL(pin), 0);
+		break;
+	case in_A:
+		wr32(hw, I40E_GLGEN_GPIO_CTL(pin), I40E_PORT_0_TIMESYNC_1);
+		break;
+	case in_B:
+		wr32(hw, I40E_GLGEN_GPIO_CTL(pin), I40E_PORT_1_TIMESYNC_1);
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * i40e_ptp_set_pins_hw - Set HW GPIO pins
+ * @pf: Board private structure
+ *
+ * This function sets GPIO pins for PTP
+ **/
+static void i40e_ptp_set_pins_hw(struct i40e_pf *pf)
+{
+	const struct i40e_ptp_pins_settings *pins = pf->ptp_pins;
+	struct i40e_hw *hw = &pf->hw;
+
+	if (!i40e_is_ptp_pin_dev(hw)) {
+		dev_warn(&pf->pdev->dev,
+			 "PTP external clock not supported.\n");
+		return;
+	}
+
+	if (!pins || pf->hw.pf_id) {
+		dev_warn(&pf->pdev->dev,
+			 "PTP PIN setting allowed for PF0 only.\n");
+		return;
+	}
+
+	/* setting SDP PTP pins first to the low/off state */
+	i40e_ptp_set_pin_in_hw(pf, I40E_SDP3_2, off);
+	i40e_ptp_set_pin_in_hw(pf, I40E_SDP3_3, off);
+	i40e_ptp_set_pin_in_hw(pf, I40E_GPIO_4, off);
+
+	i40e_ptp_set_pin_out_hw(pf, I40E_SDP3_2, pins->sdp3_2);
+	i40e_ptp_set_pin_out_hw(pf, I40E_SDP3_3, pins->sdp3_3);
+	i40e_ptp_set_pin_out_hw(pf, I40E_GPIO_4, pins->gpio_4);
+
+	i40e_ptp_set_pin_in_hw(pf, I40E_SDP3_2, pins->sdp3_2);
+	i40e_ptp_set_pin_in_hw(pf, I40E_SDP3_3, pins->sdp3_3);
+	i40e_ptp_set_pin_in_hw(pf, I40E_GPIO_4, pins->gpio_4);
+
+	switch (pf->ptp_pins->led2_0) {
+	case low:
+		wr32(hw, I40E_GLGEN_GPIO_SET,
+		     I40E_DRIVE_SDP_ON | I40E_LED2_0);
+		break;
+	case high:
+		wr32(hw, I40E_GLGEN_GPIO_SET,
+		     I40E_DRIVE_SDP_ON | I40E_GPIO_SET_HIGH | I40E_LED2_0);
+		break;
+	}
+	switch (pf->ptp_pins->led2_1) {
+	case low:
+		wr32(hw, I40E_GLGEN_GPIO_SET,
+		     I40E_DRIVE_SDP_ON | I40E_LED2_1);
+		break;
+	case high:
+		wr32(hw, I40E_GLGEN_GPIO_SET,
+		     I40E_DRIVE_SDP_ON | I40E_GPIO_SET_HIGH | I40E_LED2_1);
+		break;
+	}
+	switch (pf->ptp_pins->led3_0) {
+	case low:
+		wr32(hw, I40E_GLGEN_GPIO_SET,
+		     I40E_DRIVE_SDP_ON | I40E_LED3_0);
+		break;
+	case high:
+		wr32(hw, I40E_GLGEN_GPIO_SET,
+		     I40E_DRIVE_SDP_ON | I40E_GPIO_SET_HIGH | I40E_LED3_0);
+		break;
+	}
+	switch (pf->ptp_pins->led3_1) {
+	case low:
+		wr32(hw, I40E_GLGEN_GPIO_SET,
+		     I40E_DRIVE_SDP_ON | I40E_LED3_1);
+		break;
+	case high:
+		wr32(hw, I40E_GLGEN_GPIO_SET,
+		     I40E_DRIVE_SDP_ON | I40E_GPIO_SET_HIGH | I40E_LED3_1);
+		break;
+	}
+
+	dev_info(&pf->pdev->dev,
+		 "PTP configuration set to: SDP3_2: %s,  SDP3_3: %s,  GPIO_4: %s\n",
+		 i40e_ptp_gpio_pin_state2str[pins->sdp3_2],
+		 i40e_ptp_gpio_pin_state2str[pins->sdp3_3],
+		 i40e_ptp_gpio_pin_state2str[pins->gpio_4]);
+}
+
+/**
+ * i40e_ptp_set_pins - set PTP pins in HW
+ * @pf: Board private structure
+ * @pins: PTP pins to be applied
+ *
+ * Validate and set PTP pins in HW for specific PF.
+ * Return 0 on success or negative value on error.
+ **/
+static int i40e_ptp_set_pins(struct i40e_pf *pf,
+			     struct i40e_ptp_pins_settings *pins)
+{
+	int i = 0;
+
+	if (!i40e_is_ptp_pin_dev(&pf->hw)) {
+		dev_warn(&pf->pdev->dev,
+			 "PTP external clock not supported.\n");
+		return -ENOTSUPP;
+	}
+
+	if (!pf->ptp_pins || pf->hw.pf_id) {
+		dev_warn(&pf->pdev->dev,
+			 "PTP PIN setting allowed for PF0 only.\n");
+		return -ENOTSUPP;
+	}
+
+	if (pins->sdp3_2 == invalid)
+		pins->sdp3_2 = pf->ptp_pins->sdp3_2;
+	if (pins->sdp3_3 == invalid)
+		pins->sdp3_3 = pf->ptp_pins->sdp3_3;
+	if (pins->gpio_4 == invalid)
+		pins->gpio_4 = pf->ptp_pins->gpio_4;
+	while (i40e_ptp_pin_led_allowed_states[i].sdp3_2 != end) {
+		if (pins->sdp3_2 == i40e_ptp_pin_led_allowed_states[i].sdp3_2 &&
+		    pins->sdp3_3 == i40e_ptp_pin_led_allowed_states[i].sdp3_3 &&
+		    pins->gpio_4 == i40e_ptp_pin_led_allowed_states[i].gpio_4) {
+			pins->led2_0 = i40e_ptp_pin_led_allowed_states[i].led2_0;
+			pins->led2_1 = i40e_ptp_pin_led_allowed_states[i].led2_1;
+			pins->led3_0 = i40e_ptp_pin_led_allowed_states[i].led3_0;
+			pins->led3_1 = i40e_ptp_pin_led_allowed_states[i].led3_1;
+			break;
+		}
+		i++;
+	}
+	if (i40e_ptp_pin_led_allowed_states[i].sdp3_2 == end) {
+		dev_warn(&pf->pdev->dev,
+			 "Unsupported PTP pin configuration: SDP3_2: %s,  SDP3_3: %s,  GPIO_4: %s\n",
+			 i40e_ptp_gpio_pin_state2str[pins->sdp3_2],
+			 i40e_ptp_gpio_pin_state2str[pins->sdp3_3],
+			 i40e_ptp_gpio_pin_state2str[pins->gpio_4]);
+
+		return -EPERM;
+	}
+	memcpy(pf->ptp_pins, pins, sizeof(*pins));
+	i40e_ptp_set_pins_hw(pf);
+	i40_ptp_reset_timing_events(pf);
+
+	return 0;
+}
+
+/**
+ * i40e_ptp_set_pins_ioctl - ioctl interface to set the HW timestamping
+ *                           gpio pins
+ * @pf: board private structure
+ * @ifr: ioctl data
+ *
+ * Set the current hardware timestamping pins for current PF.
+ **/
+int i40e_ptp_set_pins_ioctl(struct i40e_pf *pf, struct ifreq *ifr)
+{
+	struct i40e_ptp_pins_settings pins;
+	int err;
+
+	if (!i40e_is_ptp_pin_dev(&pf->hw)) {
+		dev_warn(&pf->pdev->dev,
+			 "PTP external clock not supported.\n");
+		return -ENOTSUPP;
+	}
+
+	if (!pf->ptp_pins || pf->hw.pf_id) {
+		dev_warn(&pf->pdev->dev,
+			 "PTP PIN setting allowed for PF0 only.\n");
+		return -ENOTSUPP;
+	}
+
+	err = copy_from_user(&pins, ifr->ifr_data, sizeof(pins));
+	if (err) {
+		dev_warn(&pf->pdev->dev, "Cannot read user data during SIOCSPINS ioctl\n");
+		return -EIO;
+	}
+
+	return i40e_ptp_set_pins(pf, &pins);
+}
+
+/**
+ * i40e_ptp_alloc_pins - allocate PTP pins structure
+ * @pf: Board private structure
+ *
+ * allocate PTP pins structure
+ **/
+int i40e_ptp_alloc_pins(struct i40e_pf *pf)
+{
+	dev_info(&pf->pdev->dev,
+		"PTP subsystem device ID: %d\n", pf->hw.subsystem_device_id);
+
+	if (pf->hw.pf_id || !i40e_is_ptp_pin_dev(&pf->hw))
+		return 0;
+
+	pf->ptp_pins = kzalloc(sizeof(struct i40e_ptp_pins_settings), GFP_KERNEL);
+
+	if (!pf->ptp_pins) {
+		dev_warn(&pf->pdev->dev, "Cannot allocate memory for PTP pins structure\n");
+		return -I40E_ERR_NO_MEMORY;
+	}
+
+	pf->ptp_pins->sdp3_2 = off;
+	pf->ptp_pins->sdp3_3 = off;
+	pf->ptp_pins->gpio_4 = off;
+	pf->ptp_pins->led2_0 = high;
+	pf->ptp_pins->led2_1 = high;
+	pf->ptp_pins->led3_0 = high;
+	pf->ptp_pins->led3_1 = high;
+
+	i40e_ptp_set_pins_hw(pf);
+
+	return 0;
+}
+
+/**
+ * i40e_ptp_get_pins - ioctl interface to read the HW timestamping gpio pins
+ * @pf: Board private structure
+ * @ifr: ioctl data
+ *
+ * Obtain the current hardware timestamping gpio pins settings as requested.
+ **/
+int i40e_ptp_get_pins(struct i40e_pf *pf, struct ifreq *ifr)
+{
+	if (!i40e_is_ptp_pin_dev(&pf->hw)) {
+		dev_warn(&pf->pdev->dev,
+			 "PTP external clock not supported.\n");
+		return -ENOTSUPP;
+	}
+
+	if (!pf->ptp_pins || pf->hw.pf_id) {
+		dev_warn(&pf->pdev->dev,
+			 "PTP PIN reading allowed for PF0 only.\n");
+		return -ENOTSUPP;
+	}
+
+	return copy_to_user(ifr->ifr_data, pf->ptp_pins,
+			    sizeof(*pf->ptp_pins)) ? -EFAULT : 0;
+}
+
 /**
  * i40e_ptp_set_timestamp_mode - setup hardware for requested timestamp mode
  * @pf: Board private structure
@@ -541,6 +1213,21 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
 	struct i40e_hw *hw = &pf->hw;
 	u32 tsyntype, regval;
 
+	/* Select the external trigger that causes the event */
+	regval = rd32(hw, I40E_PRTTSYN_AUX_0(0));
+	/* Bits 17:16 are EVNTLVL; 01b selects a rising edge */
+	regval &= 0;
+	regval |= (1 << I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT);
+	/* regval: 0001 0000 0000 0000 0000 */
+	wr32(hw, I40E_PRTTSYN_AUX_0(0), regval);
+
+	/* Enable interrupts */
+	regval = rd32(hw, I40E_PRTTSYN_CTL0);
+	regval |= 1 << I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT;
+	wr32(hw, I40E_PRTTSYN_CTL0, regval);
+
+	INIT_WORK(&pf->ptp_extts0_work, i40e_ptp_extts0_work);
+
 	/* Reserved for future extensions. */
 	if (config->flags)
 		return -EINVAL;
@@ -599,7 +1286,9 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
 		}
 		break;
+#ifdef HAVE_HWTSTAMP_FILTER_NTP_ALL
 	case HWTSTAMP_FILTER_NTP_ALL:
+#endif /* HAVE_HWTSTAMP_FILTER_NTP_ALL */
 	case HWTSTAMP_FILTER_ALL:
 	default:
 		return -ERANGE;
@@ -650,7 +1339,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
 /**
  * i40e_ptp_set_ts_config - ioctl interface to control the HW timestamping
  * @pf: Board private structure
- * @ifreq: ioctl data
+ * @ifr: ioctl data
  *
  * Respond to the user filter requests and make the appropriate hardware
  * changes here. The XL710 cannot support splitting of the Tx/Rx timestamping
@@ -683,6 +1372,41 @@ int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
 		-EFAULT : 0;
 }
 
+static int i40e_init_pin_config(struct i40e_pf *pf)
+{
+	int i;
+
+	if (pf->hw.pf_id != 0)
+		return -ENOTSUPP;
+
+	pf->ptp_caps.n_pins = 3;
+	pf->ptp_caps.n_ext_ts = 2;
+	pf->ptp_caps.pps = 1;
+	pf->ptp_caps.n_per_out = 2;
+
+	pf->ptp_caps.pin_config = kcalloc(pf->ptp_caps.n_pins,
+					  sizeof(*pf->ptp_caps.pin_config),
+					  GFP_KERNEL);
+	if (!pf->ptp_caps.pin_config)
+		return -ENOMEM;
+
+	for (i = 0; i < pf->ptp_caps.n_pins; i++) {
+		snprintf(pf->ptp_caps.pin_config[i].name,
+			 sizeof(pf->ptp_caps.pin_config[i].name),
+			 "%s", sdp_desc[i].name);
+		pf->ptp_caps.pin_config[i].index = sdp_desc[i].index;
+		pf->ptp_caps.pin_config[i].func = PTP_PF_NONE;
+		pf->ptp_caps.pin_config[i].chan = sdp_desc[i].chan;
+	}
+
+	pf->ptp_caps.verify = i40e_ptp_verify;
+	pf->ptp_caps.enable = i40e_ptp_feature_enable;
+
+	pf->ptp_caps.pps = 1;
+
+	return 0;
+}
+
 /**
  * i40e_ptp_create_clock - Create PTP clock device for userspace
  * @pf: Board private structure
@@ -695,20 +1419,29 @@ int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
  **/
 static long i40e_ptp_create_clock(struct i40e_pf *pf)
 {
+	int err;
 	/* no need to create a clock device if we already have one */
 	if (!IS_ERR_OR_NULL(pf->ptp_clock))
 		return 0;
 
-	strncpy(pf->ptp_caps.name, i40e_driver_name, sizeof(pf->ptp_caps.name));
+	strlcpy(pf->ptp_caps.name, i40e_driver_name,
+		sizeof(pf->ptp_caps.name) - 1);
 	pf->ptp_caps.owner = THIS_MODULE;
 	pf->ptp_caps.max_adj = 999999999;
-	pf->ptp_caps.n_ext_ts = 0;
-	pf->ptp_caps.pps = 0;
 	pf->ptp_caps.adjfreq = i40e_ptp_adjfreq;
 	pf->ptp_caps.adjtime = i40e_ptp_adjtime;
+#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64
 	pf->ptp_caps.gettime64 = i40e_ptp_gettime;
 	pf->ptp_caps.settime64 = i40e_ptp_settime;
-	pf->ptp_caps.enable = i40e_ptp_feature_enable;
+#else
+	pf->ptp_caps.gettime = i40e_ptp_gettime32;
+	pf->ptp_caps.settime = i40e_ptp_settime32;
+#endif
+	if (i40e_is_ptp_pin_dev(&pf->hw)) {
+		err = i40e_init_pin_config(pf);
+		if (err)
+			return err;
+	}
 
 	/* Attempt to register the clock before enabling the hardware. */
 	pf->ptp_clock = ptp_clock_register(&pf->ptp_caps, &pf->pdev->dev);
@@ -722,9 +1455,202 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
 	pf->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
 	pf->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
 
+	/* Set the previous "reset" time to the current Kernel clock time */
+	pf->ptp_prev_hw_time = ktime_to_timespec64(ktime_get_real());
+	pf->ptp_reset_start = ktime_get();
+
 	return 0;
 }
 
+/**
+ * i40e_ptp_save_hw_time - Save the current PTP time as ptp_prev_hw_time
+ * @pf: Board private structure
+ *
+ * Read the current PTP time and save it into pf->ptp_prev_hw_time. This should
+ * be called at the end of preparing to reset, just before hardware reset
+ * occurs, in order to preserve the PTP time as close as possible across
+ * resets.
+ */
+void i40e_ptp_save_hw_time(struct i40e_pf *pf)
+{
+	/* don't try to access the PTP clock if it's not enabled */
+	if (!(pf->flags & I40E_FLAG_PTP))
+		return;
+
+	i40e_ptp_gettime(&pf->ptp_caps, &pf->ptp_prev_hw_time);
+	/* Get a monotonic starting time for this reset */
+	pf->ptp_reset_start = ktime_get();
+}
+
+/**
+ * i40e_ptp_restore_hw_time - Restore the ptp_prev_hw_time + delta to PTP regs
+ * @pf: Board private structure
+ *
+ * Restore the PTP hardware clock registers. We previously cached the PTP
+ * hardware time as pf->ptp_prev_hw_time. To be as accurate as possible,
+ * update this value based on the time delta since the time was saved, using
+ * CLOCK_MONOTONIC (via ktime_get()) to calculate the time difference.
+ *
+ * This ensures that the hardware clock is restored to nearly what it should
+ * have been if a reset had not occurred.
+ */
+void i40e_ptp_restore_hw_time(struct i40e_pf *pf)
+{
+	ktime_t delta = ktime_sub(ktime_get(), pf->ptp_reset_start);
+
+	/* Update the previous HW time with the ktime delta */
+	timespec64_add_ns(&pf->ptp_prev_hw_time, ktime_to_ns(delta));
+
+	/* Restore the hardware clock registers */
+	i40e_ptp_settime(&pf->ptp_caps, &pf->ptp_prev_hw_time);
+}
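+
+/* Expected call order, per the comments above: i40e_ptp_save_hw_time()
+ * at the end of reset preparation, i40e_ptp_restore_hw_time() once the
+ * device is rebuilt, so the clock only loses the CLOCK_MONOTONIC time
+ * actually spent in reset.
+ */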
+
+#ifndef HAVE_PTP_1588_CLOCK_PINS
+/**
+ * __get_pf_pdev - helper function to get the pdev
+ * @kobj:	kobject passed
+ * @pdev:	PCI device information struct
+ */
+static int __get_pf_pdev(struct kobject *kobj, struct pci_dev **pdev)
+{
+	struct device *dev;
+
+	if (!kobj->parent)
+		return -EINVAL;
+
+	/* get pdev */
+	dev = to_dev(kobj->parent);
+	*pdev = to_pci_dev(dev);
+
+	return 0;
+}
+
+/**
+ * i40e_ptp_pins_to_num - convert PTP pins to integer number
+ * @pf:	PCI physical function
+ *
+ * Return PTP pins states from pf as integer number.
+ **/
+static unsigned int i40e_ptp_pins_to_num(struct i40e_pf *pf)
+{
+	return pf->ptp_pins->gpio_4 +
+	       pf->ptp_pins->sdp3_3 * 10 +
+	       pf->ptp_pins->sdp3_2 * 100;
+}
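+
+/* Example encoding: sdp3_2 = out_A (3), sdp3_3 = off (0),
+ * gpio_4 = in_B (2) gives 3 * 100 + 0 * 10 + 2 = 302.
+ */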
+
+/**
+ * i40e_ptp_set_pins_str - wrapper to set PTP pins in HW from string
+ * @pf: Board private structure
+ * @buf: string with PTP pins to be applied
+ * @count: length of a buf argument
+ *
+ * Set the current hardware timestamping pins for current PF.
+ * Return 0 on success and negative value on error.
+ **/
+static int i40e_ptp_set_pins_str(struct i40e_pf *pf, const char *buf,
+				 int count)
+{
+	struct i40e_ptp_pins_settings pins;
+	const int PIN_STR_LEN = 4;
+	unsigned long res;
+
+	if (count != PIN_STR_LEN || kstrtoul(buf, 10, &res))
+		return -EINVAL;
+
+	pins.sdp3_2 = res / 100 % 10;
+	pins.sdp3_3 = res / 10 % 10;
+	pins.gpio_4 = res % 10;
+
+	if (pins.sdp3_2 > out_B ||
+	    pins.sdp3_3 > out_B ||
+	    pins.gpio_4 > out_B)
+		return -EINVAL;
+
+	return i40e_ptp_set_pins(pf, &pins) ? -EINVAL : count;
+}
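+
+/* Usage sketch for the sysfs attribute below: writing "302\n" (three
+ * digits plus newline) requests SDP3_2 = out_A, SDP3_3 = off,
+ * GPIO_4 = in_B; reading returns the current state in the same
+ * "%.3d" format.
+ */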
+
+/**
+ * i40e_sysfs_ptp_pins_read - sysfs interface for reading PTP pins status
+ * @kobj:  sysfs node
+ * @attr:  sysfs node attributes
+ * @buf:   string representing PTP pins
+ *
+ * Return number of bytes read on success or negative value on failure.
+ **/
+static ssize_t i40e_sysfs_ptp_pins_read(struct kobject *kobj,
+					struct kobj_attribute *attr,
+					char *buf)
+{
+	struct pci_dev *pdev;
+	struct i40e_pf *pf;
+	unsigned int pins;
+
+	if (__get_pf_pdev(kobj, &pdev))
+		return -EPERM;
+
+	pf = pci_get_drvdata(pdev);
+	pins = i40e_ptp_pins_to_num(pf);
+
+	dev_info(&pf->pdev->dev,
+		 "PTP pins: SDP3_2: %s, SDP3_3: %s, GPIO_4: %s\n",
+		 i40e_ptp_gpio_pin_state2str[pf->ptp_pins->sdp3_2],
+		 i40e_ptp_gpio_pin_state2str[pf->ptp_pins->sdp3_3],
+		 i40e_ptp_gpio_pin_state2str[pf->ptp_pins->gpio_4]);
+
+	return sprintf(buf, "%.3d\n", pins);
+}
+
+/**
+ * i40e_sysfs_ptp_pins_write - sysfs interface for setting PTP pins in HW
+ * @kobj:  sysfs node
+ * @attr:  sysfs node attributes
+ * @buf:   string representing PTP pins
+ * @count: length of a 'buf' string
+ *
+ * Return number of bytes written on success or negative value on failure.
+ **/
+static ssize_t i40e_sysfs_ptp_pins_write(struct kobject *kobj,
+					 struct kobj_attribute *attr,
+					 const char *buf, size_t count)
+{
+	struct pci_dev *pdev;
+	struct i40e_pf *pf;
+
+	if (__get_pf_pdev(kobj, &pdev))
+		return -EPERM;
+
+	pf = pci_get_drvdata(pdev);
+
+	return i40e_ptp_set_pins_str(pf, buf, count);
+}
+
+/**
+ * i40e_ptp_pins_sysfs_init - initialize sysfs for PTP pins
+ * @pf: board private structure
+ *
+ * Initialize sysfs for handling PTP timestamping pins in HW.
+ **/
+static void i40e_ptp_pins_sysfs_init(struct i40e_pf *pf)
+{
+	if (pf->hw.pf_id != 0 || !i40e_is_ptp_pin_dev(&pf->hw))
+		return;
+
+	pf->ptp_kobj = kobject_create_and_add("ptp_pins", &pf->pdev->dev.kobj);
+	if (!pf->ptp_kobj) {
+		dev_info(&pf->pdev->dev, "Failed to create ptp_pins kobject\n");
+		return;
+	}
+
+	if (sysfs_create_file(pf->ptp_kobj, &ptp_pins_attribute.attr)) {
+		dev_info(&pf->pdev->dev, "Failed to create ptp_pins sysfs attribute\n");
+		kobject_put(pf->ptp_kobj);
+		return;
+	}
+}
+#endif /* HAVE_PTP_1588_CLOCK_PINS */
+
 /**
  * i40e_ptp_init - Initialize the 1588 support after device probe or reset
  * @pf: Board private structure
@@ -732,10 +1658,14 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
  * This function sets device up for 1588 support. The first time it is run, it
  * will create a PHC clock device. It does not create a clock device if one
  * already exists. It also reconfigures the device after a reset.
+ *
+ * The first time a clock is created, i40e_ptp_create_clock will set
+ * pf->ptp_prev_hw_time to the current system time. During resets, it is
+ * expected that this timespec will be set to the last known PTP clock time,
+ * in order to preserve the clock time as close as possible across a reset.
  **/
 void i40e_ptp_init(struct i40e_pf *pf)
 {
-	struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev;
 	struct i40e_hw *hw = &pf->hw;
 	u32 pf_id;
 	long err;
@@ -747,9 +1677,7 @@ void i40e_ptp_init(struct i40e_pf *pf)
 		I40E_PRTTSYN_CTL0_PF_ID_SHIFT;
 	if (hw->pf_id != pf_id) {
 		pf->flags &= ~I40E_FLAG_PTP;
-		dev_info(&pf->pdev->dev, "%s: PTP not supported on %s\n",
-			 __func__,
-			 netdev->name);
+		dev_info(&pf->pdev->dev, "PTP not supported on this device\n");
 		return;
 	}
 
@@ -760,10 +1688,9 @@ void i40e_ptp_init(struct i40e_pf *pf)
 	err = i40e_ptp_create_clock(pf);
 	if (err) {
 		pf->ptp_clock = NULL;
-		dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n",
-			__func__);
-	} else if (pf->ptp_clock) {
-		struct timespec64 ts;
+		dev_err(&pf->pdev->dev,
+			"PTP clock register failed: %ld\n", err);
+	} else {
 		u32 regval;
 
 		if (pf->hw.debug_mask & I40E_DEBUG_LAN)
@@ -784,10 +1711,15 @@ void i40e_ptp_init(struct i40e_pf *pf)
 		/* reset timestamping mode */
 		i40e_ptp_set_timestamp_mode(pf, &pf->tstamp_config);
 
-		/* Set the clock value. */
-		ts = ktime_to_timespec64(ktime_get_real());
-		i40e_ptp_settime(&pf->ptp_caps, &ts);
+		/* Restore the clock time based on last known value */
+		i40e_ptp_restore_hw_time(pf);
 	}
+
+#ifndef HAVE_PTP_1588_CLOCK_PINS
+	i40e_ptp_pins_sysfs_init(pf);
+#endif /* HAVE_PTP_1588_CLOCK_PINS */
+
+	i40e_ptp_set_1pps_signal_hw(pf);
 }
 
 /**
@@ -799,20 +1731,42 @@ void i40e_ptp_init(struct i40e_pf *pf)
  **/
 void i40e_ptp_stop(struct i40e_pf *pf)
 {
+	struct i40e_hw *hw = &pf->hw;
+	u32 regval;
+
 	pf->flags &= ~I40E_FLAG_PTP;
 	pf->ptp_tx = false;
 	pf->ptp_rx = false;
 
 	if (pf->ptp_tx_skb) {
-		dev_kfree_skb_any(pf->ptp_tx_skb);
+		struct sk_buff *skb = pf->ptp_tx_skb;
+
 		pf->ptp_tx_skb = NULL;
 		clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
+		dev_kfree_skb_any(skb);
 	}
 
 	if (pf->ptp_clock) {
 		ptp_clock_unregister(pf->ptp_clock);
 		pf->ptp_clock = NULL;
-		dev_info(&pf->pdev->dev, "%s: removed PHC on %s\n", __func__,
+		dev_info(&pf->pdev->dev, "removed PHC from %s\n",
 			 pf->vsi[pf->lan_vsi]->netdev->name);
 	}
+
+	/* Set the PTP-related SDP and GPIO pins back to inputs */
+	wr32(hw, I40E_GLGEN_GPIO_CTL(I40E_SDP3_2), 0x0);
+	wr32(hw, I40E_GLGEN_GPIO_CTL(I40E_SDP3_3), 0x0);
+	wr32(hw, I40E_GLGEN_GPIO_CTL(I40E_GPIO_4), 0x0);
+
+	regval = rd32(hw, I40E_PRTTSYN_AUX_0(0));
+	regval &= ~I40E_PRTTSYN_AUX_0_PTPFLAG_MASK;
+	wr32(hw, I40E_PRTTSYN_AUX_0(0), regval);
+
+	/* Disable interrupts */
+	regval = rd32(hw, I40E_PRTTSYN_CTL0);
+	regval &= I40E_PRTTSYN_CTL0_FFFB_MASK;
+	wr32(hw, I40E_PRTTSYN_CTL0, regval);
+
+	i40e_ptp_free_pins(pf);
 }
+#endif /* HAVE_PTP_1588_CLOCK */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 86ca27f72..955611c14 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -1,51 +1,28 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
 #ifndef _I40E_REGISTER_H_
 #define _I40E_REGISTER_H_
 
-#define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */
+#define I40E_GL_ARQBAH              0x000801C0 /* Reset: EMPR */
 #define I40E_GL_ARQBAH_ARQBAH_SHIFT 0
-#define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT)
-#define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */
+#define I40E_GL_ARQBAH_ARQBAH_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT)
+#define I40E_GL_ARQBAL              0x000800C0 /* Reset: EMPR */
 #define I40E_GL_ARQBAL_ARQBAL_SHIFT 0
-#define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT)
-#define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */
+#define I40E_GL_ARQBAL_ARQBAL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT)
+#define I40E_GL_ARQH            0x000803C0 /* Reset: EMPR */
 #define I40E_GL_ARQH_ARQH_SHIFT 0
-#define I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT)
-#define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */
+#define I40E_GL_ARQH_ARQH_MASK  I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT)
+#define I40E_GL_ARQT            0x000804C0 /* Reset: EMPR */
 #define I40E_GL_ARQT_ARQT_SHIFT 0
-#define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT)
-#define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */
+#define I40E_GL_ARQT_ARQT_MASK  I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT)
+#define I40E_GL_ATQBAH              0x00080140 /* Reset: EMPR */
 #define I40E_GL_ATQBAH_ATQBAH_SHIFT 0
-#define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT)
-#define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */
+#define I40E_GL_ATQBAH_ATQBAH_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT)
+#define I40E_GL_ATQBAL              0x00080040 /* Reset: EMPR */
 #define I40E_GL_ATQBAL_ATQBAL_SHIFT 0
-#define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT)
-#define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */
+#define I40E_GL_ATQBAL_ATQBAL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT)
+#define I40E_GL_ATQH            0x00080340 /* Reset: EMPR */
 #define I40E_GL_ATQH_ATQH_SHIFT 0
 #define I40E_GL_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_GL_ATQH_ATQH_SHIFT)
 #define I40E_GL_ATQLEN 0x00080240 /* Reset: EMPR */
@@ -81,7 +58,7 @@
 #define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
 #define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT)
 #define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
 #define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */
 #define I40E_PF_ARQT_ARQT_SHIFT 0
 #define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT)
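A note on the recurring 0x1 -> 0x1u changes in this header: I40E_MASK left-shifts its first argument, and for these bit-31 fields a plain int literal shifts a signed 1 into the sign bit, which is undefined behavior in C and sign-extends if the mask is later widened. A hedged illustration, assuming the conventional shape of the macro:

    #define I40E_MASK(mask, shift) ((mask) << (shift)) /* assumed definition */

    u64 bad  = I40E_MASK(0x1, 31);  /* UB; typically yields INT_MIN and widens
                                     * to 0xFFFFFFFF80000000 */
    u64 good = I40E_MASK(0x1u, 31); /* well-defined 0x80000000 */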
@@ -104,7 +81,7 @@
 #define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
 #define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
 #define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
 #define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
 #define I40E_PF_ATQT_ATQT_SHIFT 0
 #define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
@@ -131,7 +108,7 @@
 #define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
 #define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT)
 #define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
 #define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ARQT_MAX_INDEX 127
 #define I40E_VF_ARQT_ARQT_SHIFT 0
@@ -159,7 +136,7 @@
 #define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
 #define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT)
 #define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
 #define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ATQT_MAX_INDEX 127
 #define I40E_VF_ATQT_ATQT_SHIFT 0
@@ -282,7 +259,7 @@
 #define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
 #define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
 #define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
-#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1u, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
 #define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
 #define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
 #define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
@@ -386,6 +363,14 @@
 #define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT)
 #define I40E_GL_FWSTS_FWS1B_SHIFT 16
 #define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GL_FWSTS_FWS1B_EMPR_0 I40E_MASK(0x20, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GL_FWSTS_FWS1B_EMPR_10 I40E_MASK(0x2A, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0x30, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK I40E_MASK(0x31, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK I40E_MASK(0x32, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK I40E_MASK(0x33, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0xB, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK I40E_MASK(0xC, I40E_GL_FWSTS_FWS1B_SHIFT)
 #define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */
 #define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
 #define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
@@ -526,7 +511,7 @@
 #define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
 #define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
 #define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
-#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1u, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
 #define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_GLGEN_MSRWD_MAX_INDEX 3
 #define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
@@ -1265,14 +1250,14 @@
 #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
 #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
 #define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
-#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1u, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
 #define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */
 #define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
 #define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
 #define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
 #define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT)
 #define I40E_PFLAN_QALLOC_VALID_SHIFT 31
-#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT)
+#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PFLAN_QALLOC_VALID_SHIFT)
 #define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
 #define I40E_QRX_ENA_MAX_INDEX 1535
 #define I40E_QRX_ENA_QENA_REQ_SHIFT 0
@@ -1681,7 +1666,7 @@
 #define I40E_GLNVM_SRCTL_START_SHIFT 30
 #define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
 #define I40E_GLNVM_SRCTL_DONE_SHIFT 31
-#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT)
+#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1u, I40E_GLNVM_SRCTL_DONE_SHIFT)
 #define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
 #define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
 #define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)
@@ -2794,7 +2779,7 @@
 #define I40E_GLV_RUPP_MAX_INDEX 383
 #define I40E_GLV_RUPP_RUPP_SHIFT 0
 #define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
-#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_TEPC(_i) (0x00344000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_TEPC_MAX_INDEX 383
 #define I40E_GLV_TEPC_TEPC_SHIFT 0
 #define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
@@ -2914,6 +2899,9 @@
 #define I40E_PRTTSYN_AUX_0_PULSEW_MASK I40E_MASK(0xF, I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
 #define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
 #define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_0_PTPFLAG_SHIFT 17
+#define I40E_PRTTSYN_AUX_0_PTPFLAG_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_PTPFLAG_SHIFT)
+#define I40E_PRTTSYN_AUX_0_PTP_OUT_SYNC_CLK_IO 0xF
 #define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
 #define I40E_PRTTSYN_AUX_1_MAX_INDEX 1
 #define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
@@ -3048,7 +3036,7 @@
 #define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
 #define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
 #define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
-#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT)
+#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PF_VT_PFALLOC_VALID_SHIFT)
 #define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VP_MDET_RX_MAX_INDEX 127
 #define I40E_VP_MDET_RX_VALID_SHIFT 0
@@ -3184,7 +3172,7 @@
 #define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
 #define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
 #define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
 #define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
 #define I40E_VF_ARQT1_ARQT_SHIFT 0
 #define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT)
@@ -3207,7 +3195,7 @@
 #define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
 #define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
 #define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
 #define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
 #define I40E_VF_ATQT1_ATQT_SHIFT 0
 #define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT)
@@ -5290,6 +5278,87 @@
 #define I40E_GLGEN_STAT_HALT 0x00390000 /* Reset: CORER */
 #define I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT 0
 #define I40E_GLGEN_STAT_HALT_HALT_CELLS_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT)
+/* Flow Director */
+#define I40E_REG_INSET_L2_DMAC_SHIFT 60
+#define I40E_REG_INSET_L2_DMAC_MASK I40E_MASK(0xEULL, I40E_REG_INSET_L2_DMAC_SHIFT)
+#define I40E_REG_INSET_L2_SMAC_SHIFT 56
+#define I40E_REG_INSET_L2_SMAC_MASK I40E_MASK(0x1CULL, I40E_REG_INSET_L2_SMAC_SHIFT)
+#define I40E_REG_INSET_L2_OUTER_VLAN_SHIFT 26
+#define I40E_REG_INSET_L2_OUTER_VLAN_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L2_OUTER_VLAN_SHIFT)
+#define I40E_REG_INSET_L2_INNER_VLAN_SHIFT 55
+#define I40E_REG_INSET_L2_INNER_VLAN_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L2_INNER_VLAN_SHIFT)
+#define I40E_REG_INSET_TUNNEL_VLAN_SHIFT 56
+#define I40E_REG_INSET_TUNNEL_VLAN_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_TUNNEL_VLAN_SHIFT)
+#define I40E_REG_INSET_L3_SRC_IP4_SHIFT 47
+#define I40E_REG_INSET_L3_SRC_IP4_MASK I40E_MASK(0x3ULL, I40E_REG_INSET_L3_SRC_IP4_SHIFT)
+#define I40E_REG_INSET_L3_DST_IP4_SHIFT 35
+#define I40E_REG_INSET_L3_DST_IP4_MASK I40E_MASK(0x3ULL, I40E_REG_INSET_L3_DST_IP4_SHIFT)
+#define I40E_X722_REG_INSET_L3_SRC_IP4_SHIFT 49
+#define I40E_X722_REG_INSET_L3_SRC_IP4_MASK I40E_MASK(0x3ULL, I40E_X722_REG_INSET_L3_SRC_IP4_SHIFT)
+#define I40E_X722_REG_INSET_L3_DST_IP4_SHIFT 41
+#define I40E_X722_REG_INSET_L3_DST_IP4_MASK I40E_MASK(0x3ULL, I40E_X722_REG_INSET_L3_DST_IP4_SHIFT)
+#define I40E_X722_REG_INSET_L3_IP4_PROTO_SHIFT 52
+#define I40E_X722_REG_INSET_L3_IP4_PROTO_MASK I40E_MASK(0x1ULL, I40E_X722_REG_INSET_L3_IP4_PROTO_SHIFT)
+#define I40E_X722_REG_INSET_L3_IP4_TTL_SHIFT 52
+#define I40E_X722_REG_INSET_L3_IP4_TTL_MASK I40E_MASK(0x1ULL, I40E_X722_REG_INSET_L3_IP4_TTL_SHIFT)
+#define I40E_REG_INSET_L3_IP4_TOS_SHIFT 54
+#define I40E_REG_INSET_L3_IP4_TOS_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L3_IP4_TOS_SHIFT)
+#define I40E_REG_INSET_L3_IP4_PROTO_SHIFT 50
+#define I40E_REG_INSET_L3_IP4_PROTO_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L3_IP4_PROTO_SHIFT)
+#define I40E_REG_INSET_L3_IP4_TTL_SHIFT 50
+#define I40E_REG_INSET_L3_IP4_TTL_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L3_IP4_TTL_SHIFT)
+#define I40E_REG_INSET_L3_SRC_IP6_SHIFT 43
+#define I40E_REG_INSET_L3_SRC_IP6_MASK I40E_MASK(0xFFULL, I40E_REG_INSET_L3_SRC_IP6_SHIFT)
+#define I40E_REG_INSET_L3_DST_IP6_SHIFT 35
+#define I40E_REG_INSET_L3_DST_IP6_MASK I40E_MASK(0xFFULL, I40E_REG_INSET_L3_DST_IP6_SHIFT)
+#define I40E_REG_INSET_L3_IP6_TC_SHIFT 54
+#define I40E_REG_INSET_L3_IP6_TC_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L3_IP6_TC_SHIFT)
+#define I40E_REG_INSET_L3_IP6_NEXT_HDR_SHIFT 51
+#define I40E_REG_INSET_L3_IP6_NEXT_HDR_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L3_IP6_NEXT_HDR_SHIFT)
+#define I40E_REG_INSET_L3_IP6_HOP_LIMIT_SHIFT 51
+#define I40E_REG_INSET_L3_IP6_HOP_LIMIT_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L3_IP6_HOP_LIMIT_SHIFT)
+#define I40E_REG_INSET_L4_SRC_PORT_SHIFT 34
+#define I40E_REG_INSET_L4_SRC_PORT_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L4_SRC_PORT_SHIFT)
+#define I40E_REG_INSET_L4_DST_PORT_SHIFT 33
+#define I40E_REG_INSET_L4_DST_PORT_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L4_DST_PORT_SHIFT)
+#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG_SHIFT 31
+#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG_MASK I40E_MASK(0x3ULL, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG_SHIFT)
+#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC_SHIFT 22
+#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC_MASK I40E_MASK(0x7ULL, I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC_SHIFT)
+#define I40E_REG_INSET_TUNNEL_L2_INNER_SRC_MAC_SHIFT 11
+#define I40E_REG_INSET_TUNNEL_L2_INNER_SRC_MAC_MASK I40E_MASK(0x7ULL, I40E_REG_INSET_TUNNEL_L2_INNER_SRC_MAC_SHIFT)
+#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT_SHIFT 21
+#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT_SHIFT)
+#define I40E_REG_INSET_TUNNEL_ID_SHIFT 18
+#define I40E_REG_INSET_TUNNEL_ID_MASK I40E_MASK(0x3ULL, I40E_REG_INSET_TUNNEL_ID_SHIFT)
+#define I40E_REG_INSET_LAST_ETHER_TYPE_SHIFT 14
+#define I40E_REG_INSET_LAST_ETHER_TYPE_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_LAST_ETHER_TYPE_SHIFT)
+#define I40E_REG_INSET_TUNNEL_L3_SRC_IP4_SHIFT 8
+#define I40E_REG_INSET_TUNNEL_L3_SRC_IP4_MASK I40E_MASK(0x3ULL, I40E_REG_INSET_TUNNEL_L3_SRC_IP4_SHIFT)
+#define I40E_REG_INSET_TUNNEL_L3_DST_IP4_SHIFT 6
+#define I40E_REG_INSET_TUNNEL_L3_DST_IP4_MASK I40E_MASK(0x3ULL, I40E_REG_INSET_TUNNEL_L3_DST_IP4_SHIFT)
+#define I40E_REG_INSET_TUNNEL_L3_DST_IP6_SHIFT 6
+#define I40E_REG_INSET_TUNNEL_L3_DST_IP6_MASK I40E_MASK(0xFFULL, I40E_REG_INSET_TUNNEL_L3_DST_IP6_SHIFT)
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1_SHIFT 13
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD1_SHIFT)
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2_SHIFT 12
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD2_SHIFT)
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3_SHIFT 11
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD3_SHIFT)
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4_SHIFT 10
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD4_SHIFT)
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5_SHIFT 9
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD5_SHIFT)
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6_SHIFT 8
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD6_SHIFT)
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7_SHIFT 7
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD7_SHIFT)
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8_SHIFT 6
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD8_SHIFT)
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS_SHIFT 6
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS_MASK I40E_MASK(0xFFULL, I40E_REG_INSET_FLEX_PAYLOAD_WORDS_SHIFT)
+#define I40E_REG_INSET_MASK_DEFAULT 0x0000000000000000ULL
+
 #define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
 #define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
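The Flow Director input-set definitions added above are field selectors: each mask marks where a packet field sits in the 64-bit INSET register, and a complete input set is formed by OR-ing the selectors for every field that should participate in matching. A purely illustrative composition (this particular field choice is hypothetical, not taken from the patch):

    u64 ipv4_tcp_inset = I40E_REG_INSET_L3_SRC_IP4_MASK |
                         I40E_REG_INSET_L3_DST_IP4_MASK |
                         I40E_REG_INSET_L4_SRC_PORT_MASK |
                         I40E_REG_INSET_L4_DST_PORT_MASK;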
@@ -5350,4 +5419,5 @@
 #define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
 #define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
 #define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+
 #endif /* _I40E_REGISTER_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h
index 5f9cac55a..ab12f1311 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_status.h
@@ -1,28 +1,5 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
 #ifndef _I40E_STATUS_H_
 #define _I40E_STATUS_H_
@@ -95,6 +72,7 @@ enum i40e_status_code {
 	I40E_ERR_NOT_READY			= -63,
 	I40E_NOT_SUPPORTED			= -64,
 	I40E_ERR_FIRMWARE_API_VERSION		= -65,
+	I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR	= -66,
 };
 
 #endif /* _I40E_STATUS_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_trace.h b/drivers/net/ethernet/intel/i40e/i40e_trace.h
index d3e55f54a..27e47ccf3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_trace.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_trace.h
@@ -1,29 +1,23 @@
-/*******************************************************************************
- *
- * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
-/* Modeled on trace-events-sample.h */
+#ifndef CONFIG_TRACEPOINTS
+#if !defined(_I40E_TRACE_H_)
+#define _I40E_TRACE_H_
+/* If the Linux kernel tracepoints are not available then the i40e_trace*
+ * macros become nops.
+ */
 
-/* The trace subsystem name for i40e will be "i40e".
+#define i40e_trace(trace_name, args...)
+#define i40e_trace_enabled(trace_name) (0)
+#endif /* !defined(_I40E_TRACE_H_) */
+#else /* CONFIG_TRACEPOINTS */
+/*
+ * Modeled on trace-events-sample.h
+ */
+
+/*
+ * The trace subsystem name for i40e will be "i40e".
  *
  * This file is named i40e_trace.h.
  *
@@ -34,7 +28,8 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM i40e
 
-/* See trace-events-sample.h for a detailed description of why this
+/*
+ * See trace-events-sample.h for a detailed description of why this
  * guard clause is different from most normal include files.
  */
 #if !defined(_I40E_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
@@ -69,7 +64,8 @@
 
 #define i40e_trace_enabled(trace_name) I40E_TRACE_NAME(trace_name##_enabled)()
 
-/* Events common to PF and VF. Corresponding versions will be defined
+/*
+ * Events common to PF and VF. Corresponding versions will be defined
  * for both, named trace_i40e_* and trace_i40evf_*. The i40e_trace()
  * macro above will select the right trace point name for the driver
  * being built from shared code.
@@ -85,7 +81,8 @@ DECLARE_EVENT_CLASS(
 
 	TP_ARGS(ring, desc, buf),
 
-	/* The convention here is to make the first fields in the
+	/*
+	 * The convention here is to make the first fields in the
 	 * TP_STRUCT match the TP_PROTO exactly. This enables the use
 	 * of the args struct generated by the tplist tool (from the
 	 * bcc-tools package) to be used for those fields. To access
@@ -132,7 +129,7 @@ DECLARE_EVENT_CLASS(
 	i40e_rx_template,
 
 	TP_PROTO(struct i40e_ring *ring,
-		 union i40e_32byte_rx_desc *desc,
+		 union i40e_rx_desc *desc,
 		 struct sk_buff *skb),
 
 	TP_ARGS(ring, desc, skb),
@@ -160,7 +157,7 @@ DECLARE_EVENT_CLASS(
 DEFINE_EVENT(
 	i40e_rx_template, i40e_clean_rx_irq,
 	TP_PROTO(struct i40e_ring *ring,
-		 union i40e_32byte_rx_desc *desc,
+		 union i40e_rx_desc *desc,
 		 struct sk_buff *skb),
 
 	TP_ARGS(ring, desc, skb));
@@ -168,7 +165,7 @@ DEFINE_EVENT(
 DEFINE_EVENT(
 	i40e_rx_template, i40e_clean_rx_irq_rx,
 	TP_PROTO(struct i40e_ring *ring,
-		 union i40e_32byte_rx_desc *desc,
+		 union i40e_rx_desc *desc,
 		 struct sk_buff *skb),
 
 	TP_ARGS(ring, desc, skb));
@@ -213,7 +210,9 @@ DEFINE_EVENT(
 
 	TP_ARGS(skb, ring));
 
-/* Events unique to the PF. */
+/*
+ * Events unique to the PF.
+ */
 
 #endif /* _I40E_TRACE_H_ */
 /* This must be outside ifdef _I40E_TRACE_H */
@@ -227,3 +226,4 @@ DEFINE_EVENT(
 #undef TRACE_INCLUDE_FILE
 #define TRACE_INCLUDE_FILE i40e_trace
 #include <trace/define_trace.h>
+#endif /* CONFIG_TRACEPOINTS */
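The new !CONFIG_TRACEPOINTS branch keeps the driver's call sites compiling on kernels built without tracepoint support by turning the macros into no-ops. A sketch of what a call site reduces to under that configuration (do_trace_prep() is a hypothetical helper, shown only to illustrate the dead-code elimination):

    /* In effect when CONFIG_TRACEPOINTS is unset: */
    #define i40e_trace(trace_name, args...)
    #define i40e_trace_enabled(trace_name) (0)

    i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); /* expands to nothing */

    if (i40e_trace_enabled(clean_tx_irq)) /* constant 0 */
            do_trace_prep();              /* compiled out as dead code */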
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 3c07ff171..96bc531ac 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1,32 +1,10 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
 #include <linux/prefetch.h>
-#include <net/busy_poll.h>
-#include <linux/bpf_trace.h>
+#ifdef HAVE_XDP_SUPPORT
+#include <net/xdp.h>
+#endif
 #include "i40e.h"
 #include "i40e_trace.h"
 #include "i40e_prototype.h"
@@ -67,9 +45,6 @@ static void i40e_fdir(struct i40e_ring *tx_ring,
 	flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
 		     (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);
 
-	flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
-		      (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
-
 	flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
 		      (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
 
@@ -152,6 +127,7 @@ static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
 	/* grab the next descriptor */
 	i = tx_ring->next_to_use;
 	first = &tx_ring->tx_bi[i];
+
 	i40e_fdir(tx_ring, fdir_data, add);
 
 	/* Now program a dummy descriptor */
@@ -192,7 +168,6 @@ dma_fail:
 }
 
 #define IP_HEADER_OFFSET 14
-#define I40E_UDPIP_DUMMY_PACKET_LEN 42
 /**
  * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
  * @vsi: pointer to the targeted VSI
@@ -264,7 +239,6 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
 	return 0;
 }
 
-#define I40E_TCPIP_DUMMY_PACKET_LEN 54
 /**
  * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
  * @vsi: pointer to the targeted VSI
@@ -285,7 +259,7 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
 	/* Dummy packet */
 	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
 		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
-		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x50, 0x11,
 		0x0, 0x72, 0, 0, 0, 0};
 
 	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
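The 0x80 -> 0x50 change above corrects byte 12 of the dummy packet's TCP header, the data-offset byte, whose upper nibble counts the header length in 32-bit words. This packet carries a bare 20-byte TCP header inside the 40-byte total length (0x28) declared in its IP header, so 5 words (0x50) is consistent where 8 words (0x80, i.e. a 32-byte TCP header) was not:

    /* Layout: 14-byte Ethernet + 20-byte IPv4 (tot_len 0x28 = 40)
     *         + 20-byte TCP. TCP byte 12 = 0x50 (data offset 5 words),
     *         TCP byte 13 = 0x11 (flags: FIN | ACK).
     */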
@@ -334,7 +308,7 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
 		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
 		    I40E_DEBUG_FD & pf->hw.debug_mask)
 			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
-		pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
+		set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
 	} else {
 		pf->fd_tcp4_filter_cnt--;
 	}
@@ -342,7 +316,6 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
 	return 0;
 }
 
-#define I40E_SCTPIP_DUMMY_PACKET_LEN 46
 /**
  * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
  * a specific flow spec
@@ -416,7 +389,6 @@ static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
 	return 0;
 }
 
-#define I40E_IP_DUMMY_PACKET_LEN 34
 /**
  * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
  * a specific flow spec
@@ -465,9 +437,6 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
 			dev_info(&pf->pdev->dev,
 				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
 				 fd_data->pctype, fd_data->fd_id, ret);
-			/* The packet buffer wasn't added to the ring so we
-			 * need to free it now.
-			 */
 			kfree(raw_packet);
 			return -EOPNOTSUPP;
 		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
@@ -493,7 +462,7 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
 /**
  * i40e_add_del_fdir - Build raw packets to add/del fdir filter
  * @vsi: pointer to the targeted VSI
- * @cmd: command to get or set RX flow classification rules
+ * @input: filter to add or delete
  * @add: true adds a filter, false removes it
  *
  **/
@@ -503,7 +472,7 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
 	struct i40e_pf *pf = vsi->back;
 	int ret;
 
-	switch (input->flow_type & ~FLOW_EXT) {
+	switch (input->flow_type) {
 	case TCP_V4_FLOW:
 		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
 		break;
@@ -592,8 +561,14 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
 		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
 
 		if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
-		    pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) {
-			pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
+		    test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) {
+			/* These set_bit() calls aren't atomic with the
+			 * test_bit() here, but worst case we potentially
+			 * disable ATR and queue a flush right after SB
+			 * support is re-enabled. That shouldn't cause an
+			 * issue in practice.
+			 */
+			set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
 			set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
 		}
 
@@ -606,11 +581,10 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
 		 */
 		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
 			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
-			    !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED)) {
-				pf->flags |= I40E_FLAG_FD_SB_AUTO_DISABLED;
+			    !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED,
+					      pf->state))
 				if (I40E_DEBUG_FD & pf->hw.debug_mask)
 					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
-			}
 		}
 	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
 		if (I40E_DEBUG_FD & pf->hw.debug_mask)
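The switch from the flag read-modify-write to test_and_set_bit() in this hunk is what makes the "filter space full" warning fire exactly once: the test and the set happen as one atomic operation, so two CPUs racing into this path cannot both observe the bit clear. A minimal sketch of the difference:

    /* Non-atomic: both CPUs may see the flag clear and both warn. */
    if (!(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED)) {
            pf->flags |= I40E_FLAG_FD_SB_AUTO_DISABLED;
            dev_warn(&pdev->dev, "FD filter space full, ...\n");
    }

    /* Atomic: test_and_set_bit() returns the old bit value, so exactly
     * one caller wins the race and emits the warning.
     */
    if (!test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
            dev_warn(&pdev->dev, "FD filter space full, ...\n");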
@@ -630,8 +604,14 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
 	if (tx_buffer->skb) {
 		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
 			kfree(tx_buffer->raw_buf);
+#ifdef HAVE_XDP_SUPPORT
 		else if (ring_is_xdp(ring))
+#ifdef HAVE_XDP_FRAME_STRUCT
+			xdp_return_frame(tx_buffer->xdpf);
+#else
 			page_frag_free(tx_buffer->raw_buf);
+#endif
+#endif
 		else
 			dev_kfree_skb_any(tx_buffer->skb);
 		if (dma_unmap_len(tx_buffer, len))
@@ -706,17 +686,23 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
 
 /**
  * i40e_get_tx_pending - how many tx descriptors not processed
- * @tx_ring: the ring of descriptors
+ * @ring: the ring of descriptors
+ * @in_sw: use SW variables
  *
  * Since there is no access to the ring head register
  * in XL710, we need to use our local copies
  **/
-u32 i40e_get_tx_pending(struct i40e_ring *ring)
+u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
 {
 	u32 head, tail;
 
-	head = i40e_get_head(ring);
-	tail = readl(ring->tail);
+	if (!in_sw) {
+		head = i40e_get_head(ring);
+		tail = readl(ring->tail);
+	} else {
+		head = ring->next_to_clean;
+		tail = ring->next_to_use;
+	}
 
 	if (head != tail)
 		return (head < tail) ?
@@ -725,6 +711,59 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring)
 	return 0;
 }
 
+/**
+ * i40e_detect_recover_hung - Function to detect and recover hung queues
+ * @vsi: pointer to vsi struct with tx queues
+ *
+ * VSI has netdev and netdev has TX queues. This function checks each of
+ * those TX queues: if one appears hung, it triggers recovery by issuing
+ * a SW interrupt.
+ **/
+void i40e_detect_recover_hung(struct i40e_vsi *vsi)
+{
+	struct i40e_ring *tx_ring = NULL;
+	struct net_device *netdev;
+	unsigned int i;
+	int packets;
+
+	if (!vsi)
+		return;
+
+	if (test_bit(__I40E_VSI_DOWN, vsi->state))
+		return;
+
+	netdev = vsi->netdev;
+	if (!netdev)
+		return;
+
+	if (!netif_carrier_ok(netdev))
+		return;
+
+	for (i = 0; i < vsi->num_queue_pairs; i++) {
+		tx_ring = vsi->tx_rings[i];
+		if (tx_ring && tx_ring->desc) {
+			/* If the packet counter has not changed, the queue
+			 * is likely stalled, so force an interrupt for this
+			 * queue.
+			 *
+			 * prev_pkt_ctr would be negative if there was no
+			 * pending work.
+			 */
+			packets = tx_ring->stats.packets & INT_MAX;
+			if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
+				i40e_force_wb(vsi, tx_ring->q_vector);
+				continue;
+			}
+
+			/* Memory barrier between read of packet count and call
+			 * to i40e_get_tx_pending()
+			 */
+			smp_rmb();
+			tx_ring->tx_stats.prev_pkt_ctr =
+			    i40e_get_tx_pending(tx_ring, true) ? packets : -1;
+		}
+	}
+}
+
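The prev_pkt_ctr field introduced here implements a two-scan handshake with the service task; in summary:

    /* Per-queue state across successive watchdog scans:
     *   prev_pkt_ctr == -1      -> no work was pending on the last scan
     *   prev_pkt_ctr == packets -> work pending but no progress: hung,
     *                              force a write-back via i40e_force_wb()
     *   otherwise               -> progress was made; record the new count
     */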
 #define WB_STRIDE 4
 
 /**
@@ -738,7 +777,7 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring)
 static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
 			      struct i40e_ring *tx_ring, int napi_budget)
 {
-	u16 i = tx_ring->next_to_clean;
+	int i = tx_ring->next_to_clean;
 	struct i40e_tx_buffer *tx_buf;
 	struct i40e_tx_desc *tx_head;
 	struct i40e_tx_desc *tx_desc;
@@ -759,7 +798,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
 			break;
 
 		/* prevent any other reads prior to eop_desc */
-		smp_rmb();
+		read_barrier_depends();
 
 		i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
 		/* we have caught up to head, no work left to do */
@@ -774,9 +813,15 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
 		total_packets += tx_buf->gso_segs;
 
 		/* free the skb/XDP data */
+#ifdef HAVE_XDP_SUPPORT
 		if (ring_is_xdp(tx_ring))
+#ifdef HAVE_XDP_FRAME_STRUCT
+			xdp_return_frame(tx_buf->xdpf);
+#else
 			page_frag_free(tx_buf->raw_buf);
+#endif
 		else
+#endif
 			napi_consume_skb(tx_buf->skb, napi_budget);
 
 		/* unmap skb header data */
@@ -844,7 +889,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
 		 * them to be written back in case we stay in NAPI.
 		 * In this mode on X722 we do not enable Interrupt.
 		 */
-		unsigned int j = i40e_get_tx_pending(tx_ring);
+		unsigned int j = i40e_get_tx_pending(tx_ring, false);
 
 		if (budget &&
 		    ((j / WB_STRIDE) == 0) && (j > 0) &&
@@ -902,8 +947,8 @@ static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
 		      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */
 
 		wr32(&vsi->back->hw,
-		     I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
-		     val);
+		     I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
+		     val);
 	} else {
 		val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
 		      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */
@@ -929,8 +974,7 @@ void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
 			  /* allow 00 to be written to the index */
 
 		wr32(&vsi->back->hw,
-		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
-					 vsi->base_vector - 1), val);
+		     I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
 	} else {
 		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
 			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
@@ -942,101 +986,244 @@ void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
 	}
 }
 
+static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
+					struct i40e_ring_container *rc)
+{
+	return &q_vector->rx == rc;
+}
+
+static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
+{
+	unsigned int divisor;
+
+	switch (q_vector->vsi->back->hw.phy.link_info.link_speed) {
+	case I40E_LINK_SPEED_40GB:
+		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
+		break;
+	case I40E_LINK_SPEED_25GB:
+	case I40E_LINK_SPEED_20GB:
+		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
+		break;
+	default:
+	case I40E_LINK_SPEED_10GB:
+		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
+		break;
+	case I40E_LINK_SPEED_1GB:
+	case I40E_LINK_SPEED_100MB:
+		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
+		break;
+	}
+
+	return divisor;
+}
+
 /**
- * i40e_set_new_dynamic_itr - Find new ITR level
+ * i40e_update_itr - update the dynamic ITR value based on statistics
+ * @q_vector: structure containing interrupt and ring information
  * @rc: structure containing ring performance data
  *
- * Returns true if ITR changed, false if not
- *
- * Stores a new ITR value based on packets and byte counts during
- * the last interrupt.  The advantage of per interrupt computation
- * is faster updates and more accurate ITR for the current traffic
- * pattern.  Constants in this function were computed based on
- * theoretical maximum wire speed and thresholds were set based on
- * testing data as well as attempting to minimize response time
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt.  The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern.  Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
  * while increasing bulk throughput.
  **/
-static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
+static void i40e_update_itr(struct i40e_q_vector *q_vector,
+			    struct i40e_ring_container *rc)
 {
-	enum i40e_latency_range new_latency_range = rc->latency_range;
-	u32 new_itr = rc->itr;
-	int bytes_per_int;
-	unsigned int usecs, estimated_usecs;
+	unsigned int avg_wire_size, packets, bytes, itr;
+	unsigned long next_update = jiffies;
 
-	if (rc->total_packets == 0 || !rc->itr)
-		return false;
+	/* If we don't have any rings just leave ourselves set for maximum
+	 * possible latency so we take ourselves out of the equation.
+	 */
+	if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
+		return;
 
-	usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
-	bytes_per_int = rc->total_bytes / usecs;
+	/* For Rx we want to push the delay up and default to low latency.
+	 * for Tx we want to pull the delay down and default to high latency.
+	 */
+	itr = i40e_container_is_rx(q_vector, rc) ?
+	      I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
+	      I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;
+
+	/* If we didn't update within up to 1 - 2 jiffies we can assume
+	 * that either packets are coming in so slow there hasn't been
+	 * any work, or that there is so much work that NAPI is dealing
+	 * with interrupt moderation and we don't need to do anything.
+	 */
+	if (time_after(next_update, rc->next_update))
+		goto clear_counts;
+
+	/* If itr_countdown is set it means we programmed an ITR within
+	 * the last 4 interrupt cycles. This has a side effect of us
+	 * potentially firing an early interrupt. In order to work around
+	 * this we need to throw out any data received for a few
+	 * interrupts following the update.
+	 */
+	if (q_vector->itr_countdown) {
+		itr = rc->target_itr;
+		goto clear_counts;
+	}
 
-	/* The calculations in this algorithm depend on interrupts actually
-	 * firing at the ITR rate. This may not happen if the packet rate is
-	 * really low, or if we've been napi polling. Check to make sure
-	 * that's not the case before we continue.
+	packets = rc->total_packets;
+	bytes = rc->total_bytes;
+
+	if (i40e_container_is_rx(q_vector, rc)) {
+		/* If Rx there are 1 to 4 packets and bytes are less than
+		 * 9000 assume insufficient data to use bulk rate limiting
+		 * approach unless Tx is already in bulk rate limiting. We
+		 * are likely latency driven.
+		 */
+		if (packets && packets < 4 && bytes < 9000 &&
+		    (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
+			itr = I40E_ITR_ADAPTIVE_LATENCY;
+			goto adjust_by_size;
+		}
+	} else if (packets < 4) {
+		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
+		 * bulk mode and we are receiving 4 or fewer packets just
+		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
+		 * that the Rx can relax.
+		 */
+		if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
+		    (q_vector->rx.target_itr & I40E_ITR_MASK) ==
+		     I40E_ITR_ADAPTIVE_MAX_USECS)
+			goto clear_counts;
+	} else if (packets > 32) {
+		/* If we have processed over 32 packets in a single interrupt
+		 * for Tx assume we need to switch over to "bulk" mode.
+		 */
+		rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
+	}
+
+	/* We have no packets to actually measure against. This means
+	 * either one of the other queues on this vector is active or
+	 * we are a Tx queue doing TSO with too high of an interrupt rate.
+	 *
+	 * Between 4 and 56 we can assume that our current interrupt delay
+	 * is only slightly too low. As such we should increase it by a small
+	 * fixed amount.
 	 */
-	estimated_usecs = jiffies_to_usecs(jiffies - rc->last_itr_update);
-	if (estimated_usecs > usecs) {
-		new_latency_range = I40E_LOW_LATENCY;
-		goto reset_latency;
+	if (packets < 56) {
+		itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
+		if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
+			itr &= I40E_ITR_ADAPTIVE_LATENCY;
+			itr += I40E_ITR_ADAPTIVE_MAX_USECS;
+		}
+		goto clear_counts;
+	}
+
+	if (packets <= 256) {
+		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
+		itr &= I40E_ITR_MASK;
+
+		/* Between 56 and 112 is our "goldilocks" zone where we are
+		 * working out "just right". Just report that our current
+		 * ITR is good for us.
+		 */
+		if (packets <= 112)
+			goto clear_counts;
+
+		/* If packet count is 128 or greater we are likely looking
+		 * at a slight overrun of the delay we want. Try halving
+		 * our delay to see if that will cut the number of packets
+		 * in half per interrupt.
+		 */
+		itr /= 2;
+		itr &= I40E_ITR_MASK;
+		if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
+			itr = I40E_ITR_ADAPTIVE_MIN_USECS;
+
+		goto clear_counts;
 	}
 
-	/* simple throttlerate management
-	 *   0-10MB/s   lowest (50000 ints/s)
-	 *  10-20MB/s   low    (20000 ints/s)
-	 *  20-1249MB/s bulk   (18000 ints/s)
+	/* The paths below assume we are dealing with a bulk ITR since
+	 * number of packets is greater than 256. We are just going to have
+	 * to compute a value and try to bring the count under control,
+	 * though for smaller packet sizes there isn't much we can do as
+	 * NAPI polling will likely be kicking in sooner rather than later.
+	 */
+	itr = I40E_ITR_ADAPTIVE_BULK;
+
+adjust_by_size:
+	/* If packet counts are 256 or greater we can assume we have a gross
+	 * overestimation of what the rate should be. Instead of trying to fine
+	 * tune it just use the formula below to try and dial in an exact value
+	 * given the current packet size of the frame.
+	 */
+	avg_wire_size = bytes / packets;
+
+	/* The following is a crude approximation of:
+	 *  wmem_default / (size + overhead) = desired_pkts_per_int
+	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
+	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
 	 *
-	 * The math works out because the divisor is in 10^(-6) which
-	 * turns the bytes/us input value into MB/s values, but
-	 * make sure to use usecs, as the register values written
-	 * are in 2 usec increments in the ITR registers, and make sure
-	 * to use the smoothed values that the countdown timer gives us.
+	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
+	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
+	 * formula down to
+	 *
+	 *  (170 * (size + 24)) / (size + 640) = ITR
+	 *
+	 * We first do some math on the packet size and then finally bitshift
+	 * by 8 after rounding up. We also have to account for PCIe link speed
+	 * difference as ITR scales based on this.
 	 */
-	switch (new_latency_range) {
-	case I40E_LOWEST_LATENCY:
-		if (bytes_per_int > 10)
-			new_latency_range = I40E_LOW_LATENCY;
-		break;
-	case I40E_LOW_LATENCY:
-		if (bytes_per_int > 20)
-			new_latency_range = I40E_BULK_LATENCY;
-		else if (bytes_per_int <= 10)
-			new_latency_range = I40E_LOWEST_LATENCY;
-		break;
-	case I40E_BULK_LATENCY:
-	default:
-		if (bytes_per_int <= 20)
-			new_latency_range = I40E_LOW_LATENCY;
-		break;
+	if (avg_wire_size <= 60) {
+		/* Start at 250k ints/sec */
+		avg_wire_size = 4096;
+	} else if (avg_wire_size <= 380) {
+		/* 250K ints/sec to 60K ints/sec */
+		avg_wire_size *= 40;
+		avg_wire_size += 1696;
+	} else if (avg_wire_size <= 1084) {
+		/* 60K ints/sec to 36K ints/sec */
+		avg_wire_size *= 15;
+		avg_wire_size += 11452;
+	} else if (avg_wire_size <= 1980) {
+		/* 36K ints/sec to 30K ints/sec */
+		avg_wire_size *= 5;
+		avg_wire_size += 22420;
+	} else {
+		/* plateau at a limit of 30K ints/sec */
+		avg_wire_size = 32256;
 	}
 
-reset_latency:
-	rc->latency_range = new_latency_range;
+	/* If we are in low latency mode, halve our delay, which doubles the
+	 * rate to somewhere between 100K and 16K ints/sec.
+	 */
+	if (itr & I40E_ITR_ADAPTIVE_LATENCY)
+		avg_wire_size /= 2;
 
-	switch (new_latency_range) {
-	case I40E_LOWEST_LATENCY:
-		new_itr = I40E_ITR_50K;
-		break;
-	case I40E_LOW_LATENCY:
-		new_itr = I40E_ITR_20K;
-		break;
-	case I40E_BULK_LATENCY:
-		new_itr = I40E_ITR_18K;
-		break;
-	default:
-		break;
+	/* Resultant value is 256 times larger than it needs to be. This
+	 * gives us room to adjust the value as needed to either increase
+	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
+	 *
+	 * Use addition as we have already recorded the new latency flag
+	 * for the ITR value.
+	 */
+	itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
+	       I40E_ITR_ADAPTIVE_MIN_INC;
+
+	if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
+		itr &= I40E_ITR_ADAPTIVE_LATENCY;
+		itr += I40E_ITR_ADAPTIVE_MAX_USECS;
 	}
 
+clear_counts:
+	/* write back value */
+	rc->target_itr = itr;
+
+	/* next update should occur within next jiffy */
+	rc->next_update = next_update + 1;
+
 	rc->total_bytes = 0;
 	rc->total_packets = 0;
-	rc->last_itr_update = jiffies;
-
-	if (new_itr != rc->itr) {
-		rc->itr = new_itr;
-		return true;
-	}
-	return false;
 }
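To make the piecewise scaling above concrete, here is a small self-contained sketch (plain C, outside the driver) that evaluates the same avg_wire_size mapping; a 1500-byte average frame, for example, yields 5 * 1500 + 22420 = 29920, near the 30K ints/sec plateau, before the link-speed divisor and the latency-mode halving are applied:

    #include <stdio.h>

    /* Mirrors the avg_wire_size scaling in i40e_update_itr() above. */
    static unsigned int scale_wire_size(unsigned int avg_wire_size)
    {
            if (avg_wire_size <= 60)
                    return 4096;                       /* ~250K ints/sec */
            if (avg_wire_size <= 380)
                    return avg_wire_size * 40 + 1696;  /* 250K..60K */
            if (avg_wire_size <= 1084)
                    return avg_wire_size * 15 + 11452; /* 60K..36K */
            if (avg_wire_size <= 1980)
                    return avg_wire_size * 5 + 22420;  /* 36K..30K */
            return 32256;                              /* 30K plateau */
    }

    int main(void)
    {
            unsigned int sizes[] = { 60, 380, 1084, 1500, 1980, 9000 };
            unsigned int i;

            for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                    printf("avg_wire_size %4u -> %u\n",
                           sizes[i], scale_wire_size(sizes[i]));
            return 0;
    }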
 
+#ifndef CONFIG_I40E_DISABLE_PACKET_SPLIT
 /**
  * i40e_reuse_rx_page - page flip buffer and store it back on the ring
  * @rx_ring: rx descriptor ring to store buffers on
@@ -1063,6 +1250,33 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
 	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
 }
 
+#endif /* CONFIG_I40E_DISABLE_PACKET_SPLIT */
+#ifdef CONFIG_I40E_DISABLE_PACKET_SPLIT
+/**
+ * i40e_reuse_rx_skb - Recycle unused skb and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have skb reused
+ *
+ * Synchronizes skb for reuse by the adapter
+ **/
+static void i40e_reuse_rx_skb(struct i40e_ring *rx_ring,
+			      struct i40e_rx_buffer *old_buff)
+{
+	struct i40e_rx_buffer *new_buff;
+	u16 nta = rx_ring->next_to_alloc;
+
+	new_buff = &rx_ring->rx_bi[nta];
+
+	/* update, and store next to alloc */
+	nta++;
+	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+	/* transfer the skb and its DMA mapping to the new buffer */
+	new_buff->dma		= old_buff->dma;
+	new_buff->skb		= old_buff->skb;
+}
+
+#endif /* CONFIG_I40E_DISABLE_PACKET_SPLIT */
 /**
  * i40e_rx_is_programming_status - check for programming status descriptor
  * @qw: qword representing status_error_len in CPU ordering
@@ -1108,12 +1322,20 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
 
 	prefetch(I40E_RX_DESC(rx_ring, ntc));
 
+#ifdef CONFIG_I40E_DISABLE_PACKET_SPLIT
+	/* place the unused skb back on the ring */
+	i40e_reuse_rx_skb(rx_ring, rx_buffer);
+
+	/* clear contents of buffer_info */
+	rx_buffer->skb = NULL;
+#else
 	/* place unused page back on the ring */
 	i40e_reuse_rx_page(rx_ring, rx_buffer);
 	rx_ring->rx_stats.page_reuse_count++;
 
 	/* clear contents of buffer_info */
 	rx_buffer->page = NULL;
+#endif
 
 	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
 		  I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
@@ -1143,8 +1365,6 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
 	if (!tx_ring->tx_bi)
 		goto err;
 
-	u64_stats_init(&tx_ring->syncp);
-
 	/* round up to nearest 4K */
 	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
 	/* add u32 for head writeback, align after this takes care of
@@ -1162,6 +1382,7 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
 
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
+	tx_ring->tx_stats.prev_pkt_ctr = -1;
 	return 0;
 
 err:
@@ -1192,6 +1413,15 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
 	for (i = 0; i < rx_ring->count; i++) {
 		struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
 
+#ifdef CONFIG_I40E_DISABLE_PACKET_SPLIT
+		if (!rx_bi->skb)
+			continue;
+
+		dma_unmap_single(rx_ring->dev, rx_bi->dma,
+				 rx_ring->rx_buf_len, DMA_FROM_DEVICE);
+		dev_kfree_skb(rx_bi->skb);
+		rx_bi->skb = NULL;
+#else /* CONFIG_I40E_DISABLE_PACKET_SPLIT */
 		if (!rx_bi->page)
 			continue;
 
@@ -1214,6 +1444,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
 
 		rx_bi->page = NULL;
 		rx_bi->page_offset = 0;
+#endif /* CONFIG_I40E_DISABLE_PACKET_SPLIT */
 	}
 
 	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
@@ -1236,6 +1467,10 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
 void i40e_free_rx_resources(struct i40e_ring *rx_ring)
 {
 	i40e_clean_rx_ring(rx_ring);
+#ifdef HAVE_XDP_BUFF_RXQ
+	if (rx_ring->vsi->type == I40E_VSI_MAIN)
+		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+#endif
 	rx_ring->xdp_prog = NULL;
 	kfree(rx_ring->rx_bi);
 	rx_ring->rx_bi = NULL;
@@ -1256,6 +1491,7 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
 {
 	struct device *dev = rx_ring->dev;
+	int err = -ENOMEM;
 	int bi_size;
 
 	/* warn if we are about to overwrite the pointer */
@@ -1264,11 +1500,13 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
 	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
 	if (!rx_ring->rx_bi)
 		goto err;
+#ifdef HAVE_NDO_GET_STATS64
 
 	u64_stats_init(&rx_ring->syncp);
+#endif /* HAVE_NDO_GET_STATS64 */
 
 	/* Round up to nearest 4K */
-	rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+	rx_ring->size = rx_ring->count * sizeof(union i40e_rx_desc);
 	rx_ring->size = ALIGN(rx_ring->size, 4096);
 	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
 					   &rx_ring->dma, GFP_KERNEL);
@@ -1282,14 +1520,22 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
 	rx_ring->next_to_alloc = 0;
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
+#ifdef HAVE_XDP_BUFF_RXQ
+	/* XDP RX-queue info only needed for RX rings exposed to XDP */
+	if (rx_ring->vsi->type == I40E_VSI_MAIN) {
+		err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
+				       rx_ring->queue_index);
+		if (err < 0)
+			goto err;
+	}
 
 	rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
-
+#endif
 	return 0;
 err:
 	kfree(rx_ring->rx_bi);
 	rx_ring->rx_bi = NULL;
-	return -ENOMEM;
+	return err;
 }
 
 /**
@@ -1313,6 +1559,46 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
 	writel(val, rx_ring->tail);
 }
 
+#ifdef CONFIG_I40E_DISABLE_PACKET_SPLIT
+static bool i40e_alloc_mapped_skb(struct i40e_ring *rx_ring,
+				  struct i40e_rx_buffer *bi)
+{
+	struct sk_buff *skb = bi->skb;
+	dma_addr_t dma;
+
+	if (unlikely(skb))
+		return true;
+
+	if (likely(!skb)) {
+		skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+				       rx_ring->rx_buf_len,
+				       GFP_ATOMIC | __GFP_NOWARN);
+		if (unlikely(!skb)) {
+			rx_ring->rx_stats.alloc_buff_failed++;
+			return false;
+		}
+	}
+
+	dma = dma_map_single(rx_ring->dev, skb->data,
+			     rx_ring->rx_buf_len, DMA_FROM_DEVICE);
+
+	/*
+	 * If mapping failed, free the memory back to the system since
+	 * there isn't much point in holding memory we can't use.
+	 */
+	if (dma_mapping_error(rx_ring->dev, dma)) {
+		dev_kfree_skb_any(skb);
+		rx_ring->rx_stats.alloc_buff_failed++;
+		return false;
+	}
+
+	bi->skb = skb;
+	bi->dma = dma;
+
+	return true;
+}
+
+#else /* CONFIG_I40E_DISABLE_PACKET_SPLIT */
 /**
  * i40e_rx_offset - Return expected offset into page to access data
  * @rx_ring: Ring we are requesting offset of
@@ -1376,6 +1662,7 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
 	return true;
 }
 
+#endif /* CONFIG_I40E_DISABLE_PACKET_SPLIT */
 /**
  * i40e_receive_skb - Send a completed packet up the stack
  * @rx_ring:  rx ring in play
@@ -1386,12 +1673,32 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring,
 			     struct sk_buff *skb, u16 vlan_tag)
 {
 	struct i40e_q_vector *q_vector = rx_ring->q_vector;
+#ifdef HAVE_VLAN_RX_REGISTER
+	struct i40e_vsi *vsi = rx_ring->vsi;
+#endif
 
+#ifdef HAVE_VLAN_RX_REGISTER
+	if (vlan_tag & VLAN_VID_MASK) {
+		if (!vsi->vlgrp)
+			dev_kfree_skb_any(skb);
+		else
+			vlan_gro_receive(&q_vector->napi, vsi->vlgrp,
+					 vlan_tag, skb);
+	} else {
+		napi_gro_receive(&q_vector->napi, skb);
+	}
+#else /* HAVE_VLAN_RX_REGISTER */
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
 	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
 	    (vlan_tag & VLAN_VID_MASK))
+#else
+	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_RX) &&
+	    (vlan_tag & VLAN_VID_MASK))
+#endif /* NETIF_F_HW_VLAN_CTAG_RX */
 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 
 	napi_gro_receive(&q_vector->napi, skb);
+#endif /* HAVE_VLAN_RX_REGISTER */
 }
 
 /**
@@ -1415,6 +1722,12 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
 	bi = &rx_ring->rx_bi[ntu];
 
 	do {
+#ifdef CONFIG_I40E_DISABLE_PACKET_SPLIT
+		if (!i40e_alloc_mapped_skb(rx_ring, bi))
+			goto no_buffers;
+
+		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+#else
 		if (!i40e_alloc_mapped_page(rx_ring, bi))
 			goto no_buffers;
 
@@ -1428,6 +1741,7 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
 		 * because each write-back erases this info.
 		 */
 		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
+#endif /* CONFIG_I40E_DISABLE_PACKET_SPLIT */
 
 		rx_desc++;
 		bi++;
@@ -1459,6 +1773,58 @@ no_buffers:
 	return true;
 }
 
+#define I40E_XDP_PASS          0
+#define I40E_XDP_CONSUMED      BIT(0)
+#define I40E_XDP_TX            BIT(1)
+#define I40E_XDP_REDIR         BIT(2)
+
+static inline void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
+{
+	/* Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.
+	 */
+	wmb();
+	writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
+}
+
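A note on the helper just above: writel_relaxed() carries no ordering guarantee against prior normal memory writes, so the explicit wmb() is what ensures the freshly written Tx descriptors are globally visible before the tail bump tells the NIC to fetch them; a plain writel() would imply the barrier at extra cost on some architectures.

    /* Ordering contract of i40e_xdp_ring_update_tail():
     *   descriptor writes ... wmb() ... writel_relaxed(tail)
     * The barrier, not the MMIO accessor, provides the ordering here.
     */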
+#ifdef I40E_ADD_PROBES
+static void i40e_rx_extra_counters(struct i40e_vsi *vsi, u32 rx_error,
+				   const struct i40e_rx_ptype_decoded decoded)
+{
+	bool ipv4;
+
+	ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
+	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
+
+	if (ipv4 &&
+	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
+			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+		vsi->back->rx_ip4_cso_err++;
+
+	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT)) {
+		if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP)
+			vsi->back->rx_tcp_cso_err++;
+		else if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_UDP)
+			vsi->back->rx_udp_cso_err++;
+		else if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_SCTP)
+			vsi->back->rx_sctp_cso_err++;
+	}
+
+	if ((decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
+	    (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4))
+		vsi->back->rx_ip4_cso++;
+	if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP)
+		vsi->back->rx_tcp_cso++;
+	else if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_UDP)
+		vsi->back->rx_udp_cso++;
+	else if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_SCTP)
+		vsi->back->rx_sctp_cso++;
+}
+
+#endif /* I40E_ADD_PROBES */
+#if defined(HAVE_VXLAN_RX_OFFLOAD) || defined(HAVE_GENEVE_RX_OFFLOAD) || defined(HAVE_UDP_ENC_RX_OFFLOAD)
+#define I40E_TUNNEL_SUPPORT
+#endif
 /**
  * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
  * @vsi: the VSI we care about
@@ -1488,8 +1854,13 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 	skb_checksum_none_assert(skb);
 
 	/* Rx csum enabled and ip headers found? */
+#ifdef HAVE_NDO_SET_FEATURES
 	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
 		return;
+#else
+	if (!(vsi->back->flags & I40E_FLAG_RX_CSUM_ENABLED))
+		return;
+#endif
 
 	/* did the hardware decode the packet and checksum? */
 	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
@@ -1498,12 +1869,19 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 	/* both known and outer_ip must be set for the below code to work */
 	if (!(decoded.known && decoded.outer_ip))
 		return;
+#ifdef I40E_ADD_PROBES
+	vsi->back->hw_csum_rx_outer++;
+#endif
 
 	ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
 	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
 	ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
 	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
 
+#ifdef I40E_ADD_PROBES
+	i40e_rx_extra_counters(vsi, rx_error, decoded);
+
+#endif /* I40E_ADD_PROBES */
 	if (ipv4 &&
 	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
 			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
@@ -1526,12 +1904,18 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
 		return;
 
+#ifdef I40E_TUNNEL_SUPPORT
 	/* If there is an outer header present that might contain a checksum
 	 * we need to bump the checksum level by 1 to reflect the fact that
 	 * we are indicating we validated the inner checksum.
 	 */
 	if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
+#ifdef HAVE_SKBUFF_CSUM_LEVEL
 		skb->csum_level = 1;
+#else
+		skb->encapsulation = 1;
+#endif
+#endif /* I40E_TUNNEL_SUPPORT */
 
 	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
 	switch (decoded.inner_prot) {
@@ -1539,11 +1923,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 	case I40E_RX_PTYPE_INNER_PROT_UDP:
 	case I40E_RX_PTYPE_INNER_PROT_SCTP:
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
-		/* fall though */
+		/* fall through */
 	default:
 		break;
 	}
-
 	return;
 
 checksum_fail:
@@ -1556,7 +1939,7 @@ checksum_fail:
  *
  * Returns a hash type to be used by skb_set_hash
  **/
-static inline int i40e_ptype_to_htype(u8 ptype)
+static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
 {
 	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
 
@@ -1577,12 +1960,15 @@ static inline int i40e_ptype_to_htype(u8 ptype)
  * i40e_rx_hash - set the hash value in the skb
  * @ring: descriptor ring
  * @rx_desc: specific descriptor
+ * @skb: skb currently being received and modified
+ * @rx_ptype: Rx packet type
  **/
 static inline void i40e_rx_hash(struct i40e_ring *ring,
 				union i40e_rx_desc *rx_desc,
 				struct sk_buff *skb,
 				u8 rx_ptype)
 {
+#ifdef NETIF_F_RXHASH
 	u32 hash;
 	const __le64 rss_mask =
 		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
@@ -1595,6 +1981,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
 		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
 		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
 	}
+#endif /* NETIF_F_RXHASH */
 }
 
 /**
@@ -1613,6 +2000,7 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
 			     union i40e_rx_desc *rx_desc, struct sk_buff *skb,
 			     u8 rx_ptype)
 {
+#ifdef HAVE_PTP_1588_CLOCK
 	u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
 	u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
 			I40E_RXD_QW1_STATUS_SHIFT;
@@ -1622,6 +2010,7 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
 
 	if (unlikely(tsynvalid))
 		i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
+#endif /* HAVE_PTP_1588_CLOCK */
 
 	i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
 
@@ -1649,7 +2038,6 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
  **/
 static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
 				 union i40e_rx_desc *rx_desc)
-
 {
 	/* XDP packets use error pointer so abort at this point */
 	if (IS_ERR(skb))
@@ -1673,6 +2061,51 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
 	return false;
 }
 
+#ifdef CONFIG_I40E_DISABLE_PACKET_SPLIT
+/**
+ * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @size: size of buffer to add to skb
+ *
+ * This function will pull an Rx buffer from the ring and synchronize it
+ * for use by the CPU.
+ *
+ * ONE-BUFF version
+ */
+static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
+						 const unsigned int size)
+{
+	struct i40e_rx_buffer *rx_buffer;
+
+	rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
+
+	/* unmap the buffer so the CPU owns the data from here on */
+	dma_unmap_single(rx_ring->dev, rx_buffer->dma,
+			 rx_ring->rx_buf_len, DMA_FROM_DEVICE);
+
+	prefetch(rx_buffer->skb->data);
+
+	return rx_buffer;
+}
+
+/**
+ * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: rx buffer to pull data from
+ *
+ * This function will clean up the contents of the rx_buffer.  It will
+ * either recycle the buffer or unmap it and free the associated resources.
+ *
+ * ONE-BUFF version
+ */
+static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
+			       struct i40e_rx_buffer *rx_buffer)
+{
+	/* clear contents of buffer_info */
+	rx_buffer->skb = NULL;
+}
+
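In this one-buffer mode each descriptor owns a complete, pre-mapped skb
rather than a half-page fragment, so the put step only drops the ring's
reference; the skb itself continues up the stack. A condensed, hypothetical
view of the per-packet flow under CONFIG_I40E_DISABLE_PACKET_SPLIT:

	/* Hypothetical condensed Rx flow in one-buffer mode. */
	rx_buffer = i40e_get_rx_buffer(rx_ring, size);	/* unmap, prefetch */
	skb = rx_buffer->skb;				/* preallocated    */
	__skb_put(skb, size);				/* expose payload  */
	i40e_put_rx_buffer(rx_ring, rx_buffer);		/* clears ->skb    */
	/* the emptied slot is refilled later by i40e_alloc_mapped_skb()
	 * from i40e_alloc_rx_buffers()
	 */
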
+#else /* CONFIG_I40E_DISABLE_PACKET_SPLIT */
 /**
  * i40e_page_is_reusable - check if any reuse is possible
  * @page: page struct to check
@@ -1737,10 +2170,17 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
 	 * the pagecnt_bias and page count so that we fully restock the
 	 * number of references the driver holds.
 	 */
+#ifdef HAVE_PAGE_COUNT_BULK_UPDATE
 	if (unlikely(!pagecnt_bias)) {
 		page_ref_add(page, USHRT_MAX);
 		rx_buffer->pagecnt_bias = USHRT_MAX;
 	}
+#else
+	if (likely(!pagecnt_bias)) {
+		get_page(page);
+		rx_buffer->pagecnt_bias = 1;
+	}
+#endif
 
 	return true;
 }
@@ -1765,7 +2205,7 @@ static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
 #if (PAGE_SIZE < 8192)
 	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
 #else
-	unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
+	unsigned int truesize =	SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
 #endif
 
 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
@@ -1822,11 +2262,13 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
 					  struct i40e_rx_buffer *rx_buffer,
 					  struct xdp_buff *xdp)
 {
-	unsigned int size = xdp->data_end - xdp->data;
+	unsigned int size = (u8 *)xdp->data_end - (u8 *)xdp->data;
+
 #if (PAGE_SIZE < 8192)
 	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
 #else
-	unsigned int truesize = SKB_DATA_ALIGN(size);
+	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+				SKB_DATA_ALIGN(I40E_SKB_PAD + size);
 #endif
 	unsigned int headlen;
 	struct sk_buff *skb;
@@ -1834,7 +2276,7 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
 	/* prefetch first cache line of first page */
 	prefetch(xdp->data);
 #if L1_CACHE_BYTES < 128
-	prefetch(xdp->data + L1_CACHE_BYTES);
+	prefetch((void *)((u8 *)xdp->data + L1_CACHE_BYTES));
 #endif
 
 	/* allocate a skb to store the frags */
@@ -1847,7 +2289,8 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
 	/* Determine available headroom for copy */
 	headlen = size;
 	if (headlen > I40E_RX_HDR_SIZE)
-		headlen = eth_get_headlen(xdp->data, I40E_RX_HDR_SIZE);
+		headlen = eth_get_headlen(skb->dev, xdp->data,
+					  I40E_RX_HDR_SIZE);
 
 	/* align pull length to size of long to optimize memcpy performance */
 	memcpy(__skb_put(skb, headlen), xdp->data,
@@ -1874,10 +2317,11 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
 	return skb;
 }
 
+#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC
 /**
  * i40e_build_skb - Build skb around an existing buffer
- * @rx_ring: Rx descriptor ring to transact packets on
- * @rx_buffer: Rx buffer to pull data from
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: rx buffer to pull data from
  * @xdp: xdp_buff pointing to the data
  *
  * This function builds an skb around an existing Rx buffer, taking care
@@ -1887,12 +2331,14 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
 				      struct i40e_rx_buffer *rx_buffer,
 				      struct xdp_buff *xdp)
 {
-	unsigned int size = xdp->data_end - xdp->data;
+	unsigned int size = (u8 *)xdp->data_end - (u8 *)xdp->data;
+
 #if (PAGE_SIZE < 8192)
 	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
 #else
 	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
-				SKB_DATA_ALIGN(I40E_SKB_PAD + size);
+				SKB_DATA_ALIGN(xdp->data_end -
+					       xdp->data_hard_start);
 #endif
 	struct sk_buff *skb;
 
@@ -1907,7 +2353,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
 		return NULL;
 
 	/* update pointers within the skb to store the data */
-	skb_reserve(skb, I40E_SKB_PAD);
+	skb_reserve(skb, xdp->data - xdp->data_hard_start);
 	__skb_put(skb, size);
 
 	/* buffer is used by skb, update page_offset */
@@ -1920,13 +2366,14 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
 	return skb;
 }
 
+#endif /* HAVE_SWIOTLB_SKIP_CPU_SYNC */
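
In the build_skb path both the headroom and, on large-page systems, the
truesize are derived from the xdp_buff rather than from fixed constants,
because a BPF program may have moved the packet start with
bpf_xdp_adjust_head(). The layout the arithmetic assumes, sketched:

	/*
	 *  data_hard_start           data               data_end
	 *       |<---- headroom ----->|<------ size ----->|
	 *       |<- SKB_DATA_ALIGN(data_end - data_hard_start) ->| shinfo
	 */
	unsigned int headroom = xdp->data - xdp->data_hard_start;

	skb_reserve(skb, headroom);	/* keep payload where XDP left it */
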
 /**
  * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
  * @rx_ring: rx descriptor ring to transact packets on
  * @rx_buffer: rx buffer to pull data from
  *
  * This function will clean up the contents of the rx_buffer.  It will
- * either recycle the bufer or unmap it and free the associated resources.
+ * either recycle the buffer or unmap it and free the associated resources.
  */
 static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
 			       struct i40e_rx_buffer *rx_buffer)
@@ -1948,6 +2395,7 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
 	rx_buffer->page = NULL;
 }
 
+#endif /* CONFIG_I40E_DISABLE_PACKET_SPLIT */
 /**
  * i40e_is_non_eop - process handling of non-EOP buffers
  * @rx_ring: Rx ring being processed
@@ -1981,12 +2429,26 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
 	return true;
 }
 
-#define I40E_XDP_PASS 0
-#define I40E_XDP_CONSUMED 1
-#define I40E_XDP_TX 2
+#ifdef HAVE_XDP_SUPPORT
+#ifdef HAVE_XDP_FRAME_STRUCT
+static int i40e_xmit_xdp_ring(struct xdp_frame *xdp,
+			      struct i40e_ring *xdp_ring);
 
+static int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp,
+				 struct i40e_ring *xdp_ring)
+{
+	struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
+
+	if (unlikely(!xdpf))
+		return I40E_XDP_CONSUMED;
+
+	return i40e_xmit_xdp_ring(xdpf, xdp_ring);
+}
+#else
 static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
 			      struct i40e_ring *xdp_ring);
+#endif
+#endif
 
 /**
  * i40e_run_xdp - run an XDP program
@@ -1997,9 +2459,11 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
 				    struct xdp_buff *xdp)
 {
 	int result = I40E_XDP_PASS;
+#ifdef HAVE_XDP_SUPPORT
 	struct i40e_ring *xdp_ring;
 	struct bpf_prog *xdp_prog;
 	u32 act;
+	int err;
 
 	rcu_read_lock();
 	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
@@ -2007,26 +2471,46 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
 	if (!xdp_prog)
 		goto xdp_out;
 
+	prefetchw(xdp->data_hard_start); /* xdp_frame write */
+
 	act = bpf_prog_run_xdp(xdp_prog, xdp);
 	switch (act) {
 	case XDP_PASS:
+		rx_ring->xdp_stats.xdp_pass++;
 		break;
 	case XDP_TX:
 		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
+#ifdef HAVE_XDP_FRAME_STRUCT
+		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
+#else
 		result = i40e_xmit_xdp_ring(xdp, xdp_ring);
+#endif
+		rx_ring->xdp_stats.xdp_tx++;
+		break;
+	case XDP_REDIRECT:
+		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+		result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
+		if (!err)
+			rx_ring->xdp_stats.xdp_redirect++;
+		else
+			rx_ring->xdp_stats.xdp_redirect_fail++;
 		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
+		/* fallthrough -- abort and drop */
 	case XDP_ABORTED:
 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+		rx_ring->xdp_stats.xdp_unknown++;
 		/* fallthrough -- handle aborts by dropping packet */
 	case XDP_DROP:
 		result = I40E_XDP_CONSUMED;
+		rx_ring->xdp_stats.xdp_drop++;
 		break;
 	}
 xdp_out:
 	rcu_read_unlock();
-	return ERR_PTR(-result);
+#endif /* HAVE_XDP_SUPPORT */
+	return (struct sk_buff *)ERR_PTR(-result);
 }
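
Note how i40e_run_xdp() returns its verdict through the sk_buff pointer
itself: the verdict bits are negated into an error pointer and the caller
recovers them with -PTR_ERR(). A small sketch of that round trip, relying
only on the kernel's ERR_PTR()/PTR_ERR()/IS_ERR() semantics:

	/* encode: verdict bits become a negative error pointer */
	skb = ERR_PTR(-I40E_XDP_TX);			/* value -2 */

	/* decode in i40e_clean_rx_irq() */
	if (IS_ERR(skb)) {
		unsigned int xdp_res = -PTR_ERR(skb);	/* BIT(1) again */

		if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR))
			; /* buffer was consumed by the XDP Tx path */
	}
	/* I40E_XDP_PASS encodes to ERR_PTR(0) == NULL, which fails
	 * IS_ERR() and falls through to normal skb construction
	 */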
 
 /**
@@ -2067,12 +2551,17 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 	struct sk_buff *skb = rx_ring->skb;
 	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
-	bool failure = false, xdp_xmit = false;
+	unsigned int xdp_xmit = 0;
+	bool failure = false;
+	struct xdp_buff xdp;
+
+#ifdef HAVE_XDP_BUFF_RXQ
+	xdp.rxq = &rx_ring->xdp_rxq;
+#endif
 
 	while (likely(total_rx_packets < (unsigned int)budget)) {
 		struct i40e_rx_buffer *rx_buffer;
 		union i40e_rx_desc *rx_desc;
-		struct xdp_buff xdp;
 		unsigned int size;
 		u16 vlan_tag;
 		u8 rx_ptype;
@@ -2114,19 +2603,27 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 		rx_buffer = i40e_get_rx_buffer(rx_ring, size);
 
 		/* retrieve a buffer from the ring */
+#ifdef CONFIG_I40E_DISABLE_PACKET_SPLIT
+		/* we are leaking memory if an skb is already present */
+		WARN_ON(skb);
+		skb = rx_buffer->skb;
+		__skb_put(skb, size);
+#else
 		if (!skb) {
 			xdp.data = page_address(rx_buffer->page) +
 				   rx_buffer->page_offset;
-			xdp.data_hard_start = xdp.data -
-					      i40e_rx_offset(rx_ring);
-			xdp.data_end = xdp.data + size;
+			xdp.data_hard_start = (void *)((u8 *)xdp.data -
+					      i40e_rx_offset(rx_ring));
+			xdp.data_end = (void *)((u8 *)xdp.data + size);
 
 			skb = i40e_run_xdp(rx_ring, &xdp);
 		}
 
 		if (IS_ERR(skb)) {
-			if (PTR_ERR(skb) == -I40E_XDP_TX) {
-				xdp_xmit = true;
+			unsigned int xdp_res = -PTR_ERR(skb);
+
+			if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
+				xdp_xmit |= xdp_res;
 				i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
 			} else {
 				rx_buffer->pagecnt_bias++;
@@ -2135,8 +2632,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 			total_rx_packets++;
 		} else if (skb) {
 			i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
+#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC
 		} else if (ring_uses_build_skb(rx_ring)) {
 			skb = i40e_build_skb(rx_ring, rx_buffer, &xdp);
+#endif
 		} else {
 			skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp);
 		}
@@ -2147,6 +2646,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 			rx_buffer->pagecnt_bias++;
 			break;
 		}
+#endif /* CONFIG_I40E_DISABLE_PACKET_SPLIT */
 
 		i40e_put_rx_buffer(rx_ring, rx_buffer);
 		cleaned_count++;
@@ -2180,19 +2680,15 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 		total_rx_packets++;
 	}
 
-	if (xdp_xmit) {
-		struct i40e_ring *xdp_ring;
+	if (xdp_xmit & I40E_XDP_REDIR)
+		xdp_do_flush_map();
 
-		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
+	if (xdp_xmit & I40E_XDP_TX) {
+		struct i40e_ring *xdp_ring =
+			rx_ring->vsi->xdp_rings[rx_ring->queue_index];
 
-		/* Force memory writes to complete before letting h/w
-		 * know there are new descriptors to fetch.
-		 */
-		wmb();
-
-		writel(xdp_ring->next_to_use, xdp_ring->tail);
+		i40e_xdp_ring_update_tail(xdp_ring);
 	}
-
 	rx_ring->skb = skb;
 
 	u64_stats_update_begin(&rx_ring->syncp);
@@ -2206,31 +2702,45 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 	return failure ? budget : (int)total_rx_packets;
 }
 
-static u32 i40e_buildreg_itr(const int type, const u16 itr)
+static inline u32 i40e_buildreg_itr(const int type, u16 itr)
 {
 	u32 val;
 
+	/* We don't bother with setting the CLEARPBA bit as the data sheet
+	 * points out doing so is "meaningless since it was already
+	 * auto-cleared". The auto-clearing happens when the interrupt is
+	 * asserted.
+	 *
+	 * Hardware errata 28 also indicates that writing to a
+	 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
+	 * an event in the PBA anyway, so we need to rely on the automask
+	 * to hold pending events for us until the interrupt is re-enabled.
+	 *
+	 * The itr value is reported in microseconds, and the register
+	 * value is recorded in 2 microsecond units. For this reason we
+	 * only need to shift by the interval shift - 1 instead of the
+	 * full value.
+	 */
+	itr &= I40E_ITR_MASK;
+
 	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
-	      /* Don't clear PBA because that can cause lost interrupts that
-	       * came in while we were cleaning/polling
-	       */
 	      (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
-	      (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
+	      (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1));
 
 	return val;
 }
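
Since ITR values are now carried in microseconds while the register counts
2-usec units, the shift above is INTERVAL_SHIFT - 1: one bit position is
borrowed to divide by two. A worked example, assuming
I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT is 5 (interval field starting at bit 5):

	u16 itr = 50;			/* 50 usec, an even value */

	itr &= I40E_ITR_MASK;		/* 50 & 0x1FFE == 50      */
	val |= itr << (5 - 1);		/* 50 << 4 == 800         */
	/* identical to (50 / 2) << 5: the register sees 25, i.e.
	 * twenty-five 2-usec ticks == 50 usec
	 */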
 
 /* a small macro to shorten up some long lines */
 #define INTREG I40E_PFINT_DYN_CTLN
-static inline int get_rx_itr(struct i40e_vsi *vsi, int idx)
-{
-	return vsi->rx_rings[idx]->rx_itr_setting;
-}
 
-static inline int get_tx_itr(struct i40e_vsi *vsi, int idx)
-{
-	return vsi->tx_rings[idx]->tx_itr_setting;
-}
+/* The act of updating the ITR will cause it to immediately trigger. In order
+ * to prevent this from throwing off adaptive update statistics we defer the
+ * update so that it can only happen so often. So after either Tx or Rx are
+ * updated we make the adaptive scheme wait until either the ITR completely
+ * expires via the next_update expiration or we have been through at least
+ * 3 interrupts.
+ */
+#define ITR_COUNTDOWN_START 3
 
 /**
  * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
@@ -2242,79 +2752,57 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
 					  struct i40e_q_vector *q_vector)
 {
 	struct i40e_hw *hw = &vsi->back->hw;
-	bool rx = false, tx = false;
-	u32 rxval, txval;
-	int vector;
-	int idx = q_vector->v_idx;
-	int rx_itr_setting, tx_itr_setting;
+	u32 intval;
 
 	/* If we don't have MSIX, then we only need to re-enable icr0 */
 	if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
-		i40e_irq_dynamic_enable_icr0(vsi->back, false);
+		i40e_irq_dynamic_enable_icr0(vsi->back);
 		return;
 	}
 
-	vector = (q_vector->v_idx + vsi->base_vector);
-
-	/* avoid dynamic calculation if in countdown mode OR if
-	 * all dynamic is disabled
-	 */
-	rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
-
-	rx_itr_setting = get_rx_itr(vsi, idx);
-	tx_itr_setting = get_tx_itr(vsi, idx);
-
-	if (q_vector->itr_countdown > 0 ||
-	    (!ITR_IS_DYNAMIC(rx_itr_setting) &&
-	     !ITR_IS_DYNAMIC(tx_itr_setting))) {
-		goto enable_int;
-	}
-
-	if (ITR_IS_DYNAMIC(rx_itr_setting)) {
-		rx = i40e_set_new_dynamic_itr(&q_vector->rx);
-		rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
-	}
-
-	if (ITR_IS_DYNAMIC(tx_itr_setting)) {
-		tx = i40e_set_new_dynamic_itr(&q_vector->tx);
-		txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
-	}
-
-	if (rx || tx) {
-		/* get the higher of the two ITR adjustments and
-		 * use the same value for both ITR registers
-		 * when in adaptive mode (Rx and/or Tx)
-		 */
-		u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
+	/* These will do nothing if dynamic updates are not enabled */
+	i40e_update_itr(q_vector, &q_vector->tx);
+	i40e_update_itr(q_vector, &q_vector->rx);
 
-		q_vector->tx.itr = q_vector->rx.itr = itr;
-		txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
-		tx = true;
-		rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
-		rx = true;
-	}
-
-	/* only need to enable the interrupt once, but need
-	 * to possibly update both ITR values
+	/* This block of logic allows us to get away with only updating
+	 * one ITR value with each interrupt. The idea is to perform a
+	 * pseudo-lazy update with the following criteria.
+	 *
+	 * 1. Rx is given higher priority than Tx if both are in the same state
+	 * 2. If we must reduce an ITR, that reduction is given highest priority.
+	 * 3. We then give priority to increasing ITR based on amount.
 	 */
-	if (rx) {
-		/* set the INTENA_MSK_MASK so that this first write
-		 * won't actually enable the interrupt, instead just
-		 * updating the ITR (it's bit 31 PF and VF)
+	if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
+		/* Rx ITR needs to be reduced, this is highest priority */
+		intval = i40e_buildreg_itr(I40E_RX_ITR,
+					   q_vector->rx.target_itr);
+		q_vector->rx.current_itr = q_vector->rx.target_itr;
+		q_vector->itr_countdown = ITR_COUNTDOWN_START;
+	} else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
+		   ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
+		    (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
+		/* Tx ITR needs to be reduced, this is second priority
+		 * Tx ITR needs to be increased more than Rx, fourth priority
 		 */
-		rxval |= BIT(31);
-		/* don't check _DOWN because interrupt isn't being enabled */
-		wr32(hw, INTREG(vector - 1), rxval);
+		intval = i40e_buildreg_itr(I40E_TX_ITR,
+					   q_vector->tx.target_itr);
+		q_vector->tx.current_itr = q_vector->tx.target_itr;
+		q_vector->itr_countdown = ITR_COUNTDOWN_START;
+	} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
+		/* Rx ITR needs to be increased, third priority */
+		intval = i40e_buildreg_itr(I40E_RX_ITR,
+					   q_vector->rx.target_itr);
+		q_vector->rx.current_itr = q_vector->rx.target_itr;
+		q_vector->itr_countdown = ITR_COUNTDOWN_START;
+	} else {
+		/* No ITR update, lowest priority */
+		intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
+		if (q_vector->itr_countdown)
+			q_vector->itr_countdown--;
 	}
 
-enable_int:
 	if (!test_bit(__I40E_VSI_DOWN, vsi->state))
-		wr32(hw, INTREG(vector - 1), txval);
-
-	if (q_vector->itr_countdown)
-		q_vector->itr_countdown--;
-	else
-		q_vector->itr_countdown = ITR_COUNTDOWN_START;
+		wr32(hw, INTREG(q_vector->reg_idx), intval);
 }
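
The if/else ladder implements the four-way priority spelled out in the
comment above. Distilled into a hypothetical pure function (not part of
the driver), the decision is easier to audit:

	enum itr_pick { PICK_RX, PICK_TX, PICK_NONE };

	static enum itr_pick pick_itr_update(u16 rx_target, u16 rx_current,
					     u16 tx_target, u16 tx_current)
	{
		if (rx_target < rx_current)
			return PICK_RX;		/* 1. reduce Rx first      */
		if (tx_target < tx_current ||
		    (rx_target - rx_current) < (tx_target - tx_current))
			return PICK_TX;		/* 2. reduce Tx, or 4. the
						 * larger Tx increase      */
		if (rx_current != rx_target)
			return PICK_RX;		/* 3. increase Rx          */
		return PICK_NONE;		/* nothing to update       */
	}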
 
 /**
@@ -2332,6 +2820,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
 			       container_of(napi, struct i40e_q_vector, napi);
 	struct i40e_vsi *vsi = q_vector->vsi;
 	struct i40e_ring *ring;
+	u64 flags = vsi->back->flags;
 	bool clean_complete = true;
 	bool arm_wb = false;
 	int budget_per_ring;
@@ -2372,8 +2861,15 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
 			clean_complete = false;
 	}
 
+#ifndef HAVE_NETDEV_NAPI_LIST
+	/* if netdev is disabled we need to stop polling */
+	if (!netif_running(vsi->netdev))
+		clean_complete = true;
+
+#endif
 	/* If work not completed, return budget and polling will return */
 	if (!clean_complete) {
+#ifdef HAVE_IRQ_AFFINITY_NOTIFY
 		int cpu_id = smp_processor_id();
 
 		/* It is possible that the interrupt affinity has changed but,
@@ -2393,6 +2889,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
 			/* Return budget-1 so that polling stops */
 			return budget - 1;
 		}
+#endif /* HAVE_IRQ_AFFINITY_NOTIFY */
 tx_only:
 		if (arm_wb) {
 			q_vector->tx.ring[0].tx_stats.tx_force_wb++;
@@ -2401,7 +2898,7 @@ tx_only:
 		return budget;
 	}
 
-	if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
+	if (flags & I40E_TXR_FLAGS_WB_ON_ITR)
 		q_vector->arm_wb_state = false;
 
 	/* Work is done so exit the polling mode and re-enable the interrupt */
@@ -2438,7 +2935,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
 		return;
 
-	if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED)
+	if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
 		return;
 
 	/* if sampling is disabled do nothing */
@@ -2450,8 +2947,12 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
 		return;
 
 	/* snag network header to get L4 type and address */
-	hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
+#ifdef HAVE_SKB_INNER_NETWORK_HEADER
+	hdr.network = (tx_flags & I40E_TX_FLAGS_TUNNEL) ?
 		      skb_inner_network_header(skb) : skb_network_header(skb);
+#else
+	hdr.network = skb_network_header(skb);
+#endif /* HAVE_SKB_INNER_NETWORK_HEADER */
 
 	/* Note: tx_flags gets modified to reflect inner protocols in
 	 * tx_enable_csum function if encap is enabled.
@@ -2466,8 +2967,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
 		unsigned int h_offset = inner_hlen;
 
 		/* this function updates h_offset to the end of the header */
-		l4_proto =
-		  ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
+		l4_proto = ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
 		/* hlen will contain our best estimate of the tcp header */
 		hlen = h_offset - inner_hlen;
 	}
@@ -2478,8 +2978,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	th = (struct tcphdr *)(hdr.network + hlen);
 
 	/* Due to lack of space, no more new filters can be programmed */
-	if (th->syn && (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
+	if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
 		return;
+
 	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
 		/* HW ATR eviction will take care of removing filters on FIN
 		 * and RST packets.
@@ -2531,7 +3032,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
 		     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
 
 	dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
-	if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
+	if (!(tx_flags & I40E_TX_FLAGS_TUNNEL))
 		dtype_cmd |=
 			((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
 			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
@@ -2570,8 +3071,13 @@ static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
 	__be16 protocol = skb->protocol;
 	u32  tx_flags = 0;
 
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
 	if (protocol == htons(ETH_P_8021Q) &&
 	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
+#else
+	if (protocol == htons(ETH_P_8021Q) &&
+	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_TX)) {
+#endif
 		/* When HW VLAN acceleration is turned off by the user the
 		 * stack sets the protocol to 8021q so that the driver
 		 * can take any steps required to support the SW only
@@ -2629,6 +3135,14 @@ out:
 	return 0;
 }
 
+#ifndef HAVE_ENCAP_TSO_OFFLOAD
+#define inner_ip_hdr(skb) 0
+#define inner_tcp_hdr(skb) 0
+#define inner_ipv6_hdr(skb) 0
+#define inner_tcp_hdrlen(skb) 0
+#define skb_inner_transport_header(skb) ((skb)->data)
+#endif /* HAVE_ENCAP_TSO_OFFLOAD */
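
The stubs above only have to satisfy the compiler: on kernels without
encapsulation TSO, skb->encapsulation is never set on transmit, so the
branches that would evaluate the inner-header accessors are dead at run
time even though they still compile. The shape of the shim, sketched with
hypothetical names:

	#ifndef HAVE_FANCY_API				/* hypothetical flag */
	#define fancy_inner_hdr(skb) ((skb)->data)	/* benign stand-in   */
	#endif

	/* shared path compiles everywhere; the branch simply cannot be
	 * taken on kernels where the feature can never be enabled
	 */
	if (skb->encapsulation)
		hdr = fancy_inner_hdr(skb);
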
 /**
  * i40e_tso - set up the tso context descriptor
  * @first:    pointer to first Tx buffer for xmit
@@ -2677,14 +3191,32 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
 		ip.v6->payload_len = 0;
 	}
 
+#ifdef HAVE_ENCAP_TSO_OFFLOAD
 	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
+#ifdef NETIF_F_GSO_PARTIAL
 					 SKB_GSO_GRE_CSUM |
+#endif
+#ifdef NETIF_F_GSO_IPXIP4
 					 SKB_GSO_IPXIP4 |
+#ifdef NETIF_F_GSO_IPXIP6
 					 SKB_GSO_IPXIP6 |
+#endif
+#else
+#ifdef NETIF_F_GSO_IPIP
+					 SKB_GSO_IPIP |
+#endif
+#ifdef NETIF_F_GSO_SIT
+					 SKB_GSO_SIT |
+#endif
+#endif
 					 SKB_GSO_UDP_TUNNEL |
 					 SKB_GSO_UDP_TUNNEL_CSUM)) {
+#ifndef NETIF_F_GSO_PARTIAL
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+#else
 		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
 		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
+#endif
 			l4.udp->len = 0;
 
 			/* determine offset of outer transport header */
@@ -2709,6 +3241,7 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
 		}
 	}
 
+#endif /* HAVE_ENCAP_TSO_OFFLOAD */
 	/* determine offset of inner transport header */
 	l4_offset = l4.hdr - skb->data;
 
@@ -2723,7 +3256,14 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
 	gso_size = skb_shinfo(skb)->gso_size;
 	gso_segs = skb_shinfo(skb)->gso_segs;
 
-	/* update GSO size and bytecount with header size */
+#ifndef HAVE_NDO_FEATURES_CHECK
+	/* too small a TSO segment size causes problems */
+	if (gso_size < 64) {
+		gso_size = 64;
+		gso_segs = DIV_ROUND_UP(skb->len - *hdr_len, 64);
+	}
+#endif
+	/* update gso size and bytecount with header size */
 	first->gso_segs = gso_segs;
 	first->bytecount += (first->gso_segs - 1) * *hdr_len;
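
The adjustment reflects that skb->len counts the headers exactly once,
while TSO hardware replicates them into every segment. A worked example
with hypothetical numbers:

	/* 9000 bytes of TCP payload, 66 bytes of headers, gso_size 1448:
	 * gso_segs = DIV_ROUND_UP(9000, 1448) = 7
	 */
	bytecount  = 66 + 9000;		/* skb->len == 9066          */
	bytecount += (7 - 1) * 66;	/* six replicated headers    */
	/* 9462 bytes actually cross the wire, spread over 7 frames */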
 
@@ -2737,6 +3277,7 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
 	return 1;
 }
 
+#ifdef HAVE_PTP_1588_CLOCK
 /**
  * i40e_tsyn - set up the tsyn context descriptor
  * @tx_ring:  ptr to the ring to send
@@ -2751,7 +3292,11 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
 {
 	struct i40e_pf *pf;
 
+#ifdef SKB_SHARED_TX_IS_UNION
+	if (likely(!(skb_tx(skb)->hardware)))
+#else
 	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
+#endif
 		return 0;
 
 	/* Tx timestamps cannot be sampled when doing TSO */
@@ -2767,7 +3312,11 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
 	if (pf->ptp_tx &&
 	    !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
+#ifdef SKB_SHARED_TX_IS_UNION
+		skb_tx(skb)->in_progress = 1;
+#else
 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+#endif
 		pf->ptp_tx_start = jiffies;
 		pf->ptp_tx_skb = skb_get(skb);
 	} else {
@@ -2781,6 +3330,7 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	return 1;
 }
 
+#endif /* HAVE_PTP_1588_CLOCK */
 /**
  * i40e_tx_enable_csum - Enable Tx checksum offloads
  * @skb: send buffer
@@ -2819,6 +3369,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
 	/* compute outer L2 header size */
 	offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
 
+#ifdef HAVE_ENCAP_CSUM_OFFLOAD
 	if (skb->encapsulation) {
 		u32 tunnel = 0;
 		/* define outer network header type */
@@ -2842,17 +3393,29 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
 		switch (l4_proto) {
 		case IPPROTO_UDP:
 			tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
-			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
+			*tx_flags |= I40E_TX_FLAGS_TUNNEL;
 			break;
+#ifdef HAVE_GRE_ENCAP_OFFLOAD
 		case IPPROTO_GRE:
 			tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
-			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
+			*tx_flags |= I40E_TX_FLAGS_TUNNEL;
+			/* There was a long-standing issue in GRE where GSO
+			 * was not setting the outer transport header unless
+			 * a GRE checksum was requested. This was fixed in
+			 * the 4.6 version of the kernel.  In the 4.7 kernel
+			 * support for GRE over IPv6 was added to GSO.  So we
+			 * can apply this workaround for all IPv4 headers
+			 * without impacting later kernel versions.
+			 */
+			if (ip.v4->version == 4)
+				l4.hdr = ip.hdr + (ip.v4->ihl * 4);
 			break;
 		case IPPROTO_IPIP:
 		case IPPROTO_IPV6:
-			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
+			*tx_flags |= I40E_TX_FLAGS_TUNNEL;
 			l4.hdr = skb_inner_network_header(skb);
 			break;
+#endif
 		default:
 			if (*tx_flags & I40E_TX_FLAGS_TSO)
 				return -1;
@@ -2861,6 +3424,11 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
 			return 0;
 		}
 
+#ifdef I40E_ADD_PROBES
+		if (*tx_flags & I40E_TX_FLAGS_IPV4)
+			if (*tx_flags & I40E_TX_FLAGS_TSO)
+				tx_ring->vsi->back->tx_ip4_cso++;
+#endif
 		/* compute outer L3 header size */
 		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
 			  I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
@@ -2874,7 +3442,9 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
 
 		/* indicate if we need to offload outer UDP header */
 		if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
+#ifdef NETIF_F_GSO_PARTIAL
 		    !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+#endif
 		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
 			tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
 
@@ -2892,16 +3462,22 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
 		if (ip.v6->version == 6)
 			*tx_flags |= I40E_TX_FLAGS_IPV6;
 	}
+#endif /* HAVE_ENCAP_CSUM_OFFLOAD */
 
 	/* Enable IP checksum offloads */
 	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
 		l4_proto = ip.v4->protocol;
+#ifdef I40E_ADD_PROBES
+		if (*tx_flags & I40E_TX_FLAGS_TSO)
+			tx_ring->vsi->back->tx_ip4_cso++;
+#endif
 		/* the stack computes the IP header already, the only time we
 		 * need the hardware to recompute it is in the case of TSO.
 		 */
 		cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
 		       I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
 		       I40E_TX_DESC_CMD_IIPT_IPV4;
+#ifdef NETIF_F_IPV6_CSUM
 	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
 		cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
 
@@ -2910,6 +3486,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
 		if (l4.hdr != exthdr)
 			ipv6_skip_exthdr(skb, exthdr - skb->data,
 					 &l4_proto, &frag_off);
+#endif
 	}
 
 	/* compute inner L3 header size */
@@ -2921,18 +3498,29 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
 		/* enable checksum offloads */
 		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
 		offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+#ifdef I40E_ADD_PROBES
+		tx_ring->vsi->back->tx_tcp_cso++;
+#endif
 		break;
 	case IPPROTO_SCTP:
 		/* enable SCTP checksum offload */
+#ifdef HAVE_SCTP
 		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
 		offset |= (sizeof(struct sctphdr) >> 2) <<
 			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+#ifdef I40E_ADD_PROBES
+		tx_ring->vsi->back->tx_sctp_cso++;
+#endif
+#endif /* HAVE_SCTP */
 		break;
 	case IPPROTO_UDP:
 		/* enable UDP checksum offload */
 		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
 		offset |= (sizeof(struct udphdr) >> 2) <<
 			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+#ifdef I40E_ADD_PROBES
+		tx_ring->vsi->back->tx_udp_cso++;
+#endif
 		break;
 	default:
 		if (*tx_flags & I40E_TX_FLAGS_TSO)
@@ -3016,7 +3604,7 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
  **/
 bool __i40e_chk_linearize(struct sk_buff *skb)
 {
-	const struct skb_frag_struct *frag, *stale;
+	const skb_frag_t *frag, *stale;
 	int nr_frags, sum;
 
 	/* no need to check if number of frags is less than 7 */
@@ -3048,10 +3636,30 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
 	/* Walk through fragments adding latest fragment, testing it, and
 	 * then removing stale fragments from the sum.
 	 */
-	stale = &skb_shinfo(skb)->frags[0];
-	for (;;) {
+	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
+		int stale_size = skb_frag_size(stale);
+
 		sum += skb_frag_size(frag++);
 
+		/* The stale fragment may present us with a smaller
+		 * descriptor than the actual fragment size. To account
+		 * for that we need to remove all the data on the front and
+		 * figure out what the remainder would be in the last
+		 * descriptor associated with the fragment.
+		 */
+		if (stale_size > I40E_MAX_DATA_PER_TXD) {
+			int align_pad = -(skb_frag_off(stale)) &
+					(I40E_MAX_READ_REQ_SIZE - 1);
+
+			sum -= align_pad;
+			stale_size -= align_pad;
+
+			do {
+				sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+				stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+			} while (stale_size > I40E_MAX_DATA_PER_TXD);
+		}
+
 		/* if sum is negative we failed to make sufficient progress */
 		if (sum < 0)
 			return true;
@@ -3059,7 +3667,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
 		if (!nr_frags--)
 			break;
 
-		sum -= skb_frag_size(stale++);
+		sum -= stale_size;
 	}
 
 	return false;
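
The stale-size handling matters because the hardware may split one large
fragment across several data descriptors at I40E_MAX_READ_REQ_SIZE
boundaries; only the residue in the fragment's final descriptor should
stay in the sliding-window sum. A worked example, assuming the driver's
usual constants (I40E_MAX_READ_REQ_SIZE 4096, I40E_MAX_DATA_PER_TXD 16383,
hence I40E_MAX_DATA_PER_TXD_ALIGNED 12288):

	/* stale fragment: 30000 bytes starting at offset 100 in its page */
	int stale_size = 30000;
	int align_pad = -(100) & (4096 - 1);	/* 3996: pad to 4K bound */

	stale_size -= align_pad;	/* 26004                         */
	stale_size -= 12288;		/* 13716, now <= 16383, so stop  */
	/* 13716 bytes land in the fragment's final descriptor; that is
	 * what remains to be subtracted when the fragment goes stale
	 */
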
@@ -3075,7 +3683,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
  * @td_cmd:   the command field in the descriptor
  * @td_offset: offset for checksum or crc
  *
- * Returns 0 on success, -1 on failure to DMA
+ * Returns 0 on success, negative error code on DMA failure.
  **/
 static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 			      struct i40e_tx_buffer *first, u32 tx_flags,
@@ -3083,7 +3691,7 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 {
 	unsigned int data_len = skb->data_len;
 	unsigned int size = skb_headlen(skb);
-	struct skb_frag_struct *frag;
+	skb_frag_t *frag;
 	struct i40e_tx_buffer *tx_bi;
 	struct i40e_tx_desc *tx_desc;
 	u16 i = tx_ring->next_to_use;
@@ -3097,6 +3705,11 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 			 I40E_TX_FLAGS_VLAN_SHIFT;
 	}
 
+#ifdef I40E_ADD_PROBES
+	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
+		tx_ring->vsi->back->tcp_segs += first->gso_segs;
+
+#endif
 	first->tx_flags = tx_flags;
 
 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
@@ -3176,38 +3789,12 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	/* write last descriptor with EOP bit */
 	td_cmd |= I40E_TX_DESC_CMD_EOP;
 
-	/* We can OR these values together as they both are checked against
-	 * 4 below and at this point desc_count will be used as a boolean value
-	 * after this if/else block.
+	/* We OR these values together to check both against 4 (WB_STRIDE)
+	 * below. This is safe since we don't re-use desc_count afterwards.
 	 */
 	desc_count |= ++tx_ring->packet_stride;
 
-	/* Algorithm to optimize tail and RS bit setting:
-	 * if queue is stopped
-	 *	mark RS bit
-	 *	reset packet counter
-	 * else if xmit_more is supported and is true
-	 *	advance packet counter to 4
-	 *	reset desc_count to 0
-	 *
-	 * if desc_count >= 4
-	 *	mark RS bit
-	 *	reset packet counter
-	 * if desc_count > 0
-	 *	update tail
-	 *
-	 * Note: If there are less than 4 descriptors
-	 * pending and interrupts were disabled the service task will
-	 * trigger a force WB.
-	 */
-	if (netif_xmit_stopped(txring_txq(tx_ring))) {
-		goto do_rs;
-	} else if (skb->xmit_more) {
-		/* set stride to arm on next packet and reset desc_count */
-		tx_ring->packet_stride = WB_STRIDE;
-		desc_count = 0;
-	} else if (desc_count >= WB_STRIDE) {
-do_rs:
+	if (desc_count >= WB_STRIDE) {
 		/* write last descriptor with RS bit set */
 		td_cmd |= I40E_TX_DESC_CMD_RS;
 		tx_ring->packet_stride = 0;
@@ -3216,6 +3803,11 @@ do_rs:
 	tx_desc->cmd_type_offset_bsz =
 			build_ctob(td_cmd, td_offset, size, td_tag);
 
+	/* timestamp the skb as late as possible, just prior to notifying
+	 * the MAC that it should transmit this packet
+	 */
+	skb_tx_timestamp(skb);
+
 	/* Force memory writes to complete before letting h/w know there
 	 * are new descriptors to fetch.
 	 *
@@ -3228,14 +3820,37 @@ do_rs:
 	first->next_to_watch = tx_desc;
 
 	/* notify HW of packet */
-	if (desc_count) {
+#ifdef HAVE_SKB_XMIT_MORE
+	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
 		writel(i, tx_ring->tail);
 
-		/* we need this if more than one processor can write to our tail
-		 * at a time, it synchronizes IO on IA64/Altix systems
+#ifndef SPIN_UNLOCK_IMPLIES_MMIOWB
+		/* We need this mmiowb on IA64/Altix systems where wmb() isn't
+		 * guaranteed to synchronize I/O.
+		 *
+		 * Note that mmiowb() only provides a guarantee about ordering
+		 * when in conjunction with a spin_unlock(). This barrier is
+		 * used to guarantee the I/O ordering with respect to a spin
+		 * lock in the networking core code.
 		 */
 		mmiowb();
+#endif
 	}
+#else
+	writel(i, tx_ring->tail);
+
+#ifndef SPIN_UNLOCK_IMPLIES_MMIOWB
+	/* We need this mmiowb on IA64/Altix systems where wmb() isn't
+	 * guaranteed to synchronize I/O.
+	 *
+	 * Note that mmiowb() only provides a guarantee about ordering when in
+	 * conjunction with a spin_unlock(). This barrier is used to guarantee
+	 * the I/O ordering with respect to a spin lock in the networking core
+	 * code.
+	 */
+	mmiowb();
+#endif
+#endif /* HAVE_SKB_XMIT_MORE */
 
 	return 0;
 
@@ -3255,36 +3870,63 @@ dma_error:
 
 	tx_ring->next_to_use = i;
 
-	return -1;
+	return -EIO;
+}
+
+#if !defined(HAVE_NET_DEVICE_OPS) && defined(HAVE_NETDEV_SELECT_QUEUE)
+/**
+ * i40e_lan_select_queue - Select the right Tx queue for the skb for LAN VSI
+ * @netdev: network interface device structure
+ * @skb:    send buffer
+ *
+ * Returns the index of the selected Tx queue
+ **/
+u16 i40e_lan_select_queue(struct net_device *netdev, struct sk_buff *skb)
+{
+	return skb_tx_hash(netdev, skb);
 }
 
+#endif /* !HAVE_NET_DEVICE_OPS && HAVE_NETDEV_SELECT_QUEUE */
+
+#ifdef HAVE_XDP_SUPPORT
 /**
  * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
- * @xdp: data to transmit
+ * @xdp: frame data to transmit
  * @xdp_ring: XDP Tx ring
  **/
+#ifdef HAVE_XDP_FRAME_STRUCT
+static int i40e_xmit_xdp_ring(struct xdp_frame *xdp,
+			      struct i40e_ring *xdp_ring)
+#else
 static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
 			      struct i40e_ring *xdp_ring)
+#endif
 {
-	u32 size = xdp->data_end - xdp->data;
 	u16 i = xdp_ring->next_to_use;
 	struct i40e_tx_buffer *tx_bi;
 	struct i40e_tx_desc *tx_desc;
 	dma_addr_t dma;
+	void *data;
+	u32 size;
+
+	size = xdp_get_len(xdp);
+	data = xdp->data;
 
 	if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
 		xdp_ring->tx_stats.tx_busy++;
 		return I40E_XDP_CONSUMED;
 	}
-
-	dma = dma_map_single(xdp_ring->dev, xdp->data, size, DMA_TO_DEVICE);
+	dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
 	if (dma_mapping_error(xdp_ring->dev, dma))
 		return I40E_XDP_CONSUMED;
-
 	tx_bi = &xdp_ring->tx_bi[i];
 	tx_bi->bytecount = size;
 	tx_bi->gso_segs = 1;
-	tx_bi->raw_buf = xdp->data;
+#ifdef HAVE_XDP_FRAME_STRUCT
+	tx_bi->xdpf = xdp;
+#else
+	tx_bi->raw_buf = data;
+#endif
 
 	/* record length, and DMA address */
 	dma_unmap_len_set(tx_bi, len, size);
@@ -3304,12 +3946,11 @@ static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
 	i++;
 	if (i == xdp_ring->count)
 		i = 0;
-
 	tx_bi->next_to_watch = tx_desc;
 	xdp_ring->next_to_use = i;
-
 	return I40E_XDP_TX;
 }
+#endif
 
 /**
  * i40e_xmit_frame_ring - Sends buffer on Tx ring
@@ -3330,7 +3971,9 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	u32 td_cmd = 0;
 	u8 hdr_len = 0;
 	int tso, count;
+#ifdef HAVE_PTP_1588_CLOCK
 	int tsyn;
+#endif /* HAVE_PTP_1588_CLOCK */
 
 	/* prefetch the data, we'll need it later */
 	prefetch(skb->data);
@@ -3390,12 +4033,13 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	if (tso < 0)
 		goto out_drop;
 
+#ifdef HAVE_PTP_1588_CLOCK
 	tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
 
 	if (tsyn)
 		tx_flags |= I40E_TX_FLAGS_TSYN;
 
-	skb_tx_timestamp(skb);
+#endif /* HAVE_PTP_1588_CLOCK */
 
 	/* always enable CRC insertion offload */
 	td_cmd |= I40E_TX_DESC_CMD_ICRC;
@@ -3409,16 +4053,25 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	 */
 	i40e_atr(tx_ring, skb, tx_flags);
 
+#ifdef HAVE_PTP_1588_CLOCK
 	if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
 			td_cmd, td_offset))
 		goto cleanup_tx_tstamp;
+#else
+	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+		    td_cmd, td_offset);
+#endif
 
+#ifndef HAVE_TRANS_START_IN_QUEUE
+	tx_ring->netdev->trans_start = jiffies;
+#endif
 	return NETDEV_TX_OK;
 
 out_drop:
 	i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
 	dev_kfree_skb_any(first->skb);
 	first->skb = NULL;
+#ifdef HAVE_PTP_1588_CLOCK
 cleanup_tx_tstamp:
 	if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
 		struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);
@@ -3427,7 +4080,7 @@ cleanup_tx_tstamp:
 		pf->ptp_tx_skb = NULL;
 		clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
 	}
-
+#endif
 	return NETDEV_TX_OK;
 }
 
@@ -3452,3 +4105,90 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 	return i40e_xmit_frame_ring(skb, tx_ring);
 }
+
+#ifdef HAVE_XDP_SUPPORT
+/**
+ * i40e_xdp_xmit - Implements ndo_xdp_xmit
+ * @dev: netdev
+ * @n: amount of frames
+ * @frames: XDP frames
+ * @flags: XDP xmit flags
+ *
+ * Returns the number of frames successfully sent. Frames that fail are
+ * freed via the XDP return API.
+ *
+ * For error cases, a negative errno code is returned and no frames
+ * are transmitted (the caller must handle freeing the frames).
+ **/
+#ifdef HAVE_XDP_FRAME_STRUCT
+int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+		  u32 flags)
+#else
+int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
+#endif
+{
+	struct i40e_netdev_priv *np = netdev_priv(dev);
+	unsigned int queue_index = smp_processor_id();
+	struct i40e_vsi *vsi = np->vsi;
+#ifdef HAVE_XDP_FRAME_STRUCT
+	struct i40e_ring *xdp_ring;
+	int drops = 0;
+	int i;
+#endif
+	int err;
+
+	if (test_bit(__I40E_VSI_DOWN, vsi->state))
+		return -ENETDOWN;
+
+	if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
+		return -ENXIO;
+#ifdef HAVE_XDP_FRAME_STRUCT
+	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+		return -EINVAL;
+
+	xdp_ring = vsi->xdp_rings[queue_index];
+
+	for (i = 0; i < n; i++) {
+		struct xdp_frame *xdpf = frames[i];
+
+		err = i40e_xmit_xdp_ring(xdpf, xdp_ring);
+		if (err != I40E_XDP_TX) {
+			xdp_return_frame_rx_napi(xdpf);
+			drops++;
+		}
+	}
+
+	if (unlikely(flags & XDP_XMIT_FLUSH))
+		i40e_xdp_ring_update_tail(xdp_ring);
+
+	return n - drops;
+#else
+	err = i40e_xmit_xdp_ring(xdp, vsi->xdp_rings[queue_index]);
+
+	if (err != I40E_XDP_TX)
+		return -ENOSPC;
+
+	return 0;
+#endif
+}
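
Under HAVE_XDP_FRAME_STRUCT the return value is the count of frames
actually queued and failed frames are freed internally, so a caller only
compares the result against n. A hypothetical caller sketch:

	int sent = i40e_xdp_xmit(dev, n, frames, XDP_XMIT_FLUSH);

	if (sent < 0)
		return sent;	/* nothing queued; caller keeps the frames */
	drops = n - sent;	/* failures were already handed back via
				 * xdp_return_frame_rx_napi()
				 */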
+
+/**
+ * i40e_xdp_flush - Implements ndo_xdp_flush
+ * @dev: netdev
+ **/
+void i40e_xdp_flush(struct net_device *dev)
+{
+	struct i40e_netdev_priv *np = netdev_priv(dev);
+	unsigned int queue_index = smp_processor_id();
+	struct i40e_vsi *vsi = np->vsi;
+
+	if (test_bit(__I40E_VSI_DOWN, vsi->state))
+		return;
+
+	if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
+		return;
+
+	i40e_xdp_ring_update_tail(vsi->xdp_rings[queue_index]);
+}
+#endif
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 2f848bc5e..da598721f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -1,57 +1,41 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
 #ifndef _I40E_TXRX_H_
 #define _I40E_TXRX_H_
 
 /* Interrupt Throttling and Rate Limiting Goodies */
-
-#define I40E_MAX_ITR               0x0FF0  /* reg uses 2 usec resolution */
-#define I40E_MIN_ITR               0x0001  /* reg uses 2 usec resolution */
-#define I40E_ITR_100K              0x0005
-#define I40E_ITR_50K               0x000A
-#define I40E_ITR_20K               0x0019
-#define I40E_ITR_18K               0x001B
-#define I40E_ITR_8K                0x003E
-#define I40E_ITR_4K                0x007A
-#define I40E_MAX_INTRL             0x3B    /* reg uses 4 usec resolution */
-#define I40E_ITR_RX_DEF            I40E_ITR_20K
-#define I40E_ITR_TX_DEF            I40E_ITR_20K
-#define I40E_ITR_DYNAMIC           0x8000  /* use top bit as a flag */
-#define I40E_MIN_INT_RATE          250     /* ~= 1000000 / (I40E_MAX_ITR * 2) */
-#define I40E_MAX_INT_RATE          500000  /* == 1000000 / (I40E_MIN_ITR * 2) */
 #define I40E_DEFAULT_IRQ_WORK      256
-#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
-#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
-#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
+
+/* The datasheet for the X710 and XL710 indicates that the maximum value for
+ * the ITR is 8160usec which is then called out as 0xFF0 with a 2usec
+ * resolution. 8160 is 0x1FE0 when written out in hex. So instead of storing
+ * the register value, which is divided by 2, let's use the actual values and
+ * avoid an excessive amount of translation.
+ */
+#define I40E_ITR_DYNAMIC	0x8000	/* use top bit as a flag */
+#define I40E_ITR_MASK		0x1FFE	/* mask for ITR register value */
+#define I40E_MIN_ITR		     2	/* reg uses 2 usec resolution */
+#define I40E_ITR_100K		    10  /* all values below must be even */
+#define I40E_ITR_50K		    20
+#define I40E_ITR_20K		    50
+#define I40E_ITR_18K		    60
+#define I40E_ITR_8K		   122
+#define I40E_MAX_ITR		  8160	/* maximum value as per datasheet */
+#define ITR_TO_REG(setting) ((setting) & ~I40E_ITR_DYNAMIC)
+#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~I40E_ITR_MASK)
+#define ITR_IS_DYNAMIC(setting) (!!((setting) & I40E_ITR_DYNAMIC))
+
+#define I40E_ITR_RX_DEF            (I40E_ITR_20K | I40E_ITR_DYNAMIC)
+#define I40E_ITR_TX_DEF            (I40E_ITR_20K | I40E_ITR_DYNAMIC)
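
With ITR values stored in microseconds, ITR_TO_REG now only strips the
dynamic flag and ITR_REG_ALIGN rounds a request up to the even values the
2-usec hardware can express. A short worked example using the macros
defined above:

	u16 setting = 57 | I40E_ITR_DYNAMIC;	/* user asks for ~57 usec */
	bool dynamic = ITR_IS_DYNAMIC(setting);	/* true: flag bit is set  */
	u16 usecs = ITR_TO_REG(setting);	/* 57: flag stripped      */
	u16 aligned = ITR_REG_ALIGN(usecs);	/* 58: next even value    */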
+
 /* 0x40 is the enable bit for interrupt rate limiting, and must be set if
  * the value of the rate limit is non-zero
  */
 #define INTRL_ENA                  BIT(6)
+#define I40E_MAX_INTRL             0x3B    /* reg uses 4 usec resolution */
 #define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
+
 /**
  * i40e_intrl_usec_to_reg - convert interrupt rate limit to register
  * @intrl: interrupt rate limit to convert
@@ -131,10 +115,18 @@ enum i40e_dyn_idx_t {
  */
 #define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
 #define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
+#ifdef I40E_32BYTE_RX
 #define i40e_rx_desc i40e_32byte_rx_desc
+#else
+#define i40e_rx_desc i40e_16byte_rx_desc
+#endif
 
+#ifdef HAVE_STRUCT_DMA_ATTRS
+#define I40E_RX_DMA_ATTR NULL
+#else
 #define I40E_RX_DMA_ATTR \
 	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+#endif
 
 /* Attempt to maximize the headroom available for incoming frames.  We
  * use a 2K buffer for receives and need 1536/1534 to store the data for
@@ -206,7 +198,7 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
 }
 
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
-#define I40E_RX_BUFFER_WRITE	16	/* Must be power of 2 */
+#define I40E_RX_BUFFER_WRITE	32	/* Must be power of 2 */
 #define I40E_RX_INCREMENT(r, i) \
 	do {					\
 		(i)++;				\
@@ -275,7 +267,7 @@ static inline unsigned int i40e_txd_use_count(unsigned int size)
 }
 
 /* Tx Descriptors needed, worst case */
-#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
 #define I40E_MIN_DESC_PENDING	4
 
 #define I40E_TX_FLAGS_HW_VLAN		BIT(1)
@@ -285,9 +277,11 @@ static inline unsigned int i40e_txd_use_count(unsigned int size)
 #define I40E_TX_FLAGS_IPV6		BIT(5)
 #define I40E_TX_FLAGS_FCCRC		BIT(6)
 #define I40E_TX_FLAGS_FSO		BIT(7)
+#ifdef HAVE_PTP_1588_CLOCK
 #define I40E_TX_FLAGS_TSYN		BIT(8)
+#endif /* HAVE_PTP_1588_CLOCK */
 #define I40E_TX_FLAGS_FD_SB		BIT(9)
-#define I40E_TX_FLAGS_UDP_TUNNEL	BIT(10)
+#define I40E_TX_FLAGS_TUNNEL		BIT(10)
 #define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
@@ -296,6 +290,7 @@ static inline unsigned int i40e_txd_use_count(unsigned int size)
 struct i40e_tx_buffer {
 	struct i40e_tx_desc *next_to_watch;
 	union {
+		struct xdp_frame *xdpf;
 		struct sk_buff *skb;
 		void *raw_buf;
 	};
@@ -309,6 +304,9 @@ struct i40e_tx_buffer {
 
 struct i40e_rx_buffer {
 	dma_addr_t dma;
+#ifdef CONFIG_I40E_DISABLE_PACKET_SPLIT
+	struct sk_buff *skb;
+#else
 	struct page *page;
 #if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
 	__u32 page_offset;
@@ -316,6 +314,7 @@ struct i40e_rx_buffer {
 	__u16 page_offset;
 #endif
 	__u16 pagecnt_bias;
+#endif /* CONFIG_I40E_DISABLE_PACKET_SPLIT */
 };
 
 struct i40e_queue_stats {
@@ -323,12 +322,24 @@ struct i40e_queue_stats {
 	u64 bytes;
 };
 
+#ifdef HAVE_XDP_SUPPORT
+struct i40e_xdp_stats {
+	u64 xdp_pass;
+	u64 xdp_drop;
+	u64 xdp_tx;
+	u64 xdp_unknown;
+	u64 xdp_redirect;
+	u64 xdp_redirect_fail;
+};
+#endif
+
 struct i40e_tx_queue_stats {
 	u64 restart_queue;
 	u64 tx_busy;
 	u64 tx_done_old;
 	u64 tx_linearize;
 	u64 tx_force_wb;
+	int prev_pkt_ctr;
 };
 
 struct i40e_rx_queue_stats {
@@ -342,6 +353,7 @@ struct i40e_rx_queue_stats {
 enum i40e_ring_state_t {
 	__I40E_TX_FDIR_INIT_DONE,
 	__I40E_TX_XPS_INIT_DONE,
+	__I40E_RING_STATE_NBITS /* must be last */
 };
 
 /* some useful defines for virtchannel interface, which
@@ -366,7 +378,7 @@ struct i40e_ring {
 		struct i40e_tx_buffer *tx_bi;
 		struct i40e_rx_buffer *rx_bi;
 	};
-	unsigned long state;
+	DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS);
 	u16 queue_index;		/* Queue number of ring */
 	u8 dcb_tc;			/* Traffic class of ring */
 	u8 __iomem *tail;
@@ -376,8 +388,7 @@ struct i40e_ring {
 	 * these values always store the USER setting, and must be converted
 	 * before programming to a register.
 	 */
-	u16 rx_itr_setting;
-	u16 tx_itr_setting;
+	u16 itr_setting;
 
 	u16 count;			/* Number of descriptors */
 	u16 reg_idx;			/* HW register index of the ring */
@@ -401,7 +412,12 @@ struct i40e_ring {
 
 	/* stats structs */
 	struct i40e_queue_stats	stats;
+#ifdef HAVE_NDO_GET_STATS64
 	struct u64_stats_sync syncp;
+#endif
+#ifdef HAVE_XDP_SUPPORT
+	struct i40e_xdp_stats xdp_stats;
+#endif
 	union {
 		struct i40e_tx_queue_stats tx_stats;
 		struct i40e_rx_queue_stats rx_stats;
@@ -423,6 +439,11 @@ struct i40e_ring {
 					 * i40e_clean_rx_ring_irq() is called
 					 * for this ring.
 					 */
+
+	struct i40e_channel *ch;
+#ifdef HAVE_XDP_BUFF_RXQ
+	struct xdp_rxq_info xdp_rxq;
+#endif
 } ____cacheline_internodealigned_in_smp;
 
 static inline bool ring_uses_build_skb(struct i40e_ring *ring)
@@ -440,6 +461,13 @@ static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
 	ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
 }
 
+#define I40E_ITR_ADAPTIVE_MIN_INC       0x0002
+#define I40E_ITR_ADAPTIVE_MIN_USECS     0x0002
+#define I40E_ITR_ADAPTIVE_MAX_USECS     0x007e
+#define I40E_ITR_ADAPTIVE_LATENCY       0x8000
+#define I40E_ITR_ADAPTIVE_BULK          0x0000
+#define ITR_IS_BULK(x) (!((x) & I40E_ITR_ADAPTIVE_LATENCY))
+
 static inline bool ring_is_xdp(struct i40e_ring *ring)
 {
 	return !!(ring->flags & I40E_TXR_FLAGS_XDP);
@@ -450,21 +478,14 @@ static inline void set_ring_xdp(struct i40e_ring *ring)
 	ring->flags |= I40E_TXR_FLAGS_XDP;
 }
 
-enum i40e_latency_range {
-	I40E_LOWEST_LATENCY = 0,
-	I40E_LOW_LATENCY = 1,
-	I40E_BULK_LATENCY = 2,
-};
-
 struct i40e_ring_container {
-	/* array of pointers to rings */
-	struct i40e_ring *ring;
+	struct i40e_ring *ring;		/* pointer to linked list of ring(s) */
+	unsigned long next_update;	/* jiffies value of next update */
 	unsigned int total_bytes;	/* total bytes processed this int */
 	unsigned int total_packets;	/* total packets processed this int */
-	unsigned long last_itr_update;	/* jiffies of last ITR update */
 	u16 count;
-	enum i40e_latency_range latency_range;
-	u16 itr;
+	u16 target_itr;			/* target ITR setting for ring(s) */
+	u16 current_itr;		/* current ITR setting for ring(s) */
 };
 
 /* iterator for handling rings in ring container */
@@ -484,6 +505,10 @@ static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
 
 bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+#if !defined(HAVE_NET_DEVICE_OPS) && defined(HAVE_NETDEV_SELECT_QUEUE)
+extern u16 i40e_lan_select_queue(struct net_device *netdev,
+				 struct sk_buff *skb);
+#endif
 void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
 void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
@@ -492,9 +517,31 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring);
 void i40e_free_rx_resources(struct i40e_ring *rx_ring);
 int i40e_napi_poll(struct napi_struct *napi, int budget);
 void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
-u32 i40e_get_tx_pending(struct i40e_ring *ring);
+u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
+void i40e_detect_recover_hung(struct i40e_vsi *vsi);
 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
 bool __i40e_chk_linearize(struct sk_buff *skb);
+#ifdef HAVE_XDP_FRAME_STRUCT
+int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+		  u32 flags);
+#else
+int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp);
+#endif
+void i40e_xdp_flush(struct net_device *dev);
+
+#ifdef HAVE_XDP_SUPPORT
+#ifdef HAVE_XDP_FRAME_STRUCT
+static inline u32 xdp_get_len(struct xdp_frame *xdp)
+{
+	return xdp->len;
+}
+#else
+static inline u32 xdp_get_len(struct xdp_buff *xdp)
+{
+	return xdp->data_end - xdp->data;
+}
+#endif
+#endif
 
 /**
  * i40e_get_head - Retrieve head from head writeback
@@ -521,7 +568,7 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
  **/
 static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
 {
-	const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 	int count = 0, size = skb_headlen(skb);
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index fd4bbdd88..147d84ec7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1,28 +1,5 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
 #ifndef _I40E_TYPE_H_
 #define _I40E_TYPE_H_
@@ -36,16 +13,19 @@
 #include "i40e_devids.h"
 
 /* I40E_MASK is a macro used on 32 bit registers */
-#define I40E_MASK(mask, shift) ((u32)(mask) << (shift))
+#define I40E_MASK(mask, shift) (mask << shift)
 
 #define I40E_MAX_VSI_QP			16
-#define I40E_MAX_VF_VSI			3
+#define I40E_MAX_VF_VSI			4
 #define I40E_MAX_CHAINED_RX_BUFFERS	5
 #define I40E_MAX_PF_UDP_OFFLOAD_PORTS	16
 
 /* Max default timeout in ms, */
 #define I40E_MAX_NVM_TIMEOUT		18000
 
+/* Max timeout in ms for the phy to respond */
+#define I40E_MAX_PHY_TIMEOUT		500
+
 /* Switch from ms to the 1usec global time (this is the GTIME resolution) */
 #define I40E_MS_TO_GTIME(time)		((time) * 1000)
 
@@ -53,6 +33,9 @@
 struct i40e_hw;
 typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
 
+#ifndef ETH_ALEN
+#define ETH_ALEN	6
+#endif
 /* Data type manipulation macros. */
 
 #define I40E_DESC_UNUSED(R)	\
@@ -79,7 +62,9 @@ enum i40e_debug_mask {
 	I40E_DEBUG_DIAG			= 0x00000800,
 	I40E_DEBUG_FD			= 0x00001000,
 	I40E_DEBUG_PACKAGE		= 0x00002000,
+
 	I40E_DEBUG_IWARP		= 0x00F00000,
+
 	I40E_DEBUG_AQ_MESSAGE		= 0x01000000,
 	I40E_DEBUG_AQ_DESCRIPTOR	= 0x02000000,
 	I40E_DEBUG_AQ_DESC_BUFFER	= 0x04000000,
@@ -105,16 +90,16 @@ enum i40e_debug_mask {
 #define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK	I40E_MASK(1, \
 						  I40E_GLGEN_MSCA_OPCODE_SHIFT)
 #define I40E_MDIO_CLAUSE45_OPCODE_READ_INC_ADDR_MASK	I40E_MASK(2, \
-						I40E_GLGEN_MSCA_OPCODE_SHIFT)
+						  I40E_GLGEN_MSCA_OPCODE_SHIFT)
 #define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK	I40E_MASK(3, \
-						I40E_GLGEN_MSCA_OPCODE_SHIFT)
+						  I40E_GLGEN_MSCA_OPCODE_SHIFT)
 
-#define I40E_PHY_COM_REG_PAGE                   0x1E
-#define I40E_PHY_LED_LINK_MODE_MASK             0xF0
-#define I40E_PHY_LED_MANUAL_ON                  0x100
-#define I40E_PHY_LED_PROV_REG_1                 0xC430
-#define I40E_PHY_LED_MODE_MASK                  0xFFFF
-#define I40E_PHY_LED_MODE_ORIG                  0x80000000
+#define I40E_PHY_COM_REG_PAGE			0x1E
+#define I40E_PHY_LED_LINK_MODE_MASK		0xF0
+#define I40E_PHY_LED_MANUAL_ON			0x100
+#define I40E_PHY_LED_PROV_REG_1			0xC430
+#define I40E_PHY_LED_MODE_MASK			0xFFFF
+#define I40E_PHY_LED_MODE_ORIG			0x80000000
 
 /* These are structs for managing the hardware information and the operations.
  * The structures of function pointers are filled out at init time when we
@@ -253,7 +238,8 @@ struct i40e_phy_info {
 #define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \
 				BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL)
 #define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2)
-/* Defining the macro I40E_TYPE_OFFSET to implement a bit shift for some
+/*
+ * Defining the macro I40E_TYPE_OFFSET to implement a bit shift for some
  * PHY types. There is an unused bit (31) in the I40E_CAP_PHY_TYPE_* bit
  * fields but no corresponding gap in the i40e_aq_phy_type enumeration. So,
  * a shift is needed to adjust for this with values larger than 31. The
@@ -268,7 +254,26 @@ struct i40e_phy_info {
 					     I40E_PHY_TYPE_OFFSET)
 #define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \
 					     I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_AOC BIT_ULL(I40E_PHY_TYPE_25GBASE_AOC + \
+					     I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_ACC BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC + \
+					     I40E_PHY_TYPE_OFFSET)
+/* Offset for 2.5G/5G PHY Types value to bit number conversion */
+#define I40E_PHY_TYPE_OFFSET2 (-10)
+#define I40E_CAP_PHY_TYPE_2_5GBASE_T BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T + \
+					     I40E_PHY_TYPE_OFFSET2)
+#define I40E_CAP_PHY_TYPE_5GBASE_T BIT_ULL(I40E_PHY_TYPE_5GBASE_T + \
+					     I40E_PHY_TYPE_OFFSET2)
 #define I40E_HW_CAP_MAX_GPIO			30
+enum i40e_acpi_programming_method {
+	I40E_ACPI_PROGRAMMING_METHOD_HW_FVL = 0,
+	I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK = 1
+};
+
+#define I40E_WOL_SUPPORT_MASK			0x1
+#define I40E_ACPI_PROGRAMMING_METHOD_MASK	0x2
+#define I40E_PROXY_SUPPORT_MASK			0x4
+
 /* Capabilities of a PF or a VF or the whole device */
 struct i40e_hw_capabilities {
 	u32  switch_mode;
@@ -276,6 +281,16 @@ struct i40e_hw_capabilities {
 #define I40E_NVM_IMAGE_TYPE_CLOUD	0x2
 #define I40E_NVM_IMAGE_TYPE_UDP_CLOUD	0x3
 
+	/* Cloud filter modes:
+	 * Mode1: Filter on L4 port only
+	 * Mode2: Filter for non-tunneled traffic
+	 * Mode3: Filter for tunnel traffic
+	 */
+#define I40E_CLOUD_FILTER_MODE1	0x6
+#define I40E_CLOUD_FILTER_MODE2	0x7
+#define I40E_CLOUD_FILTER_MODE3	0x8
+#define I40E_SWITCH_MODE_MASK	0xF
+
 	u32  management_mode;
 	u32  mng_protocols_over_mctp;
 #define I40E_MNG_PROTOCOL_PLDM		0x2
@@ -336,6 +351,9 @@ struct i40e_hw_capabilities {
 	u32 enabled_tcmap;
 	u32 maxtc;
 	u64 wr_csr_prot;
+	bool apm_wol_support;
+	enum i40e_acpi_programming_method acpi_prog_method;
+	bool proxy_support;
 };
 
 struct i40e_mac_info {
@@ -385,6 +403,8 @@ enum i40e_nvmupd_cmd {
 	I40E_NVMUPD_STATUS,
 	I40E_NVMUPD_EXEC_AQ,
 	I40E_NVMUPD_GET_AQ_RESULT,
+	I40E_NVMUPD_GET_AQ_EVENT,
+	I40E_NVMUPD_FEATURES,
 };
 
 enum i40e_nvmupd_state {
@@ -404,18 +424,28 @@ enum i40e_nvmupd_state {
 
 #define I40E_NVM_MOD_PNT_MASK 0xFF
 
-#define I40E_NVM_TRANS_SHIFT	8
-#define I40E_NVM_TRANS_MASK	(0xf << I40E_NVM_TRANS_SHIFT)
-#define I40E_NVM_CON		0x0
-#define I40E_NVM_SNT		0x1
-#define I40E_NVM_LCB		0x2
-#define I40E_NVM_SA		(I40E_NVM_SNT | I40E_NVM_LCB)
-#define I40E_NVM_ERA		0x4
-#define I40E_NVM_CSUM		0x8
-#define I40E_NVM_EXEC		0xf
+#define I40E_NVM_TRANS_SHIFT			8
+#define I40E_NVM_TRANS_MASK			(0xf << I40E_NVM_TRANS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SHIFT	12
+#define I40E_NVM_PRESERVATION_FLAGS_MASK \
+				(0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SELECTED	0x01
+#define I40E_NVM_PRESERVATION_FLAGS_ALL		0x02
+#define I40E_NVM_CON				0x0
+#define I40E_NVM_SNT				0x1
+#define I40E_NVM_LCB				0x2
+#define I40E_NVM_SA				(I40E_NVM_SNT | I40E_NVM_LCB)
+#define I40E_NVM_ERA				0x4
+#define I40E_NVM_CSUM				0x8
+#define I40E_NVM_AQE				0xe
+#define I40E_NVM_EXEC				0xf
+
+#define I40E_NVM_EXEC_GET_AQ_RESULT		0x0
+#define I40E_NVM_EXEC_FEATURES			0xe
+#define I40E_NVM_EXEC_STATUS			0xf
 
 #define I40E_NVM_ADAPT_SHIFT	16
-#define I40E_NVM_ADAPT_MASK	(0xffff << I40E_NVM_ADAPT_SHIFT)
+#define I40E_NVM_ADAPT_MASK	(0xffffULL << I40E_NVM_ADAPT_SHIFT)
 
 #define I40E_NVMUPD_MAX_DATA	4096
 #define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */
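With the preservation flags added above, the nvmupd config word now packs four fields: the module pointer in bits 0-7, the transaction type in bits 8-11, the preservation flags in bits 12-13, and adapter-specific data from bit 16 up. A field-extraction sketch under those masks (the helpers are illustrative; the driver's real parser lives in its NVM update code):

/* Sketch: field extraction from an nvmupd config word, per the masks above */
static u8 nvm_config_module(u32 config)		/* bits 0-7 */
{
	return config & I40E_NVM_MOD_PNT_MASK;
}

static u8 nvm_config_trans(u32 config)		/* bits 8-11 */
{
	return (config & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT;
}

static u8 nvm_config_preservation(u32 config)	/* bits 12-13 */
{
	return (config & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
	       I40E_NVM_PRESERVATION_FLAGS_SHIFT;
}

/* I40E_NVM_SA == (I40E_NVM_SNT | I40E_NVM_LCB): a transaction that both
 * starts and ends with a single command.
 */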
@@ -428,6 +458,33 @@ struct i40e_nvm_access {
 	u8 data[1];
 };
 
+/* NVMUpdate features API */
+#define I40E_NVMUPD_FEATURES_API_VER_MAJOR		0
+#define I40E_NVMUPD_FEATURES_API_VER_MINOR		14
+#define I40E_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN	12
+
+#define I40E_NVMUPD_FEATURE_FLAT_NVM_SUPPORT		BIT(0)
+
+struct i40e_nvmupd_features {
+	u8 major;
+	u8 minor;
+	u16 size;
+	u8 features[I40E_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN];
+};
+
+/* (Q)SFP module access definitions */
+#define I40E_I2C_EEPROM_DEV_ADDR	0xA0
+#define I40E_I2C_EEPROM_DEV_ADDR2	0xA2
+#define I40E_MODULE_TYPE_ADDR		0x00
+#define I40E_MODULE_REVISION_ADDR	0x01
+#define I40E_MODULE_SFF_8472_COMP	0x5E
+#define I40E_MODULE_SFF_8472_SWAP	0x5C
+#define I40E_MODULE_SFF_ADDR_MODE	0x04
+#define I40E_MODULE_SFF_DIAG_CAPAB	0x40
+#define I40E_MODULE_TYPE_QSFP_PLUS	0x0D
+#define I40E_MODULE_TYPE_QSFP28		0x11
+#define I40E_MODULE_QSFP_MAX_LEN	640
+
 /* PCI bus types */
 enum i40e_bus_type {
 	i40e_bus_type_unknown = 0,
@@ -582,6 +639,7 @@ struct i40e_hw {
 	/* state of nvm update process */
 	enum i40e_nvmupd_state nvmupd_state;
 	struct i40e_aq_desc nvm_wb_desc;
+	struct i40e_aq_desc nvm_aq_event_desc;
 	struct i40e_virt_mem nvm_buff;
 	bool nvm_release_on_done;
 	u16 nvm_wait_opcode;
@@ -597,15 +655,34 @@ struct i40e_hw {
 	struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
 	struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
 
+	/* WoL and proxy support */
+	u16 num_wol_proxy_filters;
+	u16 wol_proxy_vsi_seid;
+
 #define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
+#define I40E_HW_FLAG_802_1AD_CAPABLE        BIT_ULL(1)
+#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE  BIT_ULL(2)
+#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
+#define I40E_HW_FLAG_FW_LLDP_STOPPABLE	    BIT_ULL(4)
+#define I40E_HW_FLAG_FW_LLDP_PERSISTENT     BIT_ULL(5)
+#define I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED BIT_ULL(6)
+#define I40E_HW_FLAG_DROP_MODE		    BIT_ULL(7)
 	u64 flags;
 
+	/* Used in set switch config AQ command */
+	u16 switch_tag;
+	u16 first_tag;
+	u16 second_tag;
+
+	/* NVMUpdate features */
+	struct i40e_nvmupd_features nvmupd_features;
+
 	/* debug mask */
 	u32 debug_mask;
 	char err_str[16];
 };
 
-static inline bool i40e_is_vf(struct i40e_hw *hw)
+static INLINE bool i40e_is_vf(struct i40e_hw *hw)
 {
 	return (hw->mac.type == I40E_MAC_VF ||
 		hw->mac.type == I40E_MAC_X722_VF);
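The BIT_ULL capability flags added above are plain bits in the u64 hw->flags word, so consumers gate optional paths on them with simple bit tests. A hedged sketch of the idiom (the branch bodies are placeholders, not driver code):

/* Sketch: feature-gating on the capability bits declared above */
if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
	/* firmware can proxy PHY register access via the admin queue */
} else {
	/* fall back to direct MDIO access through GLGEN_MSCA */
}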
@@ -705,32 +782,28 @@ enum i40e_rx_desc_status_bits {
 	I40E_RX_DESC_STATUS_CRCP_SHIFT		= 4,
 	I40E_RX_DESC_STATUS_TSYNINDX_SHIFT	= 5, /* 2 BITS */
 	I40E_RX_DESC_STATUS_TSYNVALID_SHIFT	= 7,
-	/* Note: Bit 8 is reserved in X710 and XL710 */
 	I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT	= 8,
+
 	I40E_RX_DESC_STATUS_UMBCAST_SHIFT	= 9, /* 2 BITS */
 	I40E_RX_DESC_STATUS_FLM_SHIFT		= 11,
 	I40E_RX_DESC_STATUS_FLTSTAT_SHIFT	= 12, /* 2 BITS */
 	I40E_RX_DESC_STATUS_LPBK_SHIFT		= 14,
 	I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT	= 15,
-	I40E_RX_DESC_STATUS_RESERVED_SHIFT	= 16, /* 2 BITS */
-	/* Note: For non-tunnel packets INT_UDP_0 is the right status for
-	 * UDP header
-	 */
+	I40E_RX_DESC_STATUS_RESERVED2_SHIFT	= 16, /* 2 BITS */
 	I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT	= 18,
 	I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
 };
 
 #define I40E_RXD_QW1_STATUS_SHIFT	0
-#define I40E_RXD_QW1_STATUS_MASK	((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \
-					 << I40E_RXD_QW1_STATUS_SHIFT)
+#define I40E_RXD_QW1_STATUS_MASK	((BIT(I40E_RX_DESC_STATUS_LAST) - 1) << \
+					 I40E_RXD_QW1_STATUS_SHIFT)
 
 #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT   I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
 #define I40E_RXD_QW1_STATUS_TSYNINDX_MASK	(0x3UL << \
 					     I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
 
 #define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT  I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
-#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \
-				    BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK   BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
 
 enum i40e_rx_desc_fltstat_values {
 	I40E_RX_DESC_FLTSTAT_NO_DATA	= 0,
@@ -793,7 +866,8 @@ enum i40e_rx_l2_ptype {
 	I40E_RX_PTYPE_GRENAT4_MAC_PAY3			= 58,
 	I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4	= 87,
 	I40E_RX_PTYPE_GRENAT6_MAC_PAY3			= 124,
-	I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4	= 153
+	I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4	= 153,
+	I40E_RX_PTYPE_PARSER_ABORTED			= 255
 };
 
 struct i40e_rx_ptype_decoded {
@@ -1044,8 +1118,7 @@ enum i40e_tx_ctx_desc_eipt_offload {
 #define I40E_TXD_CTX_GRE_TUNNELING	(0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
 
 #define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT	11
-#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \
-				       BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK	BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
 
 #define I40E_TXD_CTX_EIP_NOINC_IPID_CONST	I40E_TXD_CTX_QW0_EIP_NOINC_MASK
 
@@ -1151,10 +1224,6 @@ enum i40e_filter_program_desc_pcmd {
 					 I40E_TXD_FLTR_QW1_CMD_SHIFT)
 #define I40E_TXD_FLTR_QW1_ATR_MASK	BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT)
 
-#define I40E_TXD_FLTR_QW1_ATR_SHIFT	(0xEULL + \
-					 I40E_TXD_FLTR_QW1_CMD_SHIFT)
-#define I40E_TXD_FLTR_QW1_ATR_MASK	BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT)
-
 #define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
 #define I40E_TXD_FLTR_QW1_CNTINDEX_MASK	(0x1FFUL << \
 					 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
@@ -1268,11 +1337,14 @@ struct i40e_hw_port_stats {
 	u32 rx_lpi_status;
 	u64 tx_lpi_count;		/* etlpic */
 	u64 rx_lpi_count;		/* erlpic */
+	u64 tx_lpi_duration;
+	u64 rx_lpi_duration;
 };
 
 /* Checksum and Shadow RAM pointers */
 #define I40E_SR_NVM_CONTROL_WORD		0x00
-#define I40E_SR_EMP_MODULE_PTR			0x0F
+#define I40E_EMP_MODULE_PTR			0x0F
+#define I40E_SR_EMP_MODULE_PTR			0x48
 #define I40E_SR_PBA_FLAGS			0x15
 #define I40E_SR_PBA_BLOCK_PTR			0x16
 #define I40E_SR_BOOT_CONFIG_PTR			0x17
@@ -1280,17 +1352,27 @@ struct i40e_hw_port_stats {
 #define I40E_SR_NVM_DEV_STARTER_VERSION		0x18
 #define I40E_SR_NVM_WAKE_ON_LAN			0x19
 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR	0x27
+#define I40E_SR_PERMANENT_SAN_MAC_ADDRESS_PTR	0x28
+#define I40E_SR_NVM_MAP_VERSION			0x29
+#define I40E_SR_NVM_IMAGE_VERSION		0x2A
+#define I40E_SR_NVM_STRUCTURE_VERSION		0x2B
 #define I40E_SR_NVM_EETRACK_LO			0x2D
 #define I40E_SR_NVM_EETRACK_HI			0x2E
 #define I40E_SR_VPD_PTR				0x2F
 #define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR		0x3E
 #define I40E_SR_SW_CHECKSUM_WORD		0x3F
+#define I40E_SR_EMP_SR_SETTINGS_PTR		0x48
 
 /* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
 #define I40E_SR_VPD_MODULE_MAX_SIZE		1024
 #define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE	1024
 #define I40E_SR_CONTROL_WORD_1_SHIFT		0x06
 #define I40E_SR_CONTROL_WORD_1_MASK	(0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
+#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID	BIT(5)
+#define I40E_SR_NVM_MAP_STRUCTURE_TYPE		BIT(12)
+#define I40E_PTR_TYPE				BIT(15)
+#define I40E_SR_OCP_CFG_WORD0			0x2B
+#define I40E_SR_OCP_ENABLED			BIT(15)
 
 /* Shadow RAM related */
 #define I40E_SR_SECTOR_SIZE_IN_WORDS	0x800
@@ -1405,7 +1487,8 @@ enum i40e_reset_type {
 };
 
 /* IEEE 802.1AB LLDP Agent Variables from NVM */
-#define I40E_NVM_LLDP_CFG_PTR		0xD
+#define I40E_NVM_LLDP_CFG_PTR   0x06
+#define I40E_SR_LLDP_CFG_PTR    0x31
 struct i40e_lldp_variables {
 	u16 length;
 	u16 adminstatus;
@@ -1465,19 +1548,19 @@ struct i40e_lldp_variables {
 #define I40E_FLEX_57_SHIFT		6
 #define I40E_FLEX_57_MASK		(0x1ULL << I40E_FLEX_57_SHIFT)
 
-/* Version format for PPP */
-struct i40e_ppp_version {
+/* Version format for Dynamic Device Personalization(DDP) */
+struct i40e_ddp_version {
 	u8 major;
 	u8 minor;
 	u8 update;
 	u8 draft;
 };
 
-#define I40E_PPP_NAME_SIZE	32
+#define I40E_DDP_NAME_SIZE	32
 
 /* Package header */
 struct i40e_package_header {
-	struct i40e_ppp_version version;
+	struct i40e_ddp_version version;
 	u32 segment_count;
 	u32 segment_offset[1];
 };
@@ -1489,16 +1572,18 @@ struct i40e_generic_seg_header {
 #define SEGMENT_TYPE_I40E	0x00000011
 #define SEGMENT_TYPE_X722	0x00000012
 	u32 type;
-	struct i40e_ppp_version version;
+	struct i40e_ddp_version version;
 	u32 size;
-	char name[I40E_PPP_NAME_SIZE];
+	char name[I40E_DDP_NAME_SIZE];
 };
 
 struct i40e_metadata_segment {
 	struct i40e_generic_seg_header header;
-	struct i40e_ppp_version version;
+	struct i40e_ddp_version version;
+#define I40E_DDP_TRACKID_RDONLY		0
+#define I40E_DDP_TRACKID_INVALID	0xFFFFFFFF
 	u32 track_id;
-	char name[I40E_PPP_NAME_SIZE];
+	char name[I40E_DDP_NAME_SIZE];
 };
 
 struct i40e_device_id_entry {
@@ -1508,8 +1593,8 @@ struct i40e_device_id_entry {
 
 struct i40e_profile_segment {
 	struct i40e_generic_seg_header header;
-	struct i40e_ppp_version version;
-	char name[I40E_PPP_NAME_SIZE];
+	struct i40e_ddp_version version;
+	char name[I40E_DDP_NAME_SIZE];
 	u32 device_table_count;
 	struct i40e_device_id_entry device_table[1];
 };
@@ -1525,22 +1610,49 @@ struct i40e_profile_section_header {
 	struct {
 #define SECTION_TYPE_INFO	0x00000010
 #define SECTION_TYPE_MMIO	0x00000800
+#define SECTION_TYPE_RB_MMIO	0x00001800
 #define SECTION_TYPE_AQ		0x00000801
+#define SECTION_TYPE_RB_AQ	0x00001801
 #define SECTION_TYPE_NOTE	0x80000000
 #define SECTION_TYPE_NAME	0x80000001
+#define SECTION_TYPE_PROTO	0x80000002
+#define SECTION_TYPE_PCTYPE	0x80000003
+#define SECTION_TYPE_PTYPE	0x80000004
 		u32 type;
 		u32 offset;
 		u32 size;
 	} section;
 };
 
+struct i40e_profile_tlv_section_record {
+	u8 rtype;
+	u8 type;
+	u16 len;
+	u8 data[12];
+};
+
+/* Generic AQ section in profile */
+struct i40e_profile_aq_section {
+	u16 opcode;
+	u16 flags;
+	u8  param[16];
+	u16 datalen;
+	u8  data[1];
+};
+
 struct i40e_profile_info {
 	u32 track_id;
-	struct i40e_ppp_version version;
+	struct i40e_ddp_version version;
 	u8 op;
-#define I40E_PPP_ADD_TRACKID		0x01
-#define I40E_PPP_REMOVE_TRACKID	0x02
+#define I40E_DDP_ADD_TRACKID		0x01
+#define I40E_DDP_REMOVE_TRACKID	0x02
 	u8 reserved[7];
-	u8 name[I40E_PPP_NAME_SIZE];
+	u8 name[I40E_DDP_NAME_SIZE];
 };
+
+#define I40E_BCM_PHY_PCS_STATUS1_PAGE	0x3
+#define I40E_BCM_PHY_PCS_STATUS1_REG	0x0001
+#define I40E_BCM_PHY_PCS_STATUS1_RX_LPI	BIT(8)
+#define I40E_BCM_PHY_PCS_STATUS1_TX_LPI	BIT(9)
+
 #endif /* _I40E_TYPE_H_ */
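The PPP-to-DDP rename above also covers the on-image package layout: i40e_package_header carries a segment_count and a segment_offset[] array of byte offsets to i40e_generic_seg_header entries. A minimal lookup over that layout might read as follows (a sketch patterned on the structures above; real code must bounds-check every offset against the image size):

/* Sketch: find a segment of a given type inside a DDP package image.
 * Assumes `pkg` points at a complete image already read into memory.
 */
static struct i40e_generic_seg_header *
ddp_find_segment(struct i40e_package_header *pkg, u32 seg_type)
{
	u32 i;

	for (i = 0; i < pkg->segment_count; i++) {
		struct i40e_generic_seg_header *seg =
			(struct i40e_generic_seg_header *)
			((u8 *)pkg + pkg->segment_offset[i]);

		if (seg->type == seg_type)	/* e.g. SEGMENT_TYPE_I40E */
			return seg;
	}

	return NULL;
}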
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 4d1e670f4..6b2966ffe 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1,28 +1,5 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
 #include "i40e.h"
 
@@ -31,8 +8,8 @@
 /**
  * i40e_vc_vf_broadcast
  * @pf: pointer to the PF structure
- * @opcode: operation code
- * @retval: return value
+ * @v_opcode: operation code
+ * @v_retval: return value
  * @msg: pointer to the msg buffer
  * @msglen: msg length
  *
@@ -62,6 +39,39 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
 	}
 }
 
+/**
+ * i40e_vc_link_speed2mbps - Convert AdminQ link_speed bit representation
+ * to an integer value of Mbps
+ * @link_speed: the speed to convert
+ *
+ * Returns the speed as a direct value in Mbps.
+ **/
+static INLINE u32
+i40e_vc_link_speed2mbps(enum i40e_aq_link_speed link_speed)
+{
+	switch (link_speed) {
+	case I40E_LINK_SPEED_100MB:
+		return SPEED_100;
+	case I40E_LINK_SPEED_1GB:
+		return SPEED_1000;
+	case I40E_LINK_SPEED_2_5GB:
+		return SPEED_2500;
+	case I40E_LINK_SPEED_5GB:
+		return SPEED_5000;
+	case I40E_LINK_SPEED_10GB:
+		return SPEED_10000;
+	case I40E_LINK_SPEED_20GB:
+		return SPEED_20000;
+	case I40E_LINK_SPEED_25GB:
+		return SPEED_25000;
+	case I40E_LINK_SPEED_40GB:
+		return SPEED_40000;
+	case I40E_LINK_SPEED_UNKNOWN:
+		return SPEED_UNKNOWN;
+	}
+	return SPEED_UNKNOWN;
+}
+
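For context, a sketch of how the conversion above is consumed when a VF has negotiated the advanced link-speed capability (mirroring the notify path below; the literal speed is just an example):

struct virtchnl_pf_event pfe = {};

pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
pfe.severity = PF_EVENT_SEVERITY_INFO;
pfe.event_data.link_event_adv.link_status = true;
pfe.event_data.link_event_adv.link_speed =
	i40e_vc_link_speed2mbps(I40E_LINK_SPEED_25GB);	/* 25000 Mbps */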
 /**
  * i40e_vc_notify_vf_link_state
  * @vf: pointer to the VF structure
@@ -78,18 +88,64 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
 
 	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
 	pfe.severity = PF_EVENT_SEVERITY_INFO;
-	if (vf->link_forced) {
+
+#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED
+	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
+		/* Always report link is down if the VF queues aren't enabled */
+		if (!vf->queues_enabled) {
+			pfe.event_data.link_event_adv.link_status = false;
+			pfe.event_data.link_event_adv.link_speed = 0;
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+		} else if (vf->link_forced) {
+			pfe.event_data.link_event_adv.link_status = vf->link_up;
+			pfe.event_data.link_event_adv.link_speed = vf->link_up ?
+				i40e_vc_link_speed2mbps(ls->link_speed) : 0;
+#endif
+		} else {
+			pfe.event_data.link_event_adv.link_status =
+				ls->link_info & I40E_AQ_LINK_UP;
+			pfe.event_data.link_event_adv.link_speed =
+				i40e_vc_link_speed2mbps(ls->link_speed);
+		}
+	} else {
+		/* Always report link is down if the VF queues aren't enabled */
+		if (!vf->queues_enabled) {
+			pfe.event_data.link_event.link_status = false;
+			pfe.event_data.link_event.link_speed = 0;
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+		} else if (vf->link_forced) {
+			pfe.event_data.link_event.link_status = vf->link_up;
+			pfe.event_data.link_event.link_speed = (vf->link_up ?
+				i40e_virtchnl_link_speed(ls->link_speed) : 0);
+#endif
+		} else {
+			pfe.event_data.link_event.link_status =
+				ls->link_info & I40E_AQ_LINK_UP;
+			pfe.event_data.link_event.link_speed =
+				i40e_virtchnl_link_speed(ls->link_speed);
+		}
+	}
+#else /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
+	/* Always report link is down if the VF queues aren't enabled */
+	if (!vf->queues_enabled) {
+		pfe.event_data.link_event.link_status = false;
+		pfe.event_data.link_event.link_speed = 0;
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+	} else if (vf->link_forced) {
 		pfe.event_data.link_event.link_status = vf->link_up;
-		pfe.event_data.link_event.link_speed =
-			(vf->link_up ? I40E_LINK_SPEED_40GB : 0);
+		pfe.event_data.link_event.link_speed = (vf->link_up ?
+			i40e_virtchnl_link_speed(ls->link_speed) : 0);
+#endif
 	} else {
 		pfe.event_data.link_event.link_status =
 			ls->link_info & I40E_AQ_LINK_UP;
 		pfe.event_data.link_event.link_speed =
-			(enum virtchnl_link_speed)ls->link_speed;
+			i40e_virtchnl_link_speed(ls->link_speed);
 	}
+#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
+
 	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
-			       0, (u8 *)&pfe, sizeof(pfe), NULL);
+			       I40E_SUCCESS, (u8 *)&pfe, sizeof(pfe), NULL);
 }
 
 /**
@@ -118,7 +174,7 @@ void i40e_vc_notify_reset(struct i40e_pf *pf)
 
 	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
 	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
-	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
+	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, I40E_SUCCESS,
 			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
 }
 
@@ -147,22 +203,51 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
 	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
 	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
 	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
-			       0, (u8 *)&pfe,
+			       I40E_SUCCESS, (u8 *)&pfe,
 			       sizeof(struct virtchnl_pf_event), NULL);
 }
 /***********************misc routines*****************************/
 
 /**
- * i40e_vc_disable_vf
- * @pf: pointer to the PF info
+ * i40e_vc_reset_vf
  * @vf: pointer to the VF info
+ * @notify_vf: notify vf about reset or not
  *
- * Disable the VF through a SW reset
+ * Reset VF handler.
  **/
-static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
+static inline void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
 {
-	i40e_vc_notify_vf_reset(vf);
-	i40e_reset_vf(vf, false);
+	struct i40e_pf *pf = vf->pf;
+	int i;
+
+	if (notify_vf)
+		i40e_vc_notify_vf_reset(vf);
+
+	/* We want to ensure that an actual reset is initiated after this
+	 * function is called. However, we do not want to wait forever, so
+	 * we'll give it a reasonable time and print a message if we fail to
+	 * ensure a reset.
+	 */
+	for (i = 0; i < 20; i++) {
+		/* If the PF is in the VFs-releasing state, resetting the VF
+		 * is impossible, so bail out.
+		 */
+		if (test_bit(__I40E_VFS_RELEASING, pf->state))
+			return;
+
+		if (i40e_reset_vf(vf, false))
+			return;
+		usleep_range(10000, 20000);
+	}
+
+	if (notify_vf)
+		dev_warn(&vf->pf->pdev->dev,
+			 "Failed to initiate reset for VF %d after 200 milliseconds\n",
+			 vf->vf_id);
+	else
+		dev_dbg(&vf->pf->pdev->dev,
+			"Failed to initiate reset for VF %d after 200 milliseconds\n",
+			vf->vf_id);
 }
 
 /**
@@ -189,7 +274,7 @@ static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
  * check for the valid queue id
  **/
 static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
-					    u8 qid)
+					    u16 qid)
 {
 	struct i40e_pf *pf = vf->pf;
 	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
@@ -204,7 +289,7 @@ static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
  *
  * check for the valid vector id
  **/
-static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
+static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
 {
 	struct i40e_pf *pf = vf->pf;
 
@@ -242,6 +327,38 @@ static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
 	return pf_queue_id;
 }
 
+/**
+ * i40e_get_real_pf_qid
+ * @vf: pointer to the VF info
+ * @vsi_id: vsi id
+ * @queue_id: queue number
+ *
+ * wrapper function to get pf_queue_id handling ADq code as well
+ **/
+static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
+{
+	int i;
+
+	if (vf->adq_enabled) {
+		/* Although the VF considers all the queues (1 to 16) as its
+		 * own, they may actually belong to different VSIs (up to 4).
+		 * We need to find which queue belongs to which VSI.
+		 */
+		for (i = 0; i < vf->num_tc; i++) {
+			if (queue_id < vf->ch[i].num_qps) {
+				vsi_id = vf->ch[i].vsi_id;
+				break;
+			}
+			/* find the right queue id, relative to the
+			 * given VSI.
+			 */
+			queue_id -= vf->ch[i].num_qps;
+		}
+	}
+
+	return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
+}
+
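A worked example of the remapping above, assuming a hypothetical ADq-enabled VF with two traffic classes of 4 and 8 queue pairs:

/* Hypothetical VF: num_tc == 2, ch[0].num_qps == 4, ch[1].num_qps == 8.
 *
 *   queue_id 2 -> 2 < 4, so it lives in ch[0]; vsi_id = ch[0].vsi_id,
 *                 relative queue_id stays 2.
 *   queue_id 9 -> 9 >= 4, subtract 4 -> 5; 5 < 8, so it lives in ch[1];
 *                 vsi_id = ch[1].vsi_id, relative queue_id becomes 5.
 *
 * The resulting (vsi_id, queue_id) pair is then resolved to an absolute
 * PF queue by i40e_vc_get_pf_queue_id().
 */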
 /**
  * i40e_config_irq_link_list
  * @vf: pointer to the VF info
@@ -258,7 +375,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
 	struct i40e_hw *hw = &pf->hw;
 	u16 vsi_queue_id, pf_queue_id;
 	enum i40e_queue_type qtype;
-	u16 next_q, vector_id;
+	u16 next_q, vector_id, size;
 	u32 reg, reg_idx;
 	u16 itr_idx = 0;
 
@@ -288,17 +405,19 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
 				     vsi_queue_id + 1));
 	}
 
-	next_q = find_first_bit(&linklistmap,
-				(I40E_MAX_VSI_QP *
-				 I40E_VIRTCHNL_SUPPORTED_QTYPES));
+	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
+	next_q = find_first_bit(&linklistmap, size);
+	if (unlikely(next_q == size))
+		goto irq_list_done;
+
 	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
 	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
-	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
+	pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
 	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
 
 	wr32(hw, reg_idx, reg);
 
-	while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
+	while (next_q < size) {
 		switch (qtype) {
 		case I40E_QUEUE_TYPE_RX:
 			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
@@ -312,16 +431,13 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
 			break;
 		}
 
-		next_q = find_next_bit(&linklistmap,
-				       (I40E_MAX_VSI_QP *
-					I40E_VIRTCHNL_SUPPORTED_QTYPES),
-				       next_q + 1);
-		if (next_q <
-		    (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
+		next_q = find_next_bit(&linklistmap, size, next_q + 1);
+		if (next_q < size) {
 			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
 			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
-			pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id,
-							      vsi_queue_id);
+			pf_queue_id = i40e_get_real_pf_qid(vf,
+							   vsi_id,
+							   vsi_queue_id);
 		} else {
 			pf_queue_id = I40E_QUEUE_END_OF_LIST;
 			qtype = 0;
@@ -352,136 +468,6 @@ irq_list_done:
 	i40e_flush(hw);
 }
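
The tightened loop above walks linklistmap, where each (queue, qtype) pair occupies one bit at index vsi_queue_id * I40E_VIRTCHNL_SUPPORTED_QTYPES + qtype (two supported qtypes in this driver). A standalone sketch of that encode/decode:

/* Sketch of the linklistmap bit layout, with 2 supported queue types */
#define SUPPORTED_QTYPES	2	/* I40E_VIRTCHNL_SUPPORTED_QTYPES */

static inline unsigned int map_encode(unsigned int q, unsigned int qtype)
{
	return q * SUPPORTED_QTYPES + qtype;
}

/* Decoding bit 7, as the loop above does with find_next_bit():
 *   vsi_queue_id = 7 / SUPPORTED_QTYPES = 3
 *   qtype        = 7 % SUPPORTED_QTYPES = 1
 */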
 
-/**
- * i40e_release_iwarp_qvlist
- * @vf: pointer to the VF.
- *
- **/
-static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
-{
-	struct i40e_pf *pf = vf->pf;
-	struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
-	u32 msix_vf;
-	u32 i;
-
-	if (!vf->qvlist_info)
-		return;
-
-	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
-	for (i = 0; i < qvlist_info->num_vectors; i++) {
-		struct virtchnl_iwarp_qv_info *qv_info;
-		u32 next_q_index, next_q_type;
-		struct i40e_hw *hw = &pf->hw;
-		u32 v_idx, reg_idx, reg;
-
-		qv_info = &qvlist_info->qv_info[i];
-		if (!qv_info)
-			continue;
-		v_idx = qv_info->v_idx;
-		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
-			/* Figure out the queue after CEQ and make that the
-			 * first queue.
-			 */
-			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
-			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
-			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
-					>> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
-			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
-					>> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;
-
-			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
-			reg = (next_q_index &
-			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
-			       (next_q_type <<
-			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
-
-			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
-		}
-	}
-	kfree(vf->qvlist_info);
-	vf->qvlist_info = NULL;
-}
-
-/**
- * i40e_config_iwarp_qvlist
- * @vf: pointer to the VF info
- * @qvlist_info: queue and vector list
- *
- * Return 0 on success or < 0 on error
- **/
-static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
-				    struct virtchnl_iwarp_qvlist_info *qvlist_info)
-{
-	struct i40e_pf *pf = vf->pf;
-	struct i40e_hw *hw = &pf->hw;
-	struct virtchnl_iwarp_qv_info *qv_info;
-	u32 v_idx, i, reg_idx, reg;
-	u32 next_q_idx, next_q_type;
-	u32 msix_vf, size;
-
-	size = sizeof(struct virtchnl_iwarp_qvlist_info) +
-	       (sizeof(struct virtchnl_iwarp_qv_info) *
-						(qvlist_info->num_vectors - 1));
-	vf->qvlist_info = kzalloc(size, GFP_KERNEL);
-	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;
-
-	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
-	for (i = 0; i < qvlist_info->num_vectors; i++) {
-		qv_info = &qvlist_info->qv_info[i];
-		if (!qv_info)
-			continue;
-		v_idx = qv_info->v_idx;
-
-		/* Validate vector id belongs to this vf */
-		if (!i40e_vc_isvalid_vector_id(vf, v_idx))
-			goto err;
-
-		vf->qvlist_info->qv_info[i] = *qv_info;
-
-		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
-		/* We might be sharing the interrupt, so get the first queue
-		 * index and type, push it down the list by adding the new
-		 * queue on top. Also link it with the new queue in CEQCTL.
-		 */
-		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
-		next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
-				I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
-		next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
-				I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
-
-		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
-			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
-			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
-			(v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
-			(qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
-			(next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
-			(next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
-			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);
-
-			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
-			reg = (qv_info->ceq_idx &
-			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
-			       (I40E_QUEUE_TYPE_PE_CEQ <<
-			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
-			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
-		}
-
-		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
-			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
-			(v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
-			(qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));
-
-			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
-		}
-	}
-
-	return 0;
-err:
-	kfree(vf->qvlist_info);
-	vf->qvlist_info = NULL;
-	return -EINVAL;
-}
-
 /**
  * i40e_config_vsi_tx_queue
  * @vf: pointer to the VF info
@@ -599,7 +585,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
 		}
 		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
 
-		/* set split mode 10b */
+		/* set splitalways mode 10b */
 		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
 	}
 
@@ -621,7 +607,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
 	rx_ctx.dsize = 1;
 
 	/* default values */
-	rx_ctx.lrxqthresh = 2;
+	rx_ctx.lrxqthresh = 1;
 	rx_ctx.crcstrip = 1;
 	rx_ctx.prefena = 1;
 	rx_ctx.l2tsel = 1;
@@ -651,2644 +637,6516 @@ error_param:
 }
 
 /**
- * i40e_alloc_vsi_res
- * @vf: pointer to the VF info
- * @type: type of VSI to allocate
+ * i40e_validate_vf
+ * @pf: the physical function
+ * @vf_id: VF identifier
  *
- * alloc VF vsi context & resources
+ * Check that the VF is enabled and the vsi exists.
+ *
+ * Returns 0 on success, negative on failure
  **/
-static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
+static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
 {
-	struct i40e_mac_filter *f = NULL;
-	struct i40e_pf *pf = vf->pf;
 	struct i40e_vsi *vsi;
+	struct i40e_vf *vf;
 	int ret = 0;
 
-	vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);
-
-	if (!vsi) {
+	if (vf_id >= pf->num_alloc_vfs) {
 		dev_err(&pf->pdev->dev,
-			"add vsi failed for VF %d, aq_err %d\n",
-			vf->vf_id, pf->hw.aq.asq_last_status);
-		ret = -ENOENT;
-		goto error_alloc_vsi_res;
+			"Invalid VF Identifier %d\n", vf_id);
+		ret = -EINVAL;
+		goto err_out;
 	}
-	if (type == I40E_VSI_SRIOV) {
-		u64 hena = i40e_pf_get_default_rss_hena(pf);
-		u8 broadcast[ETH_ALEN];
+	vf = &pf->vf[vf_id];
+	vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
+	if (!vsi)
+		ret = -EINVAL;
+err_out:
+	return ret;
+}
 
-		vf->lan_vsi_idx = vsi->idx;
-		vf->lan_vsi_id = vsi->id;
-		/* If the port VLAN has been configured and then the
-		 * VF driver was removed then the VSI port VLAN
-		 * configuration was destroyed.  Check if there is
-		 * a port VLAN and restore the VSI configuration if
-		 * needed.
-		 */
-		if (vf->port_vlan_id)
-			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
 
-		spin_lock_bh(&vsi->mac_filter_hash_lock);
-		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
-			f = i40e_add_mac_filter(vsi,
-						vf->default_lan_addr.addr);
-			if (!f)
-				dev_info(&pf->pdev->dev,
-					 "Could not add MAC filter %pM for VF %d\n",
-					vf->default_lan_addr.addr, vf->vf_id);
-		}
-		eth_broadcast_addr(broadcast);
-		f = i40e_add_mac_filter(vsi, broadcast);
-		if (!f)
-			dev_info(&pf->pdev->dev,
-				 "Could not allocate VF broadcast filter\n");
-		spin_unlock_bh(&vsi->mac_filter_hash_lock);
-		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
-		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
-	}
+/**
+ * i40e_configure_vf_loopback
+ * @vsi: VF VSI to configure
+ * @vf_id: VF identifier
+ * @enable: enable or disable
+ *
+ * This function configures the VF VSI with the loopback settings
+ *
+ * Returns 0 on success, negative on failure
+ *
+ **/
+static int i40e_configure_vf_loopback(struct i40e_vsi *vsi, int vf_id,
+				      bool enable)
+{
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_vsi_context ctxt;
+	int ret = 0;
 
-	/* program mac filter */
-	ret = i40e_sync_vsi_filters(vsi);
-	if (ret)
-		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
+	vsi->info.valid_sections = CPU_TO_LE16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+	if (enable)
+		vsi->info.switch_id |=
+				CPU_TO_LE16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+	else
+		vsi->info.switch_id &=
+				~CPU_TO_LE16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
 
-	/* Set VF bandwidth if specified */
-	if (vf->tx_rate) {
-		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
-						  vf->tx_rate / 50, 0, NULL);
-		if (ret)
-			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
-				vf->vf_id, ret);
+	memset(&ctxt, 0, sizeof(ctxt));
+	ctxt.seid = vsi->seid;
+	ctxt.pf_num = vsi->back->hw.pf_id;
+	ctxt.info = vsi->info;
+	ret = i40e_aq_update_vsi_params(&pf->hw, &ctxt, NULL);
+	if (ret) {
+		dev_err(&pf->pdev->dev, "Error %d configuring loopback for VF %d\n",
+			ret, vf_id);
+		ret = -EIO;
 	}
-
-error_alloc_vsi_res:
 	return ret;
 }
 
 /**
- * i40e_enable_vf_mappings
- * @vf: pointer to the VF info
+ * i40e_configure_vf_vlan_stripping
+ * @vsi: VF VSI to configure
+ * @vf_id: VF identifier
+ * @enable: enable or disable
  *
- * enable VF mappings
+ * This function enables or disables vlan stripping on the VF
+ *
+ * Returns 0 on success, negative on failure
  **/
-static void i40e_enable_vf_mappings(struct i40e_vf *vf)
+static int i40e_configure_vf_vlan_stripping(struct i40e_vsi *vsi, int vf_id,
+					    bool enable)
 {
-	struct i40e_pf *pf = vf->pf;
-	struct i40e_hw *hw = &pf->hw;
-	u32 reg, total_queue_pairs = 0;
-	int j;
-
-	/* Tell the hardware we're using noncontiguous mapping. HW requires
-	 * that VF queues be mapped using this method, even when they are
-	 * contiguous in real life
-	 */
-	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
-			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_vsi_context ctxt;
+	int ret = 0;
+	u8 flag;
 
-	/* enable VF vplan_qtable mappings */
-	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
-	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
+	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+	if (enable) {
+		/* Don't enable vlan stripping if port vlan is set */
+		if (vsi->info.pvid) {
+			dev_err(&pf->pdev->dev,
+				"Cannot enable vlan stripping when port VLAN is set\n");
+			ret = -EINVAL;
+			goto err_out;
+		}
+		flag = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+	} else {
+		flag = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
+	}
+	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | flag;
+	ctxt.seid = vsi->seid;
+	ctxt.info = vsi->info;
+	ret = i40e_aq_update_vsi_params(&pf->hw, &ctxt, NULL);
+	if (ret) {
+		dev_err(&pf->pdev->dev, "Error %d configuring vlan stripping for VF %d\n",
+			ret, vf_id);
+		ret = -EIO;
+	}
+err_out:
+	return ret;
+}
 
-	/* map PF queues to VF queues */
-	for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) {
-		u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j);
+/**
+ * i40e_configure_vf_promisc_mode
+ * @vf: VF
+ * @vsi: VF VSI to configure
+ * @promisc_mode: promisc mode to configure
+ *
+ * This function configures the requested promisc mode for a vf
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_configure_vf_promisc_mode(struct i40e_vf *vf,
+					  struct i40e_vsi *vsi,
+					  u8 promisc_mode)
+{
+	struct i40e_pf *pf = vsi->back;
+	int ret = 0;
 
-		reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
-		wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
-		total_queue_pairs++;
+	if (promisc_mode & VFD_PROMISC_MULTICAST) {
+		ret = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, vsi->seid,
+							    true, NULL);
+		if (ret)
+			goto err;
+		vf->promisc_mode |= VFD_PROMISC_MULTICAST;
+	} else {
+		ret = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, vsi->seid,
+							    false, NULL);
+		if (ret)
+			goto err;
+		vf->promisc_mode &= ~VFD_PROMISC_MULTICAST;
 	}
-
-	/* map PF queues to VSI */
-	for (j = 0; j < 7; j++) {
-		if (j * 2 >= pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs) {
-			reg = 0x07FF07FF;	/* unused */
-		} else {
-			u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
-							  j * 2);
-			reg = qid;
-			qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
-						      (j * 2) + 1);
-			reg |= qid << 16;
-		}
-		i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id),
-				  reg);
+	if (promisc_mode & VFD_PROMISC_UNICAST) {
+		ret = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, vsi->seid,
+							  true, NULL, true);
+		if (ret)
+			goto err;
+		vf->promisc_mode |= VFD_PROMISC_UNICAST;
+	} else {
+		ret = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, vsi->seid,
+							  false, NULL, true);
+		if (ret)
+			goto err;
+		vf->promisc_mode &= ~VFD_PROMISC_UNICAST;
 	}
+err:
+	if (ret)
+		dev_err(&pf->pdev->dev, "Error %d configuring promisc mode for VF %d\n",
+			ret, vf->vf_id);
 
-	i40e_flush(hw);
+	return ret;
 }
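
Since the function above tests VFD_PROMISC_UNICAST and VFD_PROMISC_MULTICAST independently, the mode argument behaves as a bitmask; presumably both can be requested at once:

/* Sketch: enable both unicast and multicast promiscuous for a VF */
ret = i40e_configure_vf_promisc_mode(vf, vsi,
				     VFD_PROMISC_UNICAST |
				     VFD_PROMISC_MULTICAST);

/* VFD_PROMISC_OFF (presumably all bits clear) disables both */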
 
 /**
- * i40e_disable_vf_mappings
- * @vf: pointer to the VF info
+ * i40e_add_ingress_egress_mirror
+ * @src_vsi: VSI to mirror from
+ * @mirror_vsi: VSI to mirror to
+ * @rule_type: rule type to configure
+ * @rule_id: rule id to store
  *
- * disable VF mappings
+ * This function adds the requested ingress/egress mirror for a vsi
+ *
+ * Returns 0 on success, negative on failure
  **/
-static void i40e_disable_vf_mappings(struct i40e_vf *vf)
+static int i40e_add_ingress_egress_mirror(struct i40e_vsi *src_vsi,
+					  struct i40e_vsi *mirror_vsi,
+					  u16 rule_type, u16 *rule_id)
 {
-	struct i40e_pf *pf = vf->pf;
-	struct i40e_hw *hw = &pf->hw;
-	int i;
-
-	/* disable qp mappings */
-	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
-	for (i = 0; i < I40E_MAX_VSI_QP; i++)
-		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
-		     I40E_QUEUE_END_OF_LIST);
-	i40e_flush(hw);
-}
-
-/**
- * i40e_free_vf_res
- * @vf: pointer to the VF info
- *
- * free VF resources
- **/
-static void i40e_free_vf_res(struct i40e_vf *vf)
-{
-	struct i40e_pf *pf = vf->pf;
-	struct i40e_hw *hw = &pf->hw;
-	u32 reg_idx, reg;
-	int i, msix_vf;
-
-	/* Start by disabling VF's configuration API to prevent the OS from
-	 * accessing the VF's VSI after it's freed / invalidated.
-	 */
-	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
-
-	/* free vsi & disconnect it from the parent uplink */
-	if (vf->lan_vsi_idx) {
-		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
-		vf->lan_vsi_idx = 0;
-		vf->lan_vsi_id = 0;
-		vf->num_mac = 0;
+	u16 dst_seid, rules_used, rules_free, sw_seid;
+	struct i40e_pf *pf = src_vsi->back;
+	int ret, num = 0, cnt = 1;
+	int *vsi_ingress_vlan;
+	int *vsi_egress_vlan;
+	__le16 *mr_list;
+
+	mr_list = kcalloc(cnt, sizeof(__le16), GFP_KERNEL);
+	if (!mr_list) {
+		ret = -ENOMEM;
+		goto err_out;
 	}
-	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
 
-	/* disable interrupts so the VF starts in a known state */
-	for (i = 0; i < msix_vf; i++) {
-		/* format is same for both registers */
-		if (0 == i)
-			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
-		else
-			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
-						      (vf->vf_id))
-						     + (i - 1));
-		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
-		i40e_flush(hw);
+	if (src_vsi->type == I40E_VSI_MAIN) {
+		vsi_ingress_vlan = &pf->ingress_vlan;
+		vsi_egress_vlan = &pf->egress_vlan;
+	} else {
+		vsi_ingress_vlan = &pf->vf[src_vsi->vf_id].ingress_vlan;
+		vsi_egress_vlan = &pf->vf[src_vsi->vf_id].egress_vlan;
 	}
 
-	/* clear the irq settings */
-	for (i = 0; i < msix_vf; i++) {
-		/* format is same for both registers */
-		if (0 == i)
-			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
+	if (I40E_IS_MIRROR_VLAN_ID_VALID(*vsi_ingress_vlan)) {
+		if (src_vsi->type == I40E_VSI_MAIN)
+			dev_err(&pf->pdev->dev,
+				"PF already has an ingress mirroring configured, only one rule per PF is supported!\n");
 		else
-			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
-						      (vf->vf_id))
-						     + (i - 1));
-		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
-		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
-		wr32(hw, reg_idx, reg);
-		i40e_flush(hw);
+			dev_err(&pf->pdev->dev,
+				"VF=%d already has an ingress mirroring configured, only one rule per VF is supported!\n",
+				src_vsi->vf_id);
+		ret = -EPERM;
+		goto err_out;
+	} else if (I40E_IS_MIRROR_VLAN_ID_VALID(*vsi_egress_vlan)) {
+		if (src_vsi->type == I40E_VSI_MAIN)
+			dev_err(&pf->pdev->dev,
+				"PF already has an egress mirroring configured, only one rule per PF is supported!\n");
+		else
+			dev_err(&pf->pdev->dev,
+				"VF=%d already has an egress mirroring configured, only one rule per VF is supported!\n",
+				src_vsi->vf_id);
+		ret = -EPERM;
+		goto err_out;
 	}
-	/* reset some of the state variables keeping track of the resources */
-	vf->num_queue_pairs = 0;
-	vf->vf_states = 0;
+
+	sw_seid = src_vsi->uplink_seid;
+	dst_seid = mirror_vsi->seid;
+	mr_list[num] = CPU_TO_LE16(src_vsi->seid);
+	ret = i40e_aq_add_mirrorrule(&pf->hw, sw_seid,
+				     rule_type, dst_seid,
+				     cnt, mr_list, NULL,
+				     rule_id, &rules_used,
+				     &rules_free);
+	kfree(mr_list);
+err_out:
+	return ret;
 }
 
 /**
- * i40e_alloc_vf_res
- * @vf: pointer to the VF info
+ * i40e_del_ingress_egress_mirror
+ * @src_vsi: the mirrored VSI
+ * @rule_type: rule type to configure
+ * @rule_id : rule id to delete
  *
- * allocate VF resources
+ * This function deletes the ingress/egress mirror on a VSI
+ *
+ * Returns 0 on success, negative on failure
  **/
-static int i40e_alloc_vf_res(struct i40e_vf *vf)
+static int i40e_del_ingress_egress_mirror(struct i40e_vsi *src_vsi,
+					  u16 rule_type, u16 rule_id)
 {
-	struct i40e_pf *pf = vf->pf;
-	int total_queue_pairs = 0;
+	u16 rules_used, rules_free, sw_seid;
+	struct i40e_pf *pf = src_vsi->back;
 	int ret;
 
-	/* allocate hw vsi context & associated resources */
-	ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
-	if (ret)
-		goto error_alloc;
-	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
-
-	if (vf->trusted)
-		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
-	else
-		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
-
-	/* store the total qps number for the runtime
-	 * VF req validation
-	 */
-	vf->num_queue_pairs = total_queue_pairs;
-
-	/* VF is now completely initialized */
-	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);
-
-error_alloc:
-	if (ret)
-		i40e_free_vf_res(vf);
-
+	sw_seid = src_vsi->uplink_seid;
+	ret = i40e_aq_delete_mirrorrule(&pf->hw, sw_seid, rule_type,
+					rule_id, 0, NULL, NULL,
+					&rules_used, &rules_free);
 	return ret;
 }
 
-#define VF_DEVICE_STATUS 0xAA
-#define VF_TRANS_PENDING_MASK 0x20
 /**
- * i40e_quiesce_vf_pci
- * @vf: pointer to the VF structure
+ * i40e_restore_ingress_egress_mirror
+ * @src_vsi: the mirrored VSI
+ * @mirror: VSI to mirror to
+ * @rule_type: rule type to configure
+ * @rule_id : rule id to delete
  *
- * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
- * if the transactions never clear.
+ * This function restores the configured ingress/egress mirrors
+ *
+ * Returns 0 on success, negative on failure
  **/
-static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
+int i40e_restore_ingress_egress_mirror(struct i40e_vsi *src_vsi,
+				       int mirror, u16 rule_type, u16 *rule_id)
 {
-	struct i40e_pf *pf = vf->pf;
-	struct i40e_hw *hw = &pf->hw;
-	int vf_abs_id, i;
-	u32 reg;
+	struct i40e_vsi *mirror_vsi;
+	struct i40e_vf *mirror_vf;
+	struct i40e_pf *pf;
+	int ret = 0;
 
-	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
+	pf = src_vsi->back;
 
-	wr32(hw, I40E_PF_PCI_CIAA,
-	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
-	for (i = 0; i < 100; i++) {
-		reg = rd32(hw, I40E_PF_PCI_CIAD);
-		if ((reg & VF_TRANS_PENDING_MASK) == 0)
-			return 0;
-		udelay(1);
-	}
-	return -EIO;
+	/* validate the mirror */
+	ret = i40e_validate_vf(pf, mirror);
+	if (ret)
+		goto err_out;
+	mirror_vf = &pf->vf[mirror];
+	mirror_vsi = pf->vsi[mirror_vf->lan_vsi_idx];
+	ret = i40e_add_ingress_egress_mirror(src_vsi, mirror_vsi, rule_type,
+					     rule_id);
+
+err_out:
+	return ret;
 }
 
 /**
- * i40e_trigger_vf_reset
- * @vf: pointer to the VF structure
- * @flr: VFLR was issued or not
+ * i40e_configure_vf_link
+ * @vf: VF
+ * @link: link state to configure
  *
- * Trigger hardware to start a reset for a particular VF. Expects the caller
- * to wait the proper amount of time to allow hardware to reset the VF before
- * it cleans up and restores VF functionality.
+ * This function configures the requested link state for a VF
+ *
+ * Returns 0 on success, negative on failure
  **/
-static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
+static int i40e_configure_vf_link(struct i40e_vf *vf, u8 link)
 {
+	struct virtchnl_pf_event pfe;
+	struct i40e_link_status *ls;
 	struct i40e_pf *pf = vf->pf;
-	struct i40e_hw *hw = &pf->hw;
-	u32 reg, reg_idx, bit_idx;
-
-	/* warn the VF */
-	clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
-
-	/* Disable VF's configuration API during reset. The flag is re-enabled
-	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
-	 * It's normally disabled in i40e_free_vf_res(), but it's safer
-	 * to do it earlier to give some time to finish to any VF config
-	 * functions that may still be running at this point.
-	 */
-	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
+	struct i40e_hw *hw;
+	int abs_vf_id;
+	int ret = 0;
 
-	/* In the case of a VFLR, the HW has already reset the VF and we
-	 * just need to clean up, so don't hit the VFRTRIG register.
+	hw = &pf->hw;
+	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
+	pfe.severity = PF_EVENT_SEVERITY_INFO;
+	ls = &pf->hw.phy.link_info;
+	switch (link) {
+	case VFD_LINKSTATE_AUTO:
+		vf->link_forced = false;
+#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED
+		pfe.event_data.link_event_adv.link_status =
+			ls->link_info & I40E_AQ_LINK_UP;
+		pfe.event_data.link_event_adv.link_speed =
+			i40e_vc_link_speed2mbps(ls->link_speed);
+#else /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
+		pfe.event_data.link_event.link_status =
+			ls->link_info & I40E_AQ_LINK_UP;
+		pfe.event_data.link_event.link_speed =
+			i40e_virtchnl_link_speed(ls->link_speed);
+#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
+		break;
+	case VFD_LINKSTATE_ON:
+		vf->link_forced = true;
+		vf->link_up = true;
+#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED
+		pfe.event_data.link_event_adv.link_status = true;
+		pfe.event_data.link_event_adv.link_speed =
+			i40e_vc_link_speed2mbps(ls->link_speed);
+#else /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
+		pfe.event_data.link_event.link_status = true;
+		pfe.event_data.link_event.link_speed =
+			i40e_virtchnl_link_speed(ls->link_speed);
+#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
+		break;
+	case VFD_LINKSTATE_OFF:
+		vf->link_forced = true;
+		vf->link_up = false;
+#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED
+		pfe.event_data.link_event_adv.link_status = false;
+		pfe.event_data.link_event_adv.link_speed = 0;
+#else /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
+		pfe.event_data.link_event.link_status = false;
+		pfe.event_data.link_event.link_speed = 0;
+#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
+		break;
+	default:
+		ret = -EINVAL;
+		goto error_out;
+	}
+	/* Do not allow changing the link state when the VF is disabled.
+	 * Check that the requested link state is not VFD_LINKSTATE_OFF, to
+	 * prevent a false-positive warning when the driver is reloaded
 	 */
-	if (!flr) {
-		/* reset VF using VPGEN_VFRTRIG reg */
-		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
-		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
-		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
-		i40e_flush(hw);
+	if (vf->pf_ctrl_disable && link != VFD_LINKSTATE_OFF) {
+		vf->link_up = false;
+#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED
+		pfe.event_data.link_event_adv.link_status = false;
+		pfe.event_data.link_event_adv.link_speed = 0;
+#else /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
+		pfe.event_data.link_event.link_status = false;
+		pfe.event_data.link_event.link_speed = 0;
+#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
+		dev_warn(&pf->pdev->dev,
+			 "Not possible to change VF link state, please enable it first\n");
 	}
-	/* clear the VFLR bit in GLGEN_VFLRSTAT */
-	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
-	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
-	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
-	i40e_flush(hw);
 
-	if (i40e_quiesce_vf_pci(vf))
-		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
-			vf->vf_id);
+	/* Notify the VF of its new link state */
+	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
+			       I40E_SUCCESS, (u8 *)&pfe, sizeof(pfe), NULL);
+error_out:
+	return ret;
 }
 
 /**
- * i40e_cleanup_reset_vf
+ * i40e_vf_del_vlan_mirror
  * @vf: pointer to the VF structure
+ * @vsi: pointer to the VSI structure
+ *
+ * Delete configured mirror vlans
+ *
+ * Returns 0 on success, negative on failure
  *
- * Cleanup a VF after the hardware reset is finished. Expects the caller to
- * have verified whether the reset is finished properly, and ensure the
- * minimum amount of wait time has passed.
  **/
-static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
+static int i40e_vf_del_vlan_mirror(struct i40e_vf *vf, struct i40e_vsi *vsi)
 {
+	u16 rules_used, rules_free, vid;
 	struct i40e_pf *pf = vf->pf;
-	struct i40e_hw *hw = &pf->hw;
-	u32 reg;
-
-	/* free VF resources to begin resetting the VSI state */
-	i40e_free_vf_res(vf);
+	int ret = 0, num = 0, cnt;
+	__le16 *mr_list;
+
+	cnt = bitmap_weight(vf->mirror_vlans, VLAN_N_VID);
+	if (cnt) {
+		mr_list = kcalloc(cnt, sizeof(__le16), GFP_KERNEL);
+		if (!mr_list)
+			return -ENOMEM;
+
+		for_each_set_bit(vid, vf->mirror_vlans, VLAN_N_VID) {
+			mr_list[num] = CPU_TO_LE16(vid);
+			num++;
+		}
 
-	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
-	 * By doing this we allow HW to access VF memory at any point. If we
-	 * did it any sooner, HW could access memory while it was being freed
-	 * in i40e_free_vf_res(), causing an IOMMU fault.
-	 *
-	 * On the other hand, this needs to be done ASAP, because the VF driver
-	 * is waiting for this to happen and may report a timeout. It's
-	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
-	 * it.
-	 */
-	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
-	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
-	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+		ret = i40e_aq_delete_mirrorrule(&pf->hw, vsi->uplink_seid,
+						I40E_AQC_MIRROR_RULE_TYPE_VLAN,
+						vf->vlan_rule_id, cnt, mr_list,
+						NULL, &rules_used,
+						&rules_free);
 
-	/* reallocate VF resources to finish resetting the VSI state */
-	if (!i40e_alloc_vf_res(vf)) {
-		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
-		i40e_enable_vf_mappings(vf);
-		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
-		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
-		/* Do not notify the client during VF init */
-		if (test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
-				       &vf->vf_states))
-			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
-		vf->num_vlan = 0;
+		vf->vlan_rule_id = 0;
+		kfree(mr_list);
 	}
 
-	/* Tell the VF driver the reset is done. This needs to be done only
-	 * after VF has been fully initialized, because the VF driver may
-	 * request resources immediately after setting this flag.
-	 */
-	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
+	return ret;
 }
 
 /**
- * i40e_reset_vf
+ * i40e_restore_vfd_config
  * @vf: pointer to the VF structure
- * @flr: VFLR was issued or not
+ * @vsi: VF VSI to be configured
+ *
+ * Restore the VF-d config as per the stored configuration
+ *
+ * Returns 0 on success, negative on failure
  *
- * reset the VF
  **/
-void i40e_reset_vf(struct i40e_vf *vf, bool flr)
+static int i40e_restore_vfd_config(struct i40e_vf *vf, struct i40e_vsi *vsi)
 {
 	struct i40e_pf *pf = vf->pf;
-	struct i40e_hw *hw = &pf->hw;
-	bool rsd = false;
-	u32 reg;
-	int i;
+	int ret = 0, cnt = 0;
+	u16 vid;
 
-	/* If VFs have been disabled, there is no need to reset */
-	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
-		return;
+	/* Restore all VF-d configuration on reset */
+	for_each_set_bit(vid, vf->trunk_vlans, VLAN_N_VID) {
+		ret = i40e_vsi_add_vlan(vsi, vid);
+		if (ret)
+			goto err_out;
+	}
+	if (!vf->allow_untagged) {
+		spin_lock_bh(&vsi->mac_filter_hash_lock);
+		i40e_rm_vlan_all_mac(vsi, 0);
+		spin_unlock_bh(&vsi->mac_filter_hash_lock);
+		i40e_service_event_schedule(vsi->back);
+	}
+
+	cnt = bitmap_weight(vf->mirror_vlans, VLAN_N_VID);
+	if (cnt) {
+		u16 rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
+		u16 rule_id, rules_used, rules_free;
+		u16 sw_seid = vsi->uplink_seid;
+		u16 dst_seid = vsi->seid;
+		__le16 *mr_list;
+		int num = 0;
+
+		mr_list = kcalloc(cnt, sizeof(__le16), GFP_KERNEL);
+		if (!mr_list)
+			return -ENOMEM;
+		for_each_set_bit(vid, vf->mirror_vlans, VLAN_N_VID) {
+			mr_list[num] = CPU_TO_LE16(vid);
+			num++;
+		}
+		ret = i40e_aq_add_mirrorrule(&pf->hw, sw_seid, rule_type,
+					     dst_seid, cnt, mr_list, NULL,
+					     &rule_id, &rules_used,
+					     &rules_free);
+		if (!ret)
+			vf->vlan_rule_id = rule_id;
+		kfree(mr_list);
+	}
 
-	i40e_trigger_vf_reset(vf, flr);
+	ret = i40e_configure_vf_loopback(vsi, vf->vf_id, vf->loopback);
+	if (ret) {
+		vf->loopback = false;
+		goto err_out;
+	}
 
-	/* poll VPGEN_VFRSTAT reg to make sure
-	 * that reset is complete
-	 */
-	for (i = 0; i < 10; i++) {
-		/* VF reset requires driver to first reset the VF and then
-		 * poll the status register to make sure that the reset
-		 * completed successfully. Due to internal HW FIFO flushes,
-		 * we must wait 10ms before the register will be valid.
-		 */
-		usleep_range(10000, 20000);
-		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
-		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
-			rsd = true;
-			break;
+	if (vf->vlan_stripping) {
+		ret = i40e_configure_vf_vlan_stripping(vsi, vf->vf_id, true);
+		if (ret) {
+			vf->vlan_stripping = false;
+			goto err_out;
 		}
 	}
 
-	if (flr)
-		usleep_range(10000, 20000);
+	if (vf->promisc_mode) {
+		ret = i40e_configure_vf_promisc_mode(vf, vsi, vf->promisc_mode);
+		if (ret) {
+			vf->promisc_mode = VFD_PROMISC_OFF;
+			goto err_out;
+		}
+	}
 
-	if (!rsd)
-		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
-			vf->vf_id);
-	usleep_range(10000, 20000);
+	if (vf->link_forced) {
+		u8 link;
 
-	/* On initial reset, we don't have any queues to disable */
-	if (vf->lan_vsi_idx != 0)
-		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
+		link = (vf->link_up ? VFD_LINKSTATE_ON : VFD_LINKSTATE_OFF);
+		ret = i40e_configure_vf_link(vf, link);
+		if (ret) {
+			vf->link_forced = false;
+			goto err_out;
+		}
+	}
 
-	i40e_cleanup_reset_vf(vf);
+	if (vf->bw_share_applied && vf->bw_share) {
+		struct i40e_aqc_configure_vsi_tc_bw_data bw_data = {0};
+		int i;
 
-	i40e_flush(hw);
-	clear_bit(__I40E_VF_DISABLE, pf->state);
-}
+		bw_data.tc_valid_bits = 1;
+		bw_data.tc_bw_credits[0] = vf->bw_share;
+
+		ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
+		if (ret) {
+			dev_info(&pf->pdev->dev,
+				 "AQ command Config VSI BW allocation per TC failed = %d\n",
+				 pf->hw.aq.asq_last_status);
+			vf->bw_share_applied = false;
+			goto err_out;
+		}
+
+		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+			vsi->info.qs_handle[i] = bw_data.qs_handles[i];
+	}
 
+err_out:
+	return ret;
+}
+#endif /* HAVE_NDO_SET_VF_LINK_STATE */
 /**
- * i40e_reset_all_vfs
- * @pf: pointer to the PF structure
- * @flr: VFLR was issued or not
+ * i40e_alloc_vsi_res
+ * @vf: pointer to the VF info
+ * @idx: VSI index, applies only for ADq mode, zero otherwise
  *
- * Reset all allocated VFs in one go. First, tell the hardware to reset each
- * VF, then do all the waiting in one chunk, and finally finish restoring each
- * VF after the wait. This is useful during PF routines which need to reset
- * all VFs, as otherwise it must perform these resets in a serialized fashion.
+ * alloc VF vsi context & resources
  **/
-void i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
+static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
 {
-	struct i40e_hw *hw = &pf->hw;
-	struct i40e_vf *vf;
-	int i, v;
-	u32 reg;
-
-	/* If we don't have any VFs, then there is nothing to reset */
-	if (!pf->num_alloc_vfs)
-		return;
+	struct i40e_mac_filter *f = NULL;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_vsi *vsi;
+	u64 max_tx_rate = 0;
+	int ret = 0;
 
-	/* If VFs have been disabled, there is no need to reset */
-	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
-		return;
+	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
+			     vf->vf_id);
 
-	/* Begin reset on all VFs at once */
-	for (v = 0; v < pf->num_alloc_vfs; v++)
-		i40e_trigger_vf_reset(&pf->vf[v], flr);
+	if (!vsi) {
+		dev_err(&pf->pdev->dev,
+			"add vsi failed for VF %d, aq_err %d\n",
+			vf->vf_id, pf->hw.aq.asq_last_status);
+		ret = -ENOENT;
+		goto error_alloc_vsi_res;
+	}
 
-	/* HW requires some time to make sure it can flush the FIFO for a VF
-	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
-	 * sequence to make sure that it has completed. We'll keep track of
-	 * the VFs using a simple iterator that increments once that VF has
-	 * finished resetting.
-	 */
-	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
-		usleep_range(10000, 20000);
+	if (!idx) {
+		u64 hena = i40e_pf_get_default_rss_hena(pf);
+		bool trunk_conf = false;
+		u8 broadcast[ETH_ALEN];
+		u16 vid;
 
-		/* Check each VF in sequence, beginning with the VF to fail
-		 * the previous check.
+		vf->lan_vsi_idx = vsi->idx;
+		vf->lan_vsi_id = vsi->id;
+		/* If the port VLAN has been configured and then the
+		 * VF driver was removed then the VSI port VLAN
+		 * configuration was destroyed.  Check if there is
+		 * a port VLAN and restore the VSI configuration if
+		 * needed.
 		 */
-		while (v < pf->num_alloc_vfs) {
-			vf = &pf->vf[v];
-			reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
-			if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
-				break;
+		for_each_set_bit(vid, vf->trunk_vlans, VLAN_N_VID) {
+			if (vid != vf->port_vlan_id)
+				trunk_conf = true;
+		}
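+		/* Restore the port VLAN only when the trunk holds no other
+		 * VLANs; a conflicting trunk configuration takes precedence
+		 * over the stored port VLAN.
+		 */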
+		if (vf->port_vlan_id && !trunk_conf)
+			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
 
-			/* If the current VF has finished resetting, move on
-			 * to the next VF in sequence.
-			 */
-			v++;
+		spin_lock_bh(&vsi->mac_filter_hash_lock);
+		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
+			f = i40e_add_mac_filter(vsi,
+						vf->default_lan_addr.addr);
+			if (!f)
+				dev_info(&pf->pdev->dev,
+					"Could not add MAC filter %pM for VF %d\n",
+					vf->default_lan_addr.addr, vf->vf_id);
 		}
+		eth_broadcast_addr(broadcast);
+		f = i40e_add_mac_filter(vsi, broadcast);
+		if (!f)
+			dev_info(&pf->pdev->dev,
+				 "Could not allocate VF broadcast filter\n");
+		spin_unlock_bh(&vsi->mac_filter_hash_lock);
+		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
+		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
+		/* program mac filter only for VF VSI */
+		ret = i40e_sync_vsi_filters(vsi);
+		if (ret)
+			dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
 	}
 
-	if (flr)
-		usleep_range(10000, 20000);
-
-	/* Display a warning if at least one VF didn't manage to reset in
-	 * time, but continue on with the operation.
-	 */
-	if (v < pf->num_alloc_vfs)
-		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
-			pf->vf[v].vf_id);
-	usleep_range(10000, 20000);
-
-	/* Begin disabling all the rings associated with VFs, but do not wait
-	 * between each VF.
-	 */
-	for (v = 0; v < pf->num_alloc_vfs; v++) {
-		/* On initial reset, we don't have any queues to disable */
-		if (pf->vf[v].lan_vsi_idx == 0)
-			continue;
-
-		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
+	/* store the VSI index and ID for ADq; don't apply the MAC filter here */
+	if (vf->adq_enabled) {
+		vf->ch[idx].vsi_idx = vsi->idx;
+		vf->ch[idx].vsi_id = vsi->id;
 	}
 
-	/* Now that we've notified HW to disable all of the VF rings, wait
-	 * until they finish.
-	 */
-	for (v = 0; v < pf->num_alloc_vfs; v++) {
-		/* On initial reset, we don't have any queues to disable */
-		if (pf->vf[v].lan_vsi_idx == 0)
-			continue;
-
-		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
+	/* Set VF bandwidth if specified */
+	if (vf->tx_rate) {
+		max_tx_rate = vf->tx_rate;
+	} else if (vf->ch[idx].max_tx_rate) {
+		max_tx_rate = vf->ch[idx].max_tx_rate;
 	}
 
-	/* Hw may need up to 50ms to finish disabling the RX queues. We
-	 * minimize the wait by delaying only once for all VFs.
-	 */
-	mdelay(50);
-
-	/* Finish the reset on each VF */
-	for (v = 0; v < pf->num_alloc_vfs; v++)
-		i40e_cleanup_reset_vf(&pf->vf[v]);
+	if (max_tx_rate) {
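+		/* The admin queue expects the limit in bandwidth credits;
+		 * I40E_BW_CREDIT_DIVISOR converts from Mbps (one credit is
+		 * 50 Mbps in this driver family).
+		 */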
+		max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
+		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
+						  max_tx_rate, 0, NULL);
+		if (ret)
+			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
+				vf->vf_id, ret);
+	}
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+	ret = i40e_restore_vfd_config(vf, vsi);
+	if (ret)
+		dev_err(&pf->pdev->dev,
+			"Failed to restore VF-d config error %d\n", ret);
+#endif /* HAVE_NDO_SET_VF_LINK_STATE */
 
-	i40e_flush(hw);
-	clear_bit(__I40E_VF_DISABLE, pf->state);
+error_alloc_vsi_res:
+	return ret;
 }
 
 /**
- * i40e_free_vfs
- * @pf: pointer to the PF structure
+ * i40e_map_pf_queues_to_vsi
+ * @vf: pointer to the VF info
  *
- * free VF resources
+ * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
+ * function takes care of the first part, VSILAN_QTABLE, mapping PF queues
+ * to the VSI.
  **/
-void i40e_free_vfs(struct i40e_pf *pf)
+static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
 {
+	struct i40e_pf *pf = vf->pf;
 	struct i40e_hw *hw = &pf->hw;
-	u32 reg_idx, bit_idx;
-	int i, tmp, vf_id;
-
-	if (!pf->vf)
-		return;
-	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
-		usleep_range(1000, 2000);
+	u32 reg, num_tc = 1; /* VF has at least one traffic class */
+	u16 vsi_id, qps;
+	int i, j;
 
-	i40e_notify_client_of_vf_enable(pf, 0);
+	if (vf->adq_enabled)
+		num_tc = vf->num_tc;
 
-	/* Amortize wait time by stopping all VFs at the same time */
-	for (i = 0; i < pf->num_alloc_vfs; i++) {
-		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
-			continue;
+	for (i = 0; i < num_tc; i++) {
+		if (vf->adq_enabled) {
+			qps = vf->ch[i].num_qps;
+			vsi_id = vf->ch[i].vsi_id;
+		} else {
+			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
+			vsi_id = vf->lan_vsi_id;
+		}
 
-		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
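+		/* Each VSILAN_QTABLE register packs two PF queue IDs, one per
+		 * 16-bit half, so the seven registers written below cover up
+		 * to 14 queues per VSI; a slot value of 0x7FF marks
+		 * end-of-list.
+		 */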
+		for (j = 0; j < 7; j++) {
+			if (j * 2 >= qps) {
+				/* end of list */
+				reg = 0x07FF07FF;
+			} else {
+				u16 qid = i40e_vc_get_pf_queue_id(vf,
+								  vsi_id,
+								  j * 2);
+				reg = qid;
+				qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
+							      (j * 2) + 1);
+				reg |= qid << 16;
+			}
+			i40e_write_rx_ctl(hw,
+					  I40E_VSILAN_QTABLE(j, vsi_id),
+					  reg);
+		}
 	}
+}
 
-	for (i = 0; i < pf->num_alloc_vfs; i++) {
-		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
-			continue;
+/**
+ * i40e_map_pf_to_vf_queues
+ * @vf: pointer to the VF info
+ *
+ * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
+ * function takes care of the second part, VPLAN_QTABLE, and completes the
+ * VF mappings.
+ **/
+static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	u32 reg, total_qps = 0;
+	u32 qps, num_tc = 1; /* VF has at least one traffic class */
+	u16 vsi_id, qid;
+	int i, j;
+
+	if (vf->adq_enabled)
+		num_tc = vf->num_tc;
+
+	for (i = 0; i < num_tc; i++) {
+		if (vf->adq_enabled) {
+			qps = vf->ch[i].num_qps;
+			vsi_id = vf->ch[i].vsi_id;
+		} else {
+			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
+			vsi_id = vf->lan_vsi_id;
+		}
 
-		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
+		for (j = 0; j < qps; j++) {
+			qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);
+
+			reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
+			wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
+			     reg);
+			total_qps++;
+		}
 	}
+}
 
-	/* Disable IOV before freeing resources. This lets any VF drivers
-	 * running in the host get themselves cleaned up before we yank
-	 * the carpet out from underneath their feet.
+/**
+ * i40e_enable_vf_mappings
+ * @vf: pointer to the VF info
+ *
+ * enable VF mappings
+ **/
+static void i40e_enable_vf_mappings(struct i40e_vf *vf)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	u32 reg;
+
+	/* Tell the hardware we're using noncontiguous mapping. HW requires
+	 * that VF queues be mapped using this method, even when they are
+	 * contiguous in real life.
 	 */
-	if (!pci_vfs_assigned(pf->pdev))
-		pci_disable_sriov(pf->pdev);
-	else
-		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
+	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
+			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
 
-	/* free up VF resources */
-	tmp = pf->num_alloc_vfs;
-	pf->num_alloc_vfs = 0;
-	for (i = 0; i < tmp; i++) {
-		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
-			i40e_free_vf_res(&pf->vf[i]);
-		/* disable qp mappings */
-		i40e_disable_vf_mappings(&pf->vf[i]);
-	}
+	/* enable VF vplan_qtable mappings */
+	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
+	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
 
-	kfree(pf->vf);
-	pf->vf = NULL;
+	i40e_map_pf_to_vf_queues(vf);
+	i40e_map_pf_queues_to_vsi(vf);
 
-	/* This check is for when the driver is unloaded while VFs are
-	 * assigned. Setting the number of VFs to 0 through sysfs is caught
-	 * before this function ever gets called.
-	 */
-	if (!pci_vfs_assigned(pf->pdev)) {
-		/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
-		 * work correctly when SR-IOV gets re-enabled.
-		 */
-		for (vf_id = 0; vf_id < tmp; vf_id++) {
-			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
-			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
-			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
-		}
-	}
-	clear_bit(__I40E_VF_DISABLE, pf->state);
+	i40e_flush(hw);
 }
 
-#ifdef CONFIG_PCI_IOV
 /**
- * i40e_alloc_vfs
- * @pf: pointer to the PF structure
- * @num_alloc_vfs: number of VFs to allocate
+ * i40e_disable_vf_mappings
+ * @vf: pointer to the VF info
  *
- * allocate VF resources
+ * disable VF mappings
  **/
-int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
+static void i40e_disable_vf_mappings(struct i40e_vf *vf)
 {
-	struct i40e_vf *vfs;
-	int i, ret = 0;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	int i;
 
-	/* Disable interrupt 0 so we don't try to handle the VFLR. */
-	i40e_irq_dynamic_disable_icr0(pf);
+	/* disable qp mappings */
+	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
+	for (i = 0; i < I40E_MAX_VSI_QP; i++)
+		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
+		     I40E_QUEUE_END_OF_LIST);
+	i40e_flush(hw);
+}
 
-	/* Check to see if we're just allocating resources for extant VFs */
-	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
-		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
-		if (ret) {
-			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
-			pf->num_alloc_vfs = 0;
-			goto err_iov;
-		}
-	}
-	/* allocate memory */
-	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
-	if (!vfs) {
-		ret = -ENOMEM;
-		goto err_alloc;
-	}
-	pf->vf = vfs;
+/**
+ * i40e_free_vf_res
+ * @vf: pointer to the VF info
+ *
+ * free VF resources
+ **/
+static void i40e_free_vf_res(struct i40e_vf *vf)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	u32 reg_idx, reg;
+	int i, j, msix_vf;
 
-	/* apply default profile */
-	for (i = 0; i < num_alloc_vfs; i++) {
-		vfs[i].pf = pf;
-		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
-		vfs[i].vf_id = i;
+	/* Start by disabling VF's configuration API to prevent the OS from
+	 * accessing the VF's VSI after it's freed / invalidated.
+	 */
+	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
 
-		/* assign default capabilities */
-		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
-		vfs[i].spoofchk = true;
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+	/* Release vlan mirror */
+	if (vf->lan_vsi_idx)
+		i40e_vf_del_vlan_mirror(vf, pf->vsi[vf->lan_vsi_idx]);
+#endif /* HAVE_NDO_SET_VF_LINK_STATE */
 
-		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
+	/* It's possible the VF had requested more queues than the default, so
+	 * do the accounting here when we're about to free them.
+	 */
+	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
+		pf->queues_left +=
+			vf->num_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;
+	}
 
+	/* free vsi & disconnect it from the parent uplink */
+	if (vf->lan_vsi_idx) {
+		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
+		vf->lan_vsi_idx = 0;
+		vf->lan_vsi_id = 0;
 	}
-	pf->num_alloc_vfs = num_alloc_vfs;
 
-	/* VF resources get allocated during reset */
-	i40e_reset_all_vfs(pf, false);
+	/* do the accounting and remove additional ADq VSI's */
+	if (vf->adq_enabled && vf->ch[0].vsi_idx) {
+		for (j = 0; j < vf->num_tc; j++) {
+			/* At this point VSI0 has already been released, so
+			 * don't release it again; only clear the stored
+			 * index and ID values.
+			 */
+			if (j)
+				i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
+			vf->ch[j].vsi_idx = 0;
+			vf->ch[j].vsi_id = 0;
+		}
+	}
+	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
 
-	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
+	/* disable interrupts so the VF starts in a known state */
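+	/* Vector 0 uses the per-VF VFINT_DYN_CTL0 register, while vectors
+	 * 1..msix_vf-1 live in a flat VFINT_DYN_CTLN array indexed by
+	 * (msix_vf - 1) * vf_id + (i - 1); the VPINT_LNKLST registers in
+	 * the following loop share the same layout.
+	 */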
+	for (i = 0; i < msix_vf; i++) {
+		/* format is the same for both registers */
+		if (i == 0)
+			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
+		else
+			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
+						      (vf->vf_id))
+						     + (i - 1));
+		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+		i40e_flush(hw);
+	}
 
-err_alloc:
-	if (ret)
-		i40e_free_vfs(pf);
-err_iov:
-	/* Re-enable interrupt 0. */
-	i40e_irq_dynamic_enable_icr0(pf, false);
-	return ret;
+	/* clear the irq settings */
+	for (i = 0; i < msix_vf; i++) {
+		/* format is the same for both registers */
+		if (i == 0)
+			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
+		else
+			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
+						      (vf->vf_id))
+						     + (i - 1));
+		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
+		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
+		wr32(hw, reg_idx, reg);
+		i40e_flush(hw);
+	}
+	/* reset some of the state variables keeping track of the resources */
+	vf->num_queue_pairs = 0;
+	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
+	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
 }
 
-#endif
 /**
- * i40e_pci_sriov_enable
- * @pdev: pointer to a pci_dev structure
- * @num_vfs: number of VFs to allocate
+ * i40e_alloc_vf_res
+ * @vf: pointer to the VF info
  *
- * Enable or change the number of VFs
+ * allocate VF resources
  **/
-static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
+static int i40e_alloc_vf_res(struct i40e_vf *vf)
 {
-#ifdef CONFIG_PCI_IOV
-	struct i40e_pf *pf = pci_get_drvdata(pdev);
-	int pre_existing_vfs = pci_num_vf(pdev);
-	int err = 0;
+	struct i40e_pf *pf = vf->pf;
+	int total_queue_pairs = 0;
+	int ret, idx;
 
-	if (test_bit(__I40E_TESTING, pf->state)) {
-		dev_warn(&pdev->dev,
-			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
-		err = -EPERM;
-		goto err_out;
-	}
+	if (vf->num_req_queues &&
+	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
+		pf->num_vf_qps = vf->num_req_queues;
+	else
+		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
 
-	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
-		i40e_free_vfs(pf);
-	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
-		goto out;
+	/* allocate hw vsi context & associated resources */
+	ret = i40e_alloc_vsi_res(vf, 0);
+	if (ret)
+		goto error_alloc;
+	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
 
-	if (num_vfs > pf->num_req_vfs) {
-		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
-			 num_vfs, pf->num_req_vfs);
-		err = -EPERM;
-		goto err_out;
+	/* allocate additional VSIs based on tc information for ADq */
+	if (vf->adq_enabled) {
+		if (pf->queues_left >=
+		    (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
+			/* TC 0 always belongs to VF VSI */
+			for (idx = 1; idx < vf->num_tc; idx++) {
+				ret = i40e_alloc_vsi_res(vf, idx);
+				if (ret)
+					goto error_alloc;
+			}
+			/* send correct number of queues */
+			total_queue_pairs = I40E_MAX_VF_QUEUES;
+		} else {
+			dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
+				 vf->vf_id);
+			vf->adq_enabled = false;
+		}
 	}
 
-	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
-	err = i40e_alloc_vfs(pf, num_vfs);
-	if (err) {
-		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
-		goto err_out;
-	}
+	/* We account for each VF to get a default number of queue pairs.  If
+	 * the VF has now requested more, we need to account for that to make
+	 * certain we never request more queues than we actually have left in
+	 * HW.
+	 */
+	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
+		pf->queues_left -=
+			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;
 
-out:
-	return num_vfs;
+	if (vf->trusted)
+		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+	else
+		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
 
-err_out:
-	return err;
-#endif
-	return 0;
+	/* store the total qps number for the runtime
+	 * VF req validation
+	 */
+	vf->num_queue_pairs = total_queue_pairs;
+
+	/* VF is now completely initialized */
+	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);
+
+error_alloc:
+	if (ret)
+		i40e_free_vf_res(vf);
+
+	return ret;
 }
 
+#define VF_DEVICE_STATUS 0xAA
+#define VF_TRANS_PENDING_MASK 0x20
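+/* These constants presumably name the config-space location of the VF's PCIe
+ * Device Status register (VF_DEVICE_STATUS) and its Transactions Pending bit
+ * (VF_TRANS_PENDING_MASK); i40e_quiesce_vf_pci() polls them through the
+ * PF_PCI_CIAA/CIAD indirection registers until pending transactions drain.
+ */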
 /**
- * i40e_pci_sriov_configure
- * @pdev: pointer to a pci_dev structure
- * @num_vfs: number of VFs to allocate
+ * i40e_quiesce_vf_pci
+ * @vf: pointer to the VF structure
  *
- * Enable or change the number of VFs. Called when the user updates the number
- * of VFs in sysfs.
+ * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
+ * if the transactions never clear.
  **/
-int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
+static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
 {
-	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	int vf_abs_id, i;
+	u32 reg;
 
-	if (num_vfs) {
-		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
-			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
-			i40e_do_reset_safe(pf,
-					   BIT_ULL(__I40E_PF_RESET_REQUESTED));
-		}
-		return i40e_pci_sriov_enable(pdev, num_vfs);
-	}
+	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
 
-	if (!pci_vfs_assigned(pf->pdev)) {
-		i40e_free_vfs(pf);
-		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
-		i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
-	} else {
-		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
-		return -EINVAL;
+	wr32(hw, I40E_PF_PCI_CIAA,
+	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
+	for (i = 0; i < 100; i++) {
+		reg = rd32(hw, I40E_PF_PCI_CIAD);
+		if ((reg & VF_TRANS_PENDING_MASK) == 0)
+			return 0;
+		udelay(1);
 	}
-	return 0;
+	return -EIO;
 }
 
-/***********************virtual channel routines******************/
+static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi);
+static inline void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, int *num_vlans,
+					   s16 **vlan_list);
+static inline i40e_status
+i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
+		     bool unicast_enable, s16 *vl, int num_vlans);
 
 /**
- * i40e_vc_send_msg_to_vf
+ * i40e_config_vf_promiscuous_mode
  * @vf: pointer to the VF info
- * @v_opcode: virtual channel opcode
- * @v_retval: virtual channel return value
- * @msg: pointer to the msg buffer
- * @msglen: msg length
+ * @vsi_id: VSI id
+ * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
+ * @alluni: set MAC L2 layer unicast promiscuous enable/disable
  *
- * send msg to VF
+ * Called from the VF to configure the promiscuous mode of
+ * VF vsis and from the VF reset path to reset promiscuous mode.
  **/
-static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
-				  u32 v_retval, u8 *msg, u16 msglen)
+static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
+						   u16 vsi_id,
+						   bool allmulti,
+						   bool alluni)
 {
-	struct i40e_pf *pf;
-	struct i40e_hw *hw;
-	int abs_vf_id;
-	i40e_status aq_ret;
+	i40e_status aq_ret = I40E_SUCCESS;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_vsi *vsi;
+	int num_vlans;
+	s16 *vl;
 
-	/* validate the request */
-	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
-		return -EINVAL;
+	vsi = i40e_find_vsi_from_id(pf, vsi_id);
+	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
+		return I40E_ERR_PARAM;
 
-	pf = vf->pf;
-	hw = &pf->hw;
-	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+	if (vf->port_vlan_id) {
+		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti,
+					      alluni, &vf->port_vlan_id, 1);
+		return aq_ret;
+	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
+		i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);
 
-	/* single place to detect unsuccessful return values */
-	if (v_retval) {
-		vf->num_invalid_msgs++;
-		dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
-			 vf->vf_id, v_opcode, v_retval);
-		if (vf->num_invalid_msgs >
-		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
-			dev_err(&pf->pdev->dev,
-				"Number of invalid messages exceeded for VF %d\n",
-				vf->vf_id);
-			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
-			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
-		}
-	} else {
-		vf->num_valid_msgs++;
-		/* reset the invalid counter, if a valid message is received. */
-		vf->num_invalid_msgs = 0;
-	}
+		if (!vl)
+			return I40E_ERR_NO_MEMORY;
 
-	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id,	v_opcode, v_retval,
-					msg, msglen, NULL);
-	if (aq_ret) {
-		dev_info(&pf->pdev->dev,
-			 "Unable to send the message to VF %d aq_err %d\n",
-			 vf->vf_id, pf->hw.aq.asq_last_status);
-		return -EIO;
+		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
+					      vl, num_vlans);
+		kfree(vl);
+		return aq_ret;
 	}
-
-	return 0;
+	/* no VLANs to set promiscuous mode on; set it on the VSI itself */
+	aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
+				      NULL, 0);
+	return aq_ret;
 }
 
 /**
- * i40e_vc_send_resp_to_vf
- * @vf: pointer to the VF info
- * @opcode: operation code
- * @retval: return value
+ * i40e_trigger_vf_reset
+ * @vf: pointer to the VF structure
+ * @flr: VFLR was issued or not
  *
- * send resp msg to VF
+ * Trigger hardware to start a reset for a particular VF. Expects the caller
+ * to wait the proper amount of time to allow hardware to reset the VF before
+ * it cleans up and restores VF functionality.
  **/
-static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
-				   enum virtchnl_ops opcode,
-				   i40e_status retval)
+static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
 {
-	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	u32 reg, reg_idx, bit_idx;
+
+	/* warn the VF */
+	clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
+
+	/* Disable VF's configuration API during reset. The flag is re-enabled
+	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
+	 * It's normally disabled in i40e_free_vf_res(), but it's safer
+	 * to do it earlier to give some time to finish to any VF config
+	 * functions that may still be running at this point.
+	 */
+	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
+
+	/* In the case of a VFLR, the HW has already reset the VF and we
+	 * just need to clean up, so don't hit the VFRTRIG register.
+	 */
+	if (!flr) {
+		/* reset VF using VPGEN_VFRTRIG reg */
+		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+	}
+	/* clear the VFLR bit in GLGEN_VFLRSTAT */
+	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
+	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
+	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+	i40e_flush(hw);
+
+	if (i40e_quiesce_vf_pci(vf))
+		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
+			vf->vf_id);
 }
 
 /**
- * i40e_vc_get_version_msg
- * @vf: pointer to the VF info
+ * i40e_cleanup_reset_vf
+ * @vf: pointer to the VF structure
  *
- * called from the VF to request the API version used by the PF
+ * Cleanup a VF after the hardware reset is finished. Expects the caller to
+ * have verified whether the reset is finished properly, and ensure the
+ * minimum amount of wait time has passed.
  **/
-static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
+static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
 {
-	struct virtchnl_version_info info = {
-		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
-	};
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	u32 reg;
 
-	vf->vf_ver = *(struct virtchnl_version_info *)msg;
-	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
-	if (VF_IS_V10(&vf->vf_ver))
-		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
-	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
-				      I40E_SUCCESS, (u8 *)&info,
-				      sizeof(struct virtchnl_version_info));
+	/* disable promisc modes in case they were enabled */
+	i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);
+
+	/* free VF resources to begin resetting the VSI state */
+	i40e_free_vf_res(vf);
+
+	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
+	 * By doing this we allow HW to access VF memory at any point. If we
+	 * did it any sooner, HW could access memory while it was being freed
+	 * in i40e_free_vf_res(), causing an IOMMU fault.
+	 *
+	 * On the other hand, this needs to be done ASAP, because the VF driver
+	 * is waiting for this to happen and may report a timeout. It's
+	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
+	 * it.
+	 */
+	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+
+	/* reallocate VF resources to finish resetting the VSI state */
+	if (!i40e_alloc_vf_res(vf)) {
+		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
+		i40e_enable_vf_mappings(vf);
+		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
+		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
+		/* Do not notify the client during VF init */
+		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
+					&vf->vf_states))
+			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
+		vf->num_vlan = 0;
+	}
+
+	/* Tell the VF driver the reset is done. This needs to be done only
+	 * after VF has been fully initialized, because the VF driver may
+	 * request resources immediately after setting this flag.
+	 */
+	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
 }
 
 /**
- * i40e_vc_get_vf_resources_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- * @msglen: msg length
+ * i40e_reset_vf
+ * @vf: pointer to the VF structure
+ * @flr: VFLR was issued or not
  *
- * called from the VF to request its resources
+ * Returns true if the VF is reset, false otherwise.
  **/
-static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
+bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
 {
-	struct virtchnl_vf_resource *vfres = NULL;
 	struct i40e_pf *pf = vf->pf;
-	i40e_status aq_ret = 0;
-	struct i40e_vsi *vsi;
-	int num_vsis = 1;
-	int len = 0;
-	int ret;
-
-	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
-		aq_ret = I40E_ERR_PARAM;
-		goto err;
-	}
-
-	len = (sizeof(struct virtchnl_vf_resource) +
-	       sizeof(struct virtchnl_vsi_resource) * num_vsis);
+	struct i40e_hw *hw = &pf->hw;
+	bool rsd = false;
+	u32 reg;
+	int i;
 
-	vfres = kzalloc(len, GFP_KERNEL);
-	if (!vfres) {
-		aq_ret = I40E_ERR_NO_MEMORY;
-		len = 0;
-		goto err;
-	}
-	if (VF_IS_V11(&vf->vf_ver))
-		vf->driver_caps = *(u32 *)msg;
-	else
-		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
-				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
-				  VIRTCHNL_VF_OFFLOAD_VLAN;
+	/* If the VFs have been disabled, this means something else is
+	 * resetting the VF, so we shouldn't continue.
+	 */
+	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
+		return false;
 
-	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
-	vsi = pf->vsi[vf->lan_vsi_idx];
-	if (!vsi->info.pvid)
-		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
+	i40e_trigger_vf_reset(vf, flr);
 
-	if (i40e_vf_client_capable(pf, vf->vf_id) &&
-	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
-		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
-		set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
+	/* poll VPGEN_VFRSTAT reg to make sure
+	 * that reset is complete
+	 */
+	for (i = 0; i < 10; i++) {
+		/* VF reset requires driver to first reset the VF and then
+		 * poll the status register to make sure that the reset
+		 * completed successfully. Due to internal HW FIFO flushes,
+		 * we must wait 10ms before the register will be valid.
+		 */
+		usleep_range(10000, 20000);
+		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
+		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
+			rsd = true;
+			break;
+		}
 	}
 
-	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
-		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
-	} else {
-		if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
-		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
-			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
-		else
-			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
-	}
+	if (flr)
+		usleep_range(10000, 20000);
 
-	if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
-		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
-			vfres->vf_cap_flags |=
-				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
-	}
+	if (!rsd)
+		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
+			vf->vf_id);
+	usleep_range(10000, 20000);
 
-	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
-		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
+	/* On initial reset, we don't have any queues to disable */
+	if (vf->lan_vsi_idx != 0)
+		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
 
-	if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
-	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
-		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
+	i40e_cleanup_reset_vf(vf);
 
-	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
-		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
-			dev_err(&pf->pdev->dev,
-				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
-				 vf->vf_id);
-			aq_ret = I40E_ERR_PARAM;
-			goto err;
-		}
-		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
-	}
+	i40e_flush(hw);
+	clear_bit(__I40E_VF_DISABLE, pf->state);
 
-	if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
-		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
-			vfres->vf_cap_flags |=
-					VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
-	}
+	return true;
+}
 
-	vfres->num_vsis = num_vsis;
+/**
+ * i40e_reset_all_vfs
+ * @pf: pointer to the PF structure
+ * @flr: VFLR was issued or not
+ *
+ * Reset all allocated VFs in one go. First, tell the hardware to reset each
+ * VF, then do all the waiting in one chunk, and finally finish restoring each
+ * VF after the wait. This is useful during PF routines which need to reset
+ * all VFs, as otherwise it must perform these resets in a serialized fashion.
+ *
+ * Returns true if any VFs were reset, and false otherwise.
+ **/
+bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
+{
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_vf *vf;
+	int i, v;
+	u32 reg;
+
+	/* If we don't have any VFs, then there is nothing to reset */
+	if (!pf->num_alloc_vfs)
+		return false;
+
+	/* If VFs have been disabled, there is no need to reset */
+	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
+		return false;
+
+	/* Begin reset on all VFs at once */
+	for (v = 0; v < pf->num_alloc_vfs; v++)
+		i40e_trigger_vf_reset(&pf->vf[v], flr);
+
+	/* HW requires some time to make sure it can flush the FIFO for a VF
+	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
+	 * sequence to make sure that it has completed. We'll keep track of
+	 * the VFs using a simple iterator that increments once that VF has
+	 * finished resetting.
+	 */
+	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
+		usleep_range(10000, 20000);
+
+		/* Check each VF in sequence, beginning with the VF to fail
+		 * the previous check.
+		 */
+		while (v < pf->num_alloc_vfs) {
+			vf = &pf->vf[v];
+			reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
+			if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
+				break;
+
+			/* If the current VF has finished resetting, move on
+			 * to the next VF in sequence.
+			 */
+			v++;
+		}
+	}
+
+	if (flr)
+		usleep_range(10000, 20000);
+
+	/* Display a warning if at least one VF didn't manage to reset in
+	 * time, but continue on with the operation.
+	 */
+	if (v < pf->num_alloc_vfs)
+		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
+			pf->vf[v].vf_id);
+	usleep_range(10000, 20000);
+
+	/* Begin disabling all the rings associated with VFs, but do not wait
+	 * between each VF.
+	 */
+	for (v = 0; v < pf->num_alloc_vfs; v++) {
+		/* On initial reset, we don't have any queues to disable */
+		if (pf->vf[v].lan_vsi_idx == 0)
+			continue;
+
+		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
+	}
+
+	/* Now that we've notified HW to disable all of the VF rings, wait
+	 * until they finish.
+	 */
+	for (v = 0; v < pf->num_alloc_vfs; v++) {
+		/* On initial reset, we don't have any queues to disable */
+		if (pf->vf[v].lan_vsi_idx == 0)
+			continue;
+
+		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
+	}
+
+	/* Hw may need up to 50ms to finish disabling the RX queues. We
+	 * minimize the wait by delaying only once for all VFs.
+	 */
+	mdelay(50);
+
+	/* Finish the reset on each VF */
+	for (v = 0; v < pf->num_alloc_vfs; v++)
+		i40e_cleanup_reset_vf(&pf->vf[v]);
+
+	i40e_flush(hw);
+	clear_bit(__I40E_VF_DISABLE, pf->state);
+
+	return true;
+}
+
+/**
+ * i40e_free_vfs
+ * @pf: pointer to the PF structure
+ *
+ * free VF resources
+ **/
+void i40e_free_vfs(struct i40e_pf *pf)
+{
+	struct i40e_hw *hw = &pf->hw;
+	u32 reg_idx, bit_idx;
+	int i, tmp, vf_id;
+	struct i40e_vsi *src_vsi;
+	u16 rule_type, rule_id;
+	int ret;
+
+	if (!pf->vf)
+		return;
+
+	set_bit(__I40E_VFS_RELEASING, pf->state);
+
+	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
+		usleep_range(1000, 2000);
+
+	i40e_notify_client_of_vf_enable(pf, 0);
+
+	/* Start by clearing all ingress and egress mirroring setup.
+	 * We can continue only once all mirroring has been removed.
+	 */
+	for (i = 0; i < pf->num_alloc_vfs; i++) {
+		src_vsi = pf->vsi[pf->vf[i].lan_vsi_idx];
+		if (I40E_IS_MIRROR_VLAN_ID_VALID(pf->vf[i].ingress_vlan)) {
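+			/* Note the direction inversion: the VF's ingress
+			 * mirror is removed with a VPORT_EGRESS rule (and the
+			 * egress mirror below with a VPORT_INGRESS rule),
+			 * since the AQ rule types appear to be named from the
+			 * switch port's point of view.
+			 */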
+			rule_type = I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS;
+			rule_id = pf->vf[i].ingress_rule_id;
+			ret = i40e_del_ingress_egress_mirror(src_vsi, rule_type, rule_id);
+			if (ret)
+				dev_warn(&pf->pdev->dev,
+					 "Error %s when trying to remove ingress mirror on VF %d",
+					 i40e_aq_str
+					 (hw, hw->aq.asq_last_status),
+					 pf->vf[i].vf_id);
+		}
+		if (I40E_IS_MIRROR_VLAN_ID_VALID(pf->vf[i].egress_vlan)) {
+			rule_type = I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
+			rule_id = pf->vf[i].egress_rule_id;
+			ret = i40e_del_ingress_egress_mirror(src_vsi, rule_type, rule_id);
+			if (ret)
+				dev_warn(&pf->pdev->dev,
+					 "Error %s when trying to remove egress mirror on VF %d",
+					 i40e_aq_str
+					 (hw, hw->aq.asq_last_status),
+					 pf->vf[i].vf_id);
+		}
+	}
+
+	/* Amortize wait time by stopping all VFs at the same time */
+	for (i = 0; i < pf->num_alloc_vfs; i++) {
+		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
+			continue;
+
+		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
+	}
+
+	for (i = 0; i < pf->num_alloc_vfs; i++) {
+		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
+			continue;
+
+		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
+	}
+
+	/* Disable IOV before freeing resources. This lets any VF drivers
+	 * running in the host get themselves cleaned up before we yank
+	 * the carpet out from underneath their feet.
+	 */
+	if (!pci_vfs_assigned(pf->pdev))
+		pci_disable_sriov(pf->pdev);
+	else
+		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
+
+	/* free up VF resources */
+	tmp = pf->num_alloc_vfs;
+	pf->num_alloc_vfs = 0;
+	for (i = 0; i < tmp; i++) {
+		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
+			i40e_free_vf_res(&pf->vf[i]);
+		/* disable qp mappings */
+		i40e_disable_vf_mappings(&pf->vf[i]);
+	}
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+	if (pf->vfd_obj) {
+		destroy_vfd_sysfs(pf->pdev, pf->vfd_obj);
+		pf->vfd_obj = NULL;
+	}
+#endif /* HAVE_NDO_SET_VF_LINK_STATE */
+
+	kfree(pf->vf);
+	pf->vf = NULL;
+
+	/* This check is for when the driver is unloaded while VFs are
+	 * assigned. Setting the number of VFs to 0 through sysfs is caught
+	 * before this function ever gets called.
+	 */
+	if (!pci_vfs_assigned(pf->pdev)) {
+		/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
+		 * work correctly when SR-IOV gets re-enabled.
+		 */
+		for (vf_id = 0; vf_id < tmp; vf_id++) {
+			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
+			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
+			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+		}
+	}
+	clear_bit(__I40E_VF_DISABLE, pf->state);
+	clear_bit(__I40E_VFS_RELEASING, pf->state);
+}
+
+#ifdef CONFIG_PCI_IOV
+/**
+ * i40e_alloc_vfs
+ * @pf: pointer to the PF structure
+ * @num_alloc_vfs: number of VFs to allocate
+ *
+ * allocate VF resources
+ **/
+int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
+{
+	struct i40e_vf *vfs;
+	int i, ret = 0;
+
+	/* Disable interrupt 0 so we don't try to handle the VFLR. */
+	i40e_irq_dynamic_disable_icr0(pf);
+
+	/* Check to see if we're just allocating resources for extant VFs */
+	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
+		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
+		if (ret) {
+			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+			pf->num_alloc_vfs = 0;
+			goto err_iov;
+		}
+	}
+	/* allocate memory */
+	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
+	if (!vfs) {
+		ret = -ENOMEM;
+		goto err_alloc;
+	}
+	pf->vf = vfs;
+
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+	/* set vfd ops */
+	vfd_ops = &i40e_vfd_ops;
+	/* create the sriov kobjects */
+	pf->vfd_obj = create_vfd_sysfs(pf->pdev, num_alloc_vfs);
+#endif /* HAVE_NDO_SET_VF_LINK_STATE */
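+	/* The vfd_obj kobject tree appears to back the per-VF sysfs
+	 * configuration interface (VF-d); it is torn down again in
+	 * i40e_free_vfs().
+	 */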
+
+	/* apply default profile */
+	for (i = 0; i < num_alloc_vfs; i++) {
+		vfs[i].pf = pf;
+		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
+		vfs[i].vf_id = i;
+
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+		/* setup default mirror values */
+		vfs[i].ingress_vlan = I40E_NO_VF_MIRROR;
+		vfs[i].egress_vlan = I40E_NO_VF_MIRROR;
+#endif /* HAVE_NDO_SET_VF_LINK_STATE */
+		/* assign default loopback value */
+		vfs[i].loopback = true;
+		/* assign default mac anti spoof value for untrusted VF */
+		vfs[i].mac_anti_spoof = true;
+		/* assign default allow_untagged value */
+		vfs[i].allow_untagged = true;
+		/* assign default capabilities */
+		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
+		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
+	}
+	pf->num_alloc_vfs = num_alloc_vfs;
+
+	/* VF resources get allocated during reset */
+	i40e_reset_all_vfs(pf, false);
+
+	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
+
+err_alloc:
+	if (ret)
+		i40e_free_vfs(pf);
+err_iov:
+	/* Re-enable interrupt 0. */
+	i40e_irq_dynamic_enable_icr0(pf);
+	return ret;
+}
+
+#endif
+#if defined(HAVE_SRIOV_CONFIGURE) || defined(HAVE_RHEL6_SRIOV_CONFIGURE)
+/**
+ * i40e_pci_sriov_enable
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of VFs to allocate
+ *
+ * Enable or change the number of VFs
+ **/
+static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
+{
+#ifdef CONFIG_PCI_IOV
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	int pre_existing_vfs = pci_num_vf(pdev);
+	int err = 0;
+
+	if (test_bit(__I40E_TESTING, pf->state)) {
+		dev_warn(&pdev->dev,
+			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
+		err = -EPERM;
+		goto err_out;
+	}
+
+	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
+		i40e_free_vfs(pf);
+	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
+		goto out;
+
+	if (num_vfs > pf->num_req_vfs) {
+		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
+			 num_vfs, pf->num_req_vfs);
+		err = -EPERM;
+		goto err_out;
+	}
+
+	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
+	err = i40e_alloc_vfs(pf, num_vfs);
+	if (err) {
+		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
+		goto err_out;
+	}
+
+out:
+	return num_vfs;
+
+err_out:
+	return err;
+#endif
+	return 0;
+}
+
+/**
+ * i40e_pci_sriov_configure
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of VFs to allocate
+ *
+ * Enable or change the number of VFs. Called when the user updates the number
+ * of VFs in sysfs.
+ **/
+int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	int ret = 0;
+
+	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+		dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+		return -EAGAIN;
+	}
+
+	if (num_vfs) {
+		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+			i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
+		}
+		ret = i40e_pci_sriov_enable(pdev, num_vfs);
+		goto sriov_configure_out;
+	}
+	if (!pci_vfs_assigned(pdev)) {
+		i40e_free_vfs(pf);
+		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+		i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
+	} else {
+		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
+		ret = -EINVAL;
+		goto sriov_configure_out;
+	}
+sriov_configure_out:
+	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
+	return ret;
+}
+#endif
+
+/***********************virtual channel routines******************/
+
+/**
+ * i40e_vc_send_msg_to_vf_ex
+ * @vf: pointer to the VF info
+ * @v_opcode: virtual channel opcode
+ * @v_retval: virtual channel return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @is_quiet: true to suppress logging of unsuccessful return values, false otherwise
+ *
+ * send msg to VF
+ **/
+static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode,
+				     u32 v_retval, u8 *msg, u16 msglen,
+				     bool is_quiet)
+{
+	struct i40e_pf *pf;
+	struct i40e_hw *hw;
+	int abs_vf_id;
+	i40e_status aq_ret;
+
+	/* validate the request */
+	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+		return -EINVAL;
+
+	pf = vf->pf;
+	hw = &pf->hw;
+	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+	/* single place to detect unsuccessful return values */
+	if (v_retval && !is_quiet) {
+		vf->num_invalid_msgs++;
+		dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
+			 vf->vf_id, v_opcode, v_retval);
+		if (vf->num_invalid_msgs >
+		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
+			dev_err(&pf->pdev->dev,
+				"Number of invalid messages exceeded for VF %d\n",
+				vf->vf_id);
+			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
+			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
+		}
+	} else {
+		vf->num_valid_msgs++;
+		/* reset the invalid counter, if a valid message is received. */
+		vf->num_invalid_msgs = 0;
+	}
+
+	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
+					msg, msglen, NULL);
+	if (aq_ret) {
+		dev_info(&pf->pdev->dev,
+			 "Unable to send the message to VF %d aq_err %d\n",
+			 vf->vf_id, pf->hw.aq.asq_last_status);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/**
+ * i40e_vc_send_msg_to_vf
+ * @vf: pointer to the VF info
+ * @v_opcode: virtual channel opcode
+ * @v_retval: virtual channel return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send msg to VF
+ **/
+static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
+				  u32 v_retval, u8 *msg, u16 msglen)
+{
+	return i40e_vc_send_msg_to_vf_ex(vf, v_opcode, v_retval,
+					 msg, msglen, false);
+}
+
+/**
+ * i40e_vc_send_resp_to_vf
+ * @vf: pointer to the VF info
+ * @opcode: operation code
+ * @retval: return value
+ *
+ * send resp msg to VF
+ **/
+static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
+				   enum virtchnl_ops opcode,
+				   i40e_status retval)
+{
+	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
+}
+
+/**
+ * i40e_vc_get_version_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to request the API version used by the PF
+ **/
+static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
+{
+	struct virtchnl_version_info info = {
+		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
+	};
+
+	vf->vf_ver = *(struct virtchnl_version_info *)msg;
+	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
+	if (VF_IS_V10(&vf->vf_ver))
+		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
+	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
+				      I40E_SUCCESS, (u8 *)&info,
+				      sizeof(struct virtchnl_version_info));
+}
+
+#ifdef __TC_MQPRIO_MODE_MAX
+/**
+ * i40e_del_qch - delete all the additional VSIs created as a part of ADq
+ * @vf: pointer to VF structure
+ **/
+static void i40e_del_qch(struct i40e_vf *vf)
+{
+	struct i40e_pf *pf = vf->pf;
+	int i;
+
+	/* The first element in the array belongs to the primary VF VSI, which
+	 * we shouldn't delete. We should, however, delete the rest of the
+	 * VSIs created for ADq.
+	 */
+	for (i = 1; i < vf->num_tc; i++) {
+		if (vf->ch[i].vsi_idx) {
+			i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
+			vf->ch[i].vsi_idx = 0;
+			vf->ch[i].vsi_id = 0;
+		}
+	}
+}
+
+#endif /* __TC_MQPRIO_MODE_MAX */
+
+/**
+ * i40e_vc_get_vf_resources_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to request its resources
+ **/
+static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
+{
+	struct virtchnl_vf_resource *vfres = NULL;
+	struct i40e_pf *pf = vf->pf;
+	i40e_status aq_ret = 0;
+	struct i40e_vsi *vsi;
+	int num_vsis = 1;
+	int len = 0;
+	int ret;
+
+	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto err;
+	}
+
+	len = (sizeof(struct virtchnl_vf_resource) +
+	       sizeof(struct virtchnl_vsi_resource) * num_vsis);
+
+	vfres = kzalloc(len, GFP_KERNEL);
+	if (!vfres) {
+		aq_ret = I40E_ERR_NO_MEMORY;
+		len = 0;
+		goto err;
+	}
+	if (VF_IS_V11(&vf->vf_ver))
+		vf->driver_caps = *(u32 *)msg;
+	else
+		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
+				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
+				  VIRTCHNL_VF_OFFLOAD_VLAN;
+
+	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
+#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED
+	vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
+#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
+
+	vsi = pf->vsi[vf->lan_vsi_idx];
+	if (!vsi->info.pvid)
+		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
+
+	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
+	} else {
+		if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
+		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
+			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+		else
+			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
+	}
+	if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
+		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+			vfres->vf_cap_flags |=
+				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
+	}
+
+	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
+		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
+
+	if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
+	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
+		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
+
+	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
+		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+			dev_err(&pf->pdev->dev,
+				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
+				 vf->vf_id);
+			aq_ret = I40E_ERR_PARAM;
+			goto err;
+		}
+		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+	}
+
+	if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
+		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+			vfres->vf_cap_flags |=
+					VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
+	}
+
+	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
+		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
+
+#ifdef __TC_MQPRIO_MODE_MAX
+	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
+		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;
+#endif /* __TC_MQPRIO_MODE_MAX */
+
+	vfres->num_vsis = num_vsis;
 	vfres->num_queue_pairs = vf->num_queue_pairs;
 	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
 	vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
 	vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
 
-	if (vf->lan_vsi_idx) {
-		vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
-		vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
-		vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
-		/* VFs only use TC 0 */
-		vfres->vsi_res[0].qset_handle
-					  = le16_to_cpu(vsi->info.qs_handle[0]);
-		ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
-				vf->default_lan_addr.addr);
+	if (vf->lan_vsi_idx) {
+		vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
+		vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
+		vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
+		/* VFs only use TC 0 */
+		vfres->vsi_res[0].qset_handle
+					  = LE16_TO_CPU(vsi->info.qs_handle[0]);
+		ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
+				vf->default_lan_addr.addr);
+	}
+	set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
+	set_bit(I40E_VF_STATE_LOADED_VF_DRIVER, &vf->vf_states);
+	/* if vf is in base mode, keep only the base capabilities that are
+	 * negotiated
+	 */
+	if (pf->vf_base_mode_only)
+		vfres->vf_cap_flags &= VF_BASE_MODE_OFFLOADS;
+err:
+	/* send the response back to the VF */
+	ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
+				     aq_ret, (u8 *)vfres, len);
+
+	kfree(vfres);
+	return ret;
+}
+
+/**
+ * i40e_getnum_vf_vsi_vlan_filters
+ * @vsi: pointer to the vsi
+ *
+ * called to get the number of VLANs offloaded on this VF
+ **/
+static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
+{
+	struct i40e_mac_filter *f;
+	int num_vlans = 0, bkt;
+
+	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
+		if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
+			num_vlans++;
+	}
+
+	return num_vlans;
+}
+
+/**
+ * i40e_get_vlan_list_sync
+ * @vsi: pointer to the vsi
+ * @num_vlans: number of vlans present in mac_filter_hash, returned to caller
+ * @vlan_list: list of vlans present in mac_filter_hash, returned to caller.
+ *	       This array is allocated here and must be freed by the caller.
+ *
+ * Called to get the number of VLANs and the VLAN list present in
+ * mac_filter_hash.
+ **/
+static inline void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, int *num_vlans,
+					   s16 **vlan_list)
+{
+	struct i40e_mac_filter *f;
+	int bkt;
+	int i;
+
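+	/* The list below is allocated with GFP_ATOMIC because the allocation
+	 * happens under mac_filter_hash_lock, a BH spinlock where sleeping
+	 * is not allowed.
+	 */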
+	spin_lock_bh(&vsi->mac_filter_hash_lock);
+	*num_vlans = i40e_getnum_vf_vsi_vlan_filters(vsi);
+	*vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list),
+			     GFP_ATOMIC);
+	if (!(*vlan_list))
+		goto err;
+
+	i = 0;
+	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
+		if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
+			continue;
+		(*vlan_list)[i++] = f->vlan;
+	}
+err:
+	spin_unlock_bh(&vsi->mac_filter_hash_lock);
+}
+
+/**
+ * i40e_set_vsi_promisc
+ * @vf: pointer to the vf struct
+ * @seid: vsi number
+ * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable
+ *		  for a given VLAN
+ * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable
+ *		    for a given VLAN
+ * @vl: List of vlans - apply filter for given vlans
+ * @num_vlans: Number of elements in @vl
+ **/
+static inline i40e_status
+i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
+		     bool unicast_enable, s16 *vl, int num_vlans)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	i40e_status aq_ret = 0;
+	int i;
+
+	/* No VLANs to set promiscuous mode on; set it on the VSI itself */
+	if (!num_vlans || !vl) {
+		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid,
+							       multi_enable,
+							       NULL);
+		if (aq_ret) {
+			int aq_err = pf->hw.aq.asq_last_status;
+
+			dev_err(&pf->pdev->dev,
+				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
+				vf->vf_id,
+				i40e_stat_str(&pf->hw, aq_ret),
+				i40e_aq_str(&pf->hw, aq_err));
+
+			return aq_ret;
+		}
+
+		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid,
+							     unicast_enable,
+							     NULL, true);
+
+		if (aq_ret) {
+			int aq_err = pf->hw.aq.asq_last_status;
+
+			dev_err(&pf->pdev->dev,
+				"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
+				vf->vf_id,
+				i40e_stat_str(&pf->hw, aq_ret),
+				i40e_aq_str(&pf->hw, aq_err));
+		}
+
+		return aq_ret;
+	}
+
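+	/* Program promiscuous mode per VLAN. AQ failures are logged but
+	 * do not stop the loop, so the remaining VLANs are still
+	 * programmed and the last error (if any) is returned.
+	 */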
+	for (i = 0; i < num_vlans; i++) {
+		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid,
+							    multi_enable,
+							    vl[i], NULL);
+		if (aq_ret) {
+			int aq_err = pf->hw.aq.asq_last_status;
+
+			dev_err(&pf->pdev->dev,
+				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
+				vf->vf_id,
+				i40e_stat_str(&pf->hw, aq_ret),
+				i40e_aq_str(&pf->hw, aq_err));
+		}
+
+		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid,
+							    unicast_enable,
+							    vl[i], NULL);
+		if (aq_ret) {
+			int aq_err = pf->hw.aq.asq_last_status;
+
+			dev_err(&pf->pdev->dev,
+				"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
+				vf->vf_id,
+				i40e_stat_str(&pf->hw, aq_ret),
+				i40e_aq_str(&pf->hw, aq_err));
+		}
+	}
+	return aq_ret;
+}
+
+/**
+ * i40e_vc_config_promiscuous_mode_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure the promiscuous mode of
+ * VF vsis
+ **/
+static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
+{
+	struct virtchnl_promisc_info *info =
+	    (struct virtchnl_promisc_info *)msg;
+	i40e_status aq_ret = I40E_SUCCESS;
+	struct i40e_pf *pf = vf->pf;
+	bool allmulti = false;
+	bool alluni = false;
+
+	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto err_out;
+	}
+	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+		dev_err(&pf->pdev->dev,
+			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
+			vf->vf_id);
+		if (pf->vf_base_mode_only)
+			dev_err(&pf->pdev->dev, "VF %d is in base mode only, promiscuous mode is not supported\n",
+				vf->vf_id);
+
+		/* Lie to the VF on purpose, because this is an error we can
+		 * ignore. An unprivileged VF is not a virtual channel error.
+		 */
+		aq_ret = I40E_SUCCESS;
+		goto err_out;
+	}
+
+	if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
+		aq_ret = I40E_ERR_PARAM;
+		goto err_out;
+	}
+
+	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto err_out;
+	}
+
+	/* Multicast promiscuous handling */
+	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
+		allmulti = true;
+
+	if (info->flags & FLAG_VF_UNICAST_PROMISC)
+		alluni = true;
+
+	aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
+						 alluni);
+	if (aq_ret)
+		goto err_out;
+
+	if (allmulti) {
+		if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC,
+				      &vf->vf_states))
+			dev_info(&pf->pdev->dev,
+				 "VF %d successfully set multicast promiscuous mode\n",
+				 vf->vf_id);
+	} else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC,
+				      &vf->vf_states))
+		dev_info(&pf->pdev->dev,
+			 "VF %d successfully unset multicast promiscuous mode\n",
+			 vf->vf_id);
+
+	if (alluni) {
+		if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC,
+				      &vf->vf_states))
+			dev_info(&pf->pdev->dev,
+				 "VF %d successfully set unicast promiscuous mode\n",
+				 vf->vf_id);
+	} else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC,
+				      &vf->vf_states))
+		dev_info(&pf->pdev->dev,
+			 "VF %d successfully unset unicast promiscuous mode\n",
+			 vf->vf_id);
+
+err_out:
+	/* send the response to the VF */
+	return i40e_vc_send_resp_to_vf(vf,
+				       VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+				       aq_ret);
+}
+
+/**
+ * i40e_vc_config_queues_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure the rx/tx
+ * queues
+ **/
+static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
+{
+	struct virtchnl_vsi_queue_config_info *qci =
+	    (struct virtchnl_vsi_queue_config_info *)msg;
+	struct virtchnl_queue_pair_info *qpi;
+	struct i40e_pf *pf = vf->pf;
+	u16 vsi_id, vsi_queue_id = 0;
+	u16 num_qps_all = 0;
+	i40e_status aq_ret = 0;
+	int i, j = 0, idx = 0;
+
+	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
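+	/* With ADq, the VF's queues are spread across its channel VSIs;
+	 * the total it configures must match the sum the PF allocated
+	 * per channel.
+	 */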
+	if (vf->adq_enabled) {
+		for (i = 0; i < I40E_MAX_VF_VSI; i++)
+			num_qps_all += vf->ch[i].num_qps;
+		if (num_qps_all != qci->num_queue_pairs) {
+			aq_ret = I40E_ERR_PARAM;
+			goto error_param;
+		}
+	}
+
+	vsi_id = qci->vsi_id;
+
+	for (i = 0; i < qci->num_queue_pairs; i++) {
+		qpi = &qci->qpair[i];
+
+		if (!vf->adq_enabled) {
+			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
+						      qpi->txq.queue_id)) {
+				aq_ret = I40E_ERR_PARAM;
+				goto error_param;
+			}
+
+			vsi_queue_id = qpi->txq.queue_id;
+
+			if (qpi->txq.vsi_id != qci->vsi_id ||
+			    qpi->rxq.vsi_id != qci->vsi_id ||
+			    qpi->rxq.queue_id != vsi_queue_id) {
+				aq_ret = I40E_ERR_PARAM;
+				goto error_param;
+			}
+		}
+
+		if (vf->adq_enabled) {
+			if (idx >= ARRAY_SIZE(vf->ch)) {
+				aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
+				goto error_param;
+			}
+			vsi_id = vf->ch[idx].vsi_id;
+		}
+
+		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
+					     &qpi->rxq) ||
+		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
+					     &qpi->txq)) {
+			aq_ret = I40E_ERR_PARAM;
+			goto error_param;
+		}
+
+		/* For ADq there can be up to 4 VSIs with max 4 queues each.
+		 * The VF does not know about these additional VSIs; all it
+		 * cares about is its own queues. The PF configures these
+		 * queues on the appropriate VSIs based on the TC mapping.
+		 */
+		if (vf->adq_enabled) {
+			if (idx >= ARRAY_SIZE(vf->ch)) {
+				aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
+				goto error_param;
+			}
+			if (j == (vf->ch[idx].num_qps - 1)) {
+				idx++;
+				j = 0; /* resetting the queue count */
+				vsi_queue_id = 0;
+			} else {
+				j++;
+				vsi_queue_id++;
+			}
+		}
+	}
+
+	/* set vsi num_queue_pairs in use to num configured by VF */
+	if (!vf->adq_enabled) {
+		pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
+			qci->num_queue_pairs;
+	} else {
+		for (i = 0; i < vf->num_tc; i++)
+			pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs =
+			       vf->ch[i].num_qps;
+	}
+
+error_param:
+	/* send the response to the VF */
+	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+				       aq_ret);
+}
+
+/**
+ * i40e_validate_queue_map
+ * @vf: pointer to the VF info
+ * @vsi_id: vsi id
+ * @queuemap: Tx or Rx queue map
+ *
+ * check if Tx or Rx queue map is valid
+ **/
+static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
+				   unsigned long queuemap)
+{
+	u16 vsi_queue_id, queue_id;
+
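+	/* Each set bit is a VSI-relative queue id; with ADq it must be
+	 * translated into the owning channel VSI and its local queue id
+	 * before it can be validated.
+	 */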
+	for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
+		if (vf->adq_enabled) {
+			vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
+			queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
+		} else {
+			queue_id = vsi_queue_id;
+		}
+
+		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * i40e_vc_config_irq_map_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure the IRQ-to-queue map
+ **/
+static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
+{
+	struct virtchnl_irq_map_info *irqmap_info =
+	    (struct virtchnl_irq_map_info *)msg;
+	struct virtchnl_vector_map *map;
+	u16 vsi_id;
+	i40e_status aq_ret = 0;
+	int i;
+
+	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	if (irqmap_info->num_vectors >
+	    vf->pf->hw.func_caps.num_msix_vectors_vf) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	for (i = 0; i < irqmap_info->num_vectors; i++) {
+		map = &irqmap_info->vecmap[i];
+		/* validate msg params */
+		if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
+		    !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
+			aq_ret = I40E_ERR_PARAM;
+			goto error_param;
+		}
+		vsi_id = map->vsi_id;
+
+		if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
+			aq_ret = I40E_ERR_PARAM;
+			goto error_param;
+		}
+
+		if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
+			aq_ret = I40E_ERR_PARAM;
+			goto error_param;
+		}
+
+		i40e_config_irq_link_list(vf, vsi_id, map);
+	}
+error_param:
+	/* send the response to the VF */
+	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
+				       aq_ret);
+}
+
+/**
+ * i40e_ctrl_vf_tx_rings
+ * @vsi: the SRIOV VSI being configured
+ * @q_map: bit map of the queues to be enabled
+ * @enable: start or stop the queue
+ **/
+static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
+				 bool enable)
+{
+	struct i40e_pf *pf = vsi->back;
+	int ret = 0;
+	u16 q_id;
+
+	for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
+		ret = i40e_control_wait_tx_q(vsi->seid, pf,
+					     vsi->base_queue + q_id,
+					     false /*is xdp*/, enable);
+		if (ret)
+			break;
+	}
+	return ret;
+}
+
+/**
+ * i40e_ctrl_vf_rx_rings
+ * @vsi: the SRIOV VSI being configured
+ * @q_map: bit map of the queues to be enabled
+ * @enable: start or stop the queue
+ **/
+static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
+				 bool enable)
+{
+	struct i40e_pf *pf = vsi->back;
+	int ret = 0;
+	u16 q_id;
+
+	for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
+		ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
+					     enable);
+		if (ret)
+			break;
+	}
+	return ret;
+}
+
+/**
+ * i40e_vc_enable_queues_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to enable all or specific queue(s)
+ **/
+static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
+{
+	struct virtchnl_queue_select *vqs =
+	    (struct virtchnl_queue_select *)msg;
+	struct i40e_pf *pf = vf->pf;
+	i40e_status aq_ret = 0;
+	int i;
+
+	if (vf->pf_ctrl_disable) {
+		aq_ret = I40E_ERR_PARAM;
+		dev_err(&pf->pdev->dev,
+			"Admin has disabled VF %d via sysfs, will not enable queues\n",
+			 vf->vf_id);
+		goto error_param;
+	}
+	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	if (vqs->rx_queues == 0 && vqs->tx_queues == 0) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	/* Use the queue bit map sent by the VF */
+	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
+				  true)) {
+		aq_ret = I40E_ERR_TIMEOUT;
+		goto error_param;
+	}
+	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
+				  true)) {
+		aq_ret = I40E_ERR_TIMEOUT;
+		goto error_param;
+	}
+
+	/* need to start the rings for the additional ADq VSIs as well */
+	if (vf->adq_enabled) {
+		/* zero belongs to LAN VSI */
+		for (i = 1; i < vf->num_tc; i++) {
+			if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->ch[i].vsi_idx],
+						  vqs->rx_queues, true)) {
+				aq_ret = I40E_ERR_TIMEOUT;
+				goto error_param;
+			}
+			if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->ch[i].vsi_idx],
+						  vqs->tx_queues, true)) {
+				aq_ret = I40E_ERR_TIMEOUT;
+				goto error_param;
+			}
+		}
+	}
+
+	vf->queues_enabled = true;
+
+error_param:
+	/* send the response to the VF */
+	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
+				       aq_ret);
+}
+
+/**
+ * i40e_vc_disable_queues_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to disable all or specific
+ * queue(s)
+ **/
+static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
+{
+	struct virtchnl_queue_select *vqs =
+	    (struct virtchnl_queue_select *)msg;
+	struct i40e_pf *pf = vf->pf;
+	i40e_status aq_ret = 0;
+
+	/* Immediately mark queues as disabled */
+	vf->queues_enabled = false;
+
+	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	if ((vqs->rx_queues == 0 && vqs->tx_queues == 0) ||
+	    vqs->rx_queues > I40E_MAX_VF_QUEUES ||
+	    vqs->tx_queues > I40E_MAX_VF_QUEUES) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	/* Use the queue bit map sent by the VF */
+	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
+				  false)) {
+		aq_ret = I40E_ERR_TIMEOUT;
+		goto error_param;
+	}
+	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
+				  false)) {
+		aq_ret = I40E_ERR_TIMEOUT;
+		goto error_param;
+	}
+error_param:
+	/* send the response to the VF */
+	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
+				       aq_ret);
+}
+
+/**
+ * i40e_vc_request_queues_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * VFs get a default number of queues but can use this message to request a
+ * different number. If the request is successful, the PF will reset the VF
+ * and return 0. If unsuccessful, the PF will send a message informing the VF
+ * of the number of available queues and return the result of sending that
+ * message.
+ **/
+static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
+{
+	struct virtchnl_vf_res_request *vfres =
+		(struct virtchnl_vf_res_request *)msg;
+	u16 req_pairs = vfres->num_queue_pairs;
+	u8 cur_pairs = vf->num_queue_pairs;
+	struct i40e_pf *pf = vf->pf;
+
+	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
+		return -EINVAL;
+
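+	/* Three possible outcomes: cap the request at the hardware
+	 * maximum, shrink it to what the PF has left, or accept it and
+	 * reset the VF so the new queue count takes effect.
+	 */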
+	if (req_pairs > I40E_MAX_VF_QUEUES) {
+		dev_err(&pf->pdev->dev,
+			"VF %d tried to request more than %d queues.\n",
+			vf->vf_id,
+			I40E_MAX_VF_QUEUES);
+		vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
+	} else if (req_pairs - cur_pairs > pf->queues_left) {
+		dev_warn(&pf->pdev->dev,
+			 "VF %d requested %d more queues, but only %d left.\n",
+			 vf->vf_id,
+			 req_pairs - cur_pairs,
+			 pf->queues_left);
+		vfres->num_queue_pairs = pf->queues_left + cur_pairs;
+	} else {
+		/* successful request */
+		vf->num_req_queues = req_pairs;
+		i40e_vc_reset_vf(vf, true);
+		return 0;
+	}
+
+	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
+				      (u8 *)vfres, sizeof(*vfres));
+}
+
+/**
+ * i40e_vc_get_stats_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to get vsi stats
+ **/
+static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
+{
+	struct virtchnl_queue_select *vqs =
+	    (struct virtchnl_queue_select *)msg;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_eth_stats stats;
+	i40e_status aq_ret = 0;
+	struct i40e_vsi *vsi;
+
+	memset(&stats, 0, sizeof(struct i40e_eth_stats));
+
+	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	vsi = pf->vsi[vf->lan_vsi_idx];
+	if (!vsi) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+	i40e_update_eth_stats(vsi);
+	stats = vsi->eth_stats;
+
+error_param:
+	/* send the response back to the VF */
+	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
+				      (u8 *)&stats, sizeof(stats));
+}
+
+/* If the VF is not trusted, restrict the number of MAC/VLAN filters it can
+ * program. MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast
+ */
+#define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
+#define I40E_VC_MAX_VLAN_PER_VF 16
+
+/**
+ * i40e_check_vf_permission
+ * @vf: pointer to the VF info
+ * @al: MAC address list from virtchnl
+ * @is_quiet: set true for printing msg without opcode info, false otherwise
+ *
+ * Check that the given list of MAC addresses is allowed. Will return -EPERM
+ * if any address in the list is not valid. Checks the following conditions:
+ *
+ * 1) broadcast and zero addresses are never valid
+ * 2) unicast addresses are not allowed if the VMM has administratively set
+ *    the VF MAC address, unless the VF is marked as privileged.
+ * 3) There is enough space to add all the addresses.
+ *
+ * Note that to guarantee consistency, it is expected this function be called
+ * while holding the mac_filter_hash_lock, as otherwise the current number of
+ * addresses might not be accurate.
+ **/
+static inline int i40e_check_vf_permission(struct i40e_vf *vf,
+					   struct virtchnl_ether_addr_list *al,
+					   bool *is_quiet)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
+	int mac2add_cnt = 0;
+	int i;
+
+	if (!is_quiet)
+		return -EINVAL;
+
+	*is_quiet = false;
+	for (i = 0; i < al->num_elements; i++) {
+		struct i40e_mac_filter *f;
+		u8 *addr = al->list[i].addr;
+
+		if (is_broadcast_ether_addr(addr) ||
+		    is_zero_ether_addr(addr)) {
+			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", addr);
+			return I40E_ERR_INVALID_MAC_ADDR;
+		}
+
+		/* If the host VMM administrator has set the VF MAC address
+		 * administratively via the ndo_set_vf_mac command then deny
+		 * permission to the VF to add or delete unicast MAC addresses.
+		 * Unless the VF is privileged, in which case it may do so.
+		 * The VF may request to set the MAC address filter already
+		 * assigned to it so do not return an error in that case.
+		 */
+		if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
+		    !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
+		    !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
+			dev_err(&pf->pdev->dev,
+				"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
+			*is_quiet = true;
+			return -EPERM;
+		}
+
+		/* count filters that really will be added */
+		f = i40e_find_mac(vsi, addr);
+		if (!f)
+			++mac2add_cnt;
+	}
+
+	/* If this VF is not privileged, then we can't add more than a limited
+	 * number of addresses. Check to make sure that the additions do not
+	 * push us over the limit.
+	 */
+	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
+	    (i40e_count_filters(vsi) + mac2add_cnt) >
+		    I40E_VC_MAX_MAC_ADDR_PER_VF) {
+		dev_err(&pf->pdev->dev,
+			"Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
+		if (pf->vf_base_mode_only)
+			dev_err(&pf->pdev->dev, "VF %d is in base mode only, cannot add more than %d filters\n",
+				vf->vf_id, I40E_VC_MAX_MAC_ADDR_PER_VF);
+		return -EPERM;
+	}
+	return 0;
+}
+
+/**
+ * i40e_vc_add_mac_addr_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * add guest mac address filter
+ **/
+static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
+{
+	struct virtchnl_ether_addr_list *al =
+	    (struct virtchnl_ether_addr_list *)msg;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_vsi *vsi = NULL;
+	bool is_quiet = false;
+	i40e_status ret = 0;
+	int i;
+
+	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+	    !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
+		ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	vsi = pf->vsi[vf->lan_vsi_idx];
+
+	/* Lock once, because every function called inside the loop below
+	 * accesses the VSI's MAC filter list, which must be protected by
+	 * the same lock.
+	 */
+	spin_lock_bh(&vsi->mac_filter_hash_lock);
+
+	ret = i40e_check_vf_permission(vf, al, &is_quiet);
+	if (ret) {
+		spin_unlock_bh(&vsi->mac_filter_hash_lock);
+		goto error_param;
+	}
+
+	/* add new addresses to the list */
+	for (i = 0; i < al->num_elements; i++) {
+		struct i40e_mac_filter *f;
+
+		f = i40e_find_mac(vsi, al->list[i].addr);
+		if (!f) {
+			f = i40e_add_mac_filter(vsi, al->list[i].addr);
+			if (!f) {
+				dev_err(&pf->pdev->dev,
+					"Unable to add MAC filter %pM for VF %d\n",
+					al->list[i].addr, vf->vf_id);
+				ret = I40E_ERR_PARAM;
+				spin_unlock_bh(&vsi->mac_filter_hash_lock);
+				goto error_param;
+			}
+			if (is_valid_ether_addr(al->list[i].addr))
+				ether_addr_copy(vf->default_lan_addr.addr,
+						al->list[i].addr);
+		}
+	}
+	spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+	/* program the updated filter list */
+	ret = i40e_sync_vsi_filters(vsi);
+	if (ret)
+		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
+			vf->vf_id, ret);
+
+error_param:
+	/* send the response to the VF */
+	return i40e_vc_send_msg_to_vf_ex(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
+					 ret, NULL, 0, is_quiet);
+}
+
+/**
+ * i40e_vc_del_mac_addr_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * remove guest mac address filter
+ **/
+static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
+{
+	struct virtchnl_ether_addr_list *al =
+	    (struct virtchnl_ether_addr_list *)msg;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_vsi *vsi = NULL;
+	i40e_status ret = 0;
+	int i;
+
+	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+	    !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
+		ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	for (i = 0; i < al->num_elements; i++) {
+		if (is_broadcast_ether_addr(al->list[i].addr) ||
+		    is_zero_ether_addr(al->list[i].addr)) {
+			dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
+				al->list[i].addr, vf->vf_id);
+			ret = I40E_ERR_INVALID_MAC_ADDR;
+			goto error_param;
+		}
+	}
+	vsi = pf->vsi[vf->lan_vsi_idx];
+
+	spin_lock_bh(&vsi->mac_filter_hash_lock);
+	/* delete addresses from the list */
+	for (i = 0; i < al->num_elements; i++)
+		if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
+			ret = I40E_ERR_INVALID_MAC_ADDR;
+			spin_unlock_bh(&vsi->mac_filter_hash_lock);
+			goto error_param;
+		}
+
+	spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+	/* program the updated filter list */
+	ret = i40e_sync_vsi_filters(vsi);
+	if (ret)
+		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
+			vf->vf_id, ret);
+
+error_param:
+	/* send the response to the VF */
+	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
+				       ret);
+}
+
+/**
+ * i40e_vc_add_vlan_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * program guest vlan id
+ **/
+static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
+{
+	struct virtchnl_vlan_filter_list *vfl =
+	    (struct virtchnl_vlan_filter_list *)msg;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_vsi *vsi = NULL;
+	i40e_status aq_ret = 0;
+	int i;
+
+	if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
+	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+		dev_err(&pf->pdev->dev,
+			"VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
+		if (pf->vf_base_mode_only)
+			dev_err(&pf->pdev->dev, "VF %d is in base mode only, cannot add more than %d vlans\n",
+				vf->vf_id, I40E_VC_MAX_VLAN_PER_VF);
+		goto error_param;
+	}
+	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+	    !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
+	    bitmap_weight(vf->trunk_vlans, VLAN_N_VID))
+		/* Silently fail the request if the VF is untrusted and trunk
+		 * VLANs are configured.
+		 */
+		goto error_param;
+
+	for (i = 0; i < vfl->num_elements; i++) {
+		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
+			aq_ret = I40E_ERR_PARAM;
+			dev_err(&pf->pdev->dev,
+				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
+			goto error_param;
+		}
+	}
+	vsi = pf->vsi[vf->lan_vsi_idx];
+	if (vsi->info.pvid) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
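+	/* Adding a VLAN (re)enables stripping, and any promiscuous
+	 * state the VF already holds has to be replayed per VLAN.
+	 */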
+	i40e_vlan_stripping_enable(vsi);
+	for (i = 0; i < vfl->num_elements; i++) {
+		/* add new VLAN filter */
+		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
+
+		if (!ret)
+			vf->num_vlan++;
+
+		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
+			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
+							   true,
+							   vfl->vlan_id[i],
+							   NULL);
+		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
+			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
+							   true,
+							   vfl->vlan_id[i],
+							   NULL);
+
+		if (ret)
+			dev_err(&pf->pdev->dev,
+				"Unable to add VLAN filter %d for VF %d, error %d\n",
+				vfl->vlan_id[i], vf->vf_id, ret);
+	}
+
+error_param:
+	/* send the response to the VF */
+	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
+}
+
+/**
+ * i40e_vc_remove_vlan_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * remove programmed guest vlan id
+ **/
+static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
+{
+	struct virtchnl_vlan_filter_list *vfl =
+	    (struct virtchnl_vlan_filter_list *)msg;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_vsi *vsi = NULL;
+	i40e_status aq_ret = 0;
+	int i;
+
+	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+	    !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
+	    bitmap_weight(vf->trunk_vlans, VLAN_N_VID))
+		/* Silently fail the request if the VF is untrusted and trunk
+		 * VLANs are configured.
+		 */
+		goto error_param;
+
+	for (i = 0; i < vfl->num_elements; i++) {
+		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
+			aq_ret = I40E_ERR_PARAM;
+			goto error_param;
+		}
+	}
+
+	vsi = pf->vsi[vf->lan_vsi_idx];
+	if (vsi->info.pvid) {
+		if (vfl->num_elements > 1 || vfl->vlan_id[0])
+			aq_ret = I40E_ERR_PARAM;
+		goto error_param;
+	}
+
+	for (i = 0; i < vfl->num_elements; i++) {
+		i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
+		vf->num_vlan--;
+
+		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
+			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
+							   false,
+							   vfl->vlan_id[i],
+							   NULL);
+		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
+			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
+							   false,
+							   vfl->vlan_id[i],
+							   NULL);
+	}
+
+error_param:
+	/* send the response to the VF */
+	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
+}
+
+/**
+ * i40e_vc_config_rss_key
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS key
+ **/
+static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
+{
+	struct virtchnl_rss_key *vrk =
+		(struct virtchnl_rss_key *)msg;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_vsi *vsi = NULL;
+	i40e_status aq_ret = 0;
+
+	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+	    !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
+	    (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto err;
+	}
+
+	vsi = pf->vsi[vf->lan_vsi_idx];
+	aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
+err:
+	/* send the response to the VF */
+	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
+				       aq_ret);
+}
+
+/**
+ * i40e_vc_config_rss_lut
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS LUT
+ **/
+static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
+{
+	struct virtchnl_rss_lut *vrl =
+		(struct virtchnl_rss_lut *)msg;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_vsi *vsi = NULL;
+	i40e_status aq_ret = 0;
+	u16 i;
+
+	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+	    !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
+	    (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto err;
+	}
+
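+	/* Every LUT entry is a queue index, so each one must fall
+	 * inside the VF's allocated queue range.
+	 */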
+	for (i = 0; i < vrl->lut_entries; i++)
+		if (vrl->lut[i] >= vf->num_queue_pairs) {
+			aq_ret = I40E_ERR_PARAM;
+			goto err;
+		}
+
+	vsi = pf->vsi[vf->lan_vsi_idx];
+	aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
+	/* send the response to the VF */
+err:
+	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
+				       aq_ret);
+}
+
+/**
+ * i40e_vc_get_rss_hena
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Return the RSS HENA bits allowed by the hardware
+ **/
+static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
+{
+	struct virtchnl_rss_hena *vrh = NULL;
+	struct i40e_pf *pf = vf->pf;
+	i40e_status aq_ret = 0;
+	int len = 0;
+
+	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto err;
+	}
+	len = sizeof(struct virtchnl_rss_hena);
+
+	vrh = kzalloc(len, GFP_KERNEL);
+	if (!vrh) {
+		aq_ret = I40E_ERR_NO_MEMORY;
+		len = 0;
+		goto err;
+	}
+	vrh->hena = i40e_pf_get_default_rss_hena(pf);
+err:
+	/* send the response back to the VF */
+	aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
+					aq_ret, (u8 *)vrh, len);
+	kfree(vrh);
+	return aq_ret;
+}
+
+/**
+ * i40e_vc_set_rss_hena
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Set the RSS HENA bits for the VF
+ **/
+static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
+{
+	struct virtchnl_rss_hena *vrh =
+		(struct virtchnl_rss_hena *)msg;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	i40e_status aq_ret = 0;
+
+	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto err;
+	}
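+	/* The 64-bit HENA value spans two 32-bit registers: the low
+	 * word goes to HENA1(0), the high word to HENA1(1).
+	 */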
+	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
+	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
+			  (u32)(vrh->hena >> 32));
+
+	/* send the response to the VF */
+err:
+	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
+}
+
+/**
+ * i40e_vc_enable_vlan_stripping
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Enable vlan header stripping for the VF
+ **/
+static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
+{
+	i40e_status aq_ret = 0;
+	struct i40e_vsi *vsi;
+
+	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto err;
+	}
+
+	vsi = vf->pf->vsi[vf->lan_vsi_idx];
+	i40e_vlan_stripping_enable(vsi);
+
+	/* send the response to the VF */
+err:
+	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
+				       aq_ret);
+}
+
+/**
+ * i40e_vc_disable_vlan_stripping
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Disable vlan header stripping for the VF
+ **/
+static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
+{
+	i40e_status aq_ret = 0;
+	struct i40e_vsi *vsi;
+
+	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto err;
+	}
+
+	vsi = vf->pf->vsi[vf->lan_vsi_idx];
+	i40e_vlan_stripping_disable(vsi);
+
+	/* send the response to the VF */
+err:
+	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
+				       aq_ret);
+}
+
+#ifdef __TC_MQPRIO_MODE_MAX
+/**
+ * i40e_validate_cloud_filter
+ * @vf: pointer to the VF info
+ * @tc_filter: pointer to virtchnl_filter
+ *
+ * This function validates a cloud filter programmed as a TC filter for ADq
+ **/
+static int i40e_validate_cloud_filter(struct i40e_vf *vf,
+				      struct virtchnl_filter *tc_filter)
+{
+	struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
+	struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_vsi *vsi = NULL;
+	struct i40e_mac_filter *f;
+	struct hlist_node *h;
+	bool found = false;
+	int bkt;
+
+	if (!tc_filter->action) {
+		dev_info(&pf->pdev->dev,
+			 "VF %d: Currently ADq doesn't support Drop Action\n",
+			 vf->vf_id);
+		goto err;
+	}
+
+	/* action_meta is the TC number to which the filter is applied */
+	if (!tc_filter->action_meta ||
+	    tc_filter->action_meta > I40E_MAX_VF_VSI) {
+		dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
+			 vf->vf_id, tc_filter->action_meta);
+		goto err;
+	}
+
+	/* Check filter if it's programmed for advanced mode or basic mode.
+	 * There are two ADq modes (for VF only),
+	 * 1. Basic mode: intended to allow as many filter options as possible
+	 *		  to be added to a VF in Non-trusted mode. Main goal is
+	 *		  to add filters to its own MAC and VLAN id.
+	 * 2. Advanced mode: is for allowing filters to be applied other than
+	 *		  its own MAC or VLAN. This mode requires the VF to be
+	 *		  Trusted.
+	 */
+	if (mask.dst_mac[0] && !mask.dst_ip[0]) {
+		vsi = pf->vsi[vf->lan_vsi_idx];
+		f = i40e_find_mac(vsi, data.dst_mac);
+
+		if (!f) {
+			dev_info(&pf->pdev->dev,
+				 "Destination MAC %pM doesn't belong to VF %d\n",
+				 data.dst_mac, vf->vf_id);
+			goto err;
+		}
+
+		if (mask.vlan_id) {
+			hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
+					   hlist) {
+				if (f->vlan == ntohs(data.vlan_id)) {
+					found = true;
+					break;
+				}
+			}
+			if (!found) {
+				dev_info(&pf->pdev->dev,
+					 "VF %d doesn't have any VLAN id %u\n",
+					 vf->vf_id, ntohs(data.vlan_id));
+				goto err;
+			}
+		}
+	} else {
+		/* Check if VF is trusted */
+		if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+			dev_err(&pf->pdev->dev,
+				"VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
+				vf->vf_id);
+			return I40E_ERR_CONFIG;
+		}
+	}
+
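+	/* For the remaining fields, a non-zero first byte of both mask
+	 * and data means the filter keys on that field, so only then is
+	 * the value sanity-checked.
+	 */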
+	if (mask.dst_mac[0] & data.dst_mac[0]) {
+		if (is_broadcast_ether_addr(data.dst_mac) ||
+		    is_zero_ether_addr(data.dst_mac)) {
+			dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
+				 vf->vf_id, data.dst_mac);
+			goto err;
+		}
+	}
+
+	if (mask.src_mac[0] & data.src_mac[0]) {
+		if (is_broadcast_ether_addr(data.src_mac) ||
+		    is_zero_ether_addr(data.src_mac)) {
+			dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
+				 vf->vf_id, data.src_mac);
+			goto err;
+		}
+	}
+
+	if (mask.dst_port & data.dst_port) {
+		if (!data.dst_port) {
+			dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
+				 vf->vf_id);
+			goto err;
+		}
+	}
+
+	if (mask.src_port & data.src_port) {
+		if (!data.src_port) {
+			dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
+				 vf->vf_id);
+			goto err;
+		}
+	}
+
+	if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
+	    tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
+		dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
+			 vf->vf_id);
+		goto err;
+	}
+
+	if (mask.vlan_id & data.vlan_id) {
+		if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
+			dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
+				 vf->vf_id);
+			goto err;
+		}
+	}
+
+	return I40E_SUCCESS;
+err:
+	return I40E_ERR_CONFIG;
+}
+
+/**
+ * i40e_find_vf_vsi_from_seid - searches for the vsi with the given seid
+ * @vf: pointer to the VF info
+ * @seid: seid of the vsi it is searching for
+ **/
+static struct i40e_vsi *i40e_find_vf_vsi_from_seid(struct i40e_vf *vf, u16 seid)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_vsi *vsi = NULL;
+	int i;
+
+	for (i = 0; i < vf->num_tc; i++) {
+		vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
+		if (vsi && vsi->seid == seid)
+			return vsi;
+	}
+	return NULL;
+}
+
+/**
+ * i40e_del_all_cloud_filters
+ * @vf: pointer to the VF info
+ *
+ * This function deletes all cloud filters
+ **/
+static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
+{
+	struct i40e_cloud_filter *cfilter = NULL;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_vsi *vsi = NULL;
+	struct hlist_node *node;
+	int ret;
+
+	hlist_for_each_entry_safe(cfilter, node,
+				  &vf->cloud_filter_list, cloud_node) {
+		vsi = i40e_find_vf_vsi_from_seid(vf, cfilter->seid);
+
+		if (!vsi) {
+			dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
+				vf->vf_id, cfilter->seid);
+			continue;
+		}
+
+		if (cfilter->dst_port)
+			ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
+								false);
+		else
+			ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
+		if (ret)
+			dev_err(&pf->pdev->dev,
+				"VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
+				vf->vf_id, i40e_stat_str(&pf->hw, ret),
+				i40e_aq_str(&pf->hw,
+					    pf->hw.aq.asq_last_status));
+
+		hlist_del(&cfilter->cloud_node);
+		kfree(cfilter);
+		vf->num_cloud_filters--;
+	}
+}
+
+/**
+ * i40e_vc_del_cloud_filter
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * This function deletes a cloud filter programmed as a TC filter for ADq
+ **/
+static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
+{
+	struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
+	struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
+	struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
+	struct i40e_cloud_filter cfilter, *cf = NULL;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_vsi *vsi = NULL;
+	struct hlist_node *node;
+	i40e_status aq_ret = 0;
+	int i, ret;
+
+	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto err;
+	}
+
+	if (!vf->adq_enabled) {
+		dev_info(&pf->pdev->dev,
+			 "VF %d: ADq not enabled, can't apply cloud filter\n",
+			 vf->vf_id);
+		aq_ret = I40E_ERR_PARAM;
+		goto err;
+	}
+
+	if (i40e_validate_cloud_filter(vf, vcf)) {
+		dev_info(&pf->pdev->dev,
+			 "VF %d: Invalid input, can't apply cloud filter\n",
+			 vf->vf_id);
+		aq_ret = I40E_ERR_PARAM;
+		goto err;
+	}
+
+	memset(&cfilter, 0, sizeof(cfilter));
+	/* parse destination mac address */
+	for (i = 0; i < ETH_ALEN; i++)
+		cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
+
+	/* parse source mac address */
+	for (i = 0; i < ETH_ALEN; i++)
+		cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
+
+	cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
+	cfilter.dst_port = mask.dst_port & tcf.dst_port;
+	cfilter.src_port = mask.src_port & tcf.src_port;
+
+	switch (vcf->flow_type) {
+	case VIRTCHNL_TCP_V4_FLOW:
+		cfilter.n_proto = ETH_P_IP;
+		if (mask.dst_ip[0] & tcf.dst_ip[0])
+			memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
+			       ARRAY_SIZE(tcf.dst_ip));
+		else if (mask.src_ip[0] & tcf.src_ip[0])
+			memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
+			       ARRAY_SIZE(tcf.dst_ip));
+		break;
+	case VIRTCHNL_TCP_V6_FLOW:
+		cfilter.n_proto = ETH_P_IPV6;
+		if (mask.dst_ip[3] & tcf.dst_ip[3])
+			memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
+			       sizeof(cfilter.ip.v6.dst_ip6));
+		if (mask.src_ip[3] & tcf.src_ip[3])
+			memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
+			       sizeof(cfilter.ip.v6.src_ip6));
+		break;
+	default:
+		/* TC filter can be configured based on different combinations
+		 * and in this case IP is not a part of filter config
+		 */
+		dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
+			 vf->vf_id);
+	}
+
+	/* get the VSI to which the TC belongs */
+	vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
+	cfilter.seid = vsi->seid;
+	cfilter.flags = vcf->field_flags;
+
+	/* Deleting TC filter */
+	if (tcf.dst_port)
+		ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
+	else
+		ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
+	if (ret) {
+		dev_err(&pf->pdev->dev,
+			"VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
+			vf->vf_id, i40e_stat_str(&pf->hw, ret),
+			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+		goto err;
+	}
+
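+	/* The hardware filter is gone; drop the matching entry from the
+	 * VF's software list so the two stay in sync.
+	 */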
+	hlist_for_each_entry_safe(cf, node,
+				  &vf->cloud_filter_list, cloud_node) {
+		if (cf->seid != cfilter.seid)
+			continue;
+		if (mask.dst_port)
+			if (cfilter.dst_port != cf->dst_port)
+				continue;
+		if (mask.dst_mac[0])
+			if (!ether_addr_equal(cf->src_mac, cfilter.src_mac))
+				continue;
+		/* for ipv4 data to be valid, only first byte of mask is set */
+		if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
+			if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
+				   ARRAY_SIZE(tcf.dst_ip)))
+				continue;
+		/* for ipv6, mask is set for all sixteen bytes (4 words) */
+		if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
+			if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
+				   sizeof(cfilter.ip.v6.src_ip6)))
+				continue;
+		if (mask.vlan_id)
+			if (cfilter.vlan_id != cf->vlan_id)
+				continue;
+
+		hlist_del(&cf->cloud_node);
+		kfree(cf);
+		vf->num_cloud_filters--;
+	}
+
+err:
+	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
+				       aq_ret);
+}
+
+/**
+ * i40e_vc_add_cloud_filter
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * This function adds a cloud filter programmed as a TC filter for ADq
+ **/
+static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
+{
+	struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
+	struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
+	struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
+	struct i40e_cloud_filter *cfilter = NULL;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_vsi *vsi = NULL;
+	i40e_status aq_ret = 0;
+	char err_msg_buf[100];
+	bool is_quiet = false;
+	u16 err_msglen = 0;
+	u8 *err_msg = NULL;
+	int i, ret;
+
+	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto err_out;
+	}
+
+	if (!vf->adq_enabled) {
+		dev_info(&pf->pdev->dev,
+			 "VF %d: ADq is not enabled, can't apply cloud filter\n",
+			 vf->vf_id);
+		aq_ret = I40E_ERR_PARAM;
+		goto err_out;
+	}
+
+	if (pf->fdir_pf_active_filters ||
+	    (!hlist_empty(&pf->fdir_filter_list))) {
+		aq_ret = I40E_ERR_PARAM;
+		err_msglen = strlcpy(err_msg_buf,
+				     "Flow Director Sideband filters exist, turn ntuple off to configure cloud filters",
+				     sizeof(err_msg_buf));
+		err_msg = err_msg_buf;
+		is_quiet = true;
+		goto err_out;
+	}
+
+	if (i40e_validate_cloud_filter(vf, vcf)) {
+		dev_info(&pf->pdev->dev,
+			 "VF %d: Invalid input(s), can't apply cloud filter\n",
+			 vf->vf_id);
+		aq_ret = I40E_ERR_PARAM;
+		goto err_out;
+	}
+
+	cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
+	if (!cfilter)
+		return -ENOMEM;
+
+	/* parse destination mac address */
+	for (i = 0; i < ETH_ALEN; i++)
+		cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
+
+	/* parse source mac address */
+	for (i = 0; i < ETH_ALEN; i++)
+		cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
+
+	cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
+	cfilter->dst_port = mask.dst_port & tcf.dst_port;
+	cfilter->src_port = mask.src_port & tcf.src_port;
+
+	switch (vcf->flow_type) {
+	case VIRTCHNL_TCP_V4_FLOW:
+		cfilter->n_proto = ETH_P_IP;
+		if (mask.dst_ip[0] & tcf.dst_ip[0])
+			memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
+			       ARRAY_SIZE(tcf.dst_ip));
+		else if (mask.src_ip[0] & tcf.src_ip[0])
+			memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
+			       ARRAY_SIZE(tcf.dst_ip));
+		break;
+	case VIRTCHNL_TCP_V6_FLOW:
+		cfilter->n_proto = ETH_P_IPV6;
+		if (mask.dst_ip[3] & tcf.dst_ip[3])
+			memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
+			       sizeof(cfilter->ip.v6.dst_ip6));
+		if (mask.src_ip[3] & tcf.src_ip[3])
+			memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
+			       sizeof(cfilter->ip.v6.src_ip6));
+		break;
+	default:
+		/* TC filter can be configured based on different combinations
+		 * and in this case IP is not a part of filter config
+		 */
+		dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
+			 vf->vf_id);
+	}
+
+	/* get the VSI to which the TC belongs */
+	vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
+	cfilter->seid = vsi->seid;
+	cfilter->flags = vcf->field_flags;
+
+	/* Adding cloud filter programmed as TC filter */
+	if (tcf.dst_port)
+		ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
+	else
+		ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
+	if (ret) {
+		dev_err(&pf->pdev->dev,
+			"VF %d: Failed to add cloud filter, err %s aq_err %s\n",
+			vf->vf_id, i40e_stat_str(&pf->hw, ret),
+			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+		goto err_free;
+	}
+
+	INIT_HLIST_NODE(&cfilter->cloud_node);
+	hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
+	/* the list now owns the filter; clear the local pointer so the
+	 * error path below does not free it
+	 */
+	cfilter = NULL;
+	vf->num_cloud_filters++;
+err_free:
+	kfree(cfilter);
+err_out:
+	return i40e_vc_send_msg_to_vf_ex(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
+					 aq_ret, err_msg, err_msglen,
+					 is_quiet);
+}
+
+/**
+ * i40e_vc_add_qch_msg: Add queue channel and enable ADq
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ **/
+static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
+{
+	struct virtchnl_tc_info *tci = (struct virtchnl_tc_info *)msg;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_link_status *ls;
+	int i, adq_request_qps = 0;
+	i40e_status aq_ret = 0;
+	u32 speed;
+
+	ls = &pf->hw.phy.link_info;
+	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto err;
+	}
+
+	/* ADq cannot be applied if spoof check is ON */
+	if (vf->mac_anti_spoof) {
+		dev_err(&pf->pdev->dev,
+			"Spoof check is ON, turn OFF both MAC and VLAN anti spoof to enable ADq\n");
+		aq_ret = I40E_ERR_PARAM;
+		goto err;
+	}
+
+	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
+		dev_err(&pf->pdev->dev,
+			"VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
+			vf->vf_id);
+		aq_ret = I40E_ERR_PARAM;
+		goto err;
+	}
+
+	/* max number of traffic classes for VF currently capped at 4 */
+	if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
+		dev_err(&pf->pdev->dev,
+			"VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
+			vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
+		aq_ret = I40E_ERR_PARAM;
+		goto err;
+	}
+
+	/* validate queues for each TC */
+	for (i = 0; i < tci->num_tc; i++)
+		if (!tci->list[i].count ||
+		    tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
+			dev_err(&pf->pdev->dev,
+				"VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
+				vf->vf_id, i, tci->list[i].count,
+				I40E_DEFAULT_QUEUES_PER_VF);
+			aq_ret = I40E_ERR_PARAM;
+			goto err;
+		}
+
+	/* need Max VF queues but already have default number of queues */
+	adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;
+
+	if (pf->queues_left < adq_request_qps) {
+		dev_err(&pf->pdev->dev,
+			"No queues left to allocate to VF %d\n",
+			vf->vf_id);
+		aq_ret = I40E_ERR_PARAM;
+		goto err;
+	} else {
+		/* we need to allocate max VF queues to enable ADq so as to
+		 * make sure ADq enabled VF always gets back queues when it
+		 * goes through a reset.
+		 */
+		vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
+	}
+
+	/* get link speed in Mbps to validate the rate limit */
+	speed = i40e_vc_link_speed2mbps(ls->link_speed);
+	if (speed == SPEED_UNKNOWN) {
+		dev_err(&pf->pdev->dev, "Cannot detect link speed\n");
+		aq_ret = I40E_ERR_PARAM;
+		goto err;
+	}
+
+	/* parse data from the queue channel info */
+	vf->num_tc = tci->num_tc;
+	for (i = 0; i < vf->num_tc; i++) {
+		if (tci->list[i].max_tx_rate) {
+			if (tci->list[i].max_tx_rate > speed) {
+				dev_err(&pf->pdev->dev,
+					"Invalid max tx rate %llu specified for VF %d.\n",
+					tci->list[i].max_tx_rate,
+					vf->vf_id);
+				aq_ret = I40E_ERR_PARAM;
+				goto err;
+			} else {
+				vf->ch[i].max_tx_rate =
+					tci->list[i].max_tx_rate;
+			}
+		}
+		vf->ch[i].num_qps = tci->list[i].count;
+	}
+
+	/* set this flag only after making sure all inputs are sane */
+	vf->adq_enabled = true;
+	/* num_req_queues is set when the user changes the number of queues
+	 * via ethtool. It causes issues for the default VSI (which depends
+	 * on this variable) when ADq is enabled, hence reset it.
+	 */
+	vf->num_req_queues = 0;
+
+	/* reset the VF in order to allocate resources */
+	i40e_vc_reset_vf(vf, true);
+
+	return I40E_SUCCESS;
+
+	/* send the response to the VF */
+err:
+	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
+				       aq_ret);
+}
+
+/**
+ * i40e_vc_del_qch_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ **/
+static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
+{
+	struct i40e_pf *pf = vf->pf;
+	i40e_status aq_ret = 0;
+
+	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto err;
+	}
+
+	if (vf->adq_enabled) {
+		i40e_del_all_cloud_filters(vf);
+		i40e_del_qch(vf);
+		vf->adq_enabled = false;
+		vf->num_tc = 0;
+		dev_info(&pf->pdev->dev,
+			 "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
+			 vf->vf_id);
+	} else {
+		dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
+			 vf->vf_id);
+		aq_ret = I40E_ERR_PARAM;
+	}
+
+	/* reset the VF in order to allocate resources */
+	i40e_vc_reset_vf(vf, true);
+
+	return I40E_SUCCESS;
+
+err:
+	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
+				       aq_ret);
+}
+
+#endif /* __TC_MQPRIO_MODE_MAX */
+
+/**
+ * i40e_vc_process_vf_msg
+ * @pf: pointer to the PF structure
+ * @vf_id: source VF id
+ * @v_opcode: operation code
+ * @v_retval: unused return value code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the common aeq/arq handler to
+ * process request from VF
+ **/
+int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
+			   u32 __always_unused v_retval, u8 *msg, u16 msglen)
+{
+	struct i40e_hw *hw = &pf->hw;
+	int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
+	struct i40e_vf *vf;
+	int ret;
+
+	pf->vf_aq_requests++;
+	if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
+		return -EINVAL;
+	vf = &(pf->vf[local_vf_id]);
+
+	/* Check if VF is disabled. */
+	if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
+		return I40E_ERR_PARAM;
+
+	/* perform basic checks on the msg */
+	ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
+
+	if (ret) {
+		i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
+		dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
+			local_vf_id, v_opcode, msglen);
+		switch (ret) {
+		case VIRTCHNL_STATUS_ERR_PARAM:
+			return -EPERM;
+		default:
+			return -EINVAL;
+		}
+	}
+
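+	/* Dispatch to the opcode-specific handler; each handler is
+	 * responsible for sending its own reply to the VF.
+	 */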
+	switch (v_opcode) {
+	case VIRTCHNL_OP_VERSION:
+		ret = i40e_vc_get_version_msg(vf, msg);
+		break;
+	case VIRTCHNL_OP_GET_VF_RESOURCES:
+		ret = i40e_vc_get_vf_resources_msg(vf, msg);
+		i40e_vc_notify_vf_link_state(vf);
+		break;
+	case VIRTCHNL_OP_RESET_VF:
+		i40e_vc_reset_vf(vf, false);
+		if (!test_bit(__I40E_VFS_RELEASING, pf->state))
+			clear_bit(I40E_VF_STATE_LOADED_VF_DRIVER,
+				  &vf->vf_states);
+		ret = 0;
+		break;
+	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg);
+		break;
+	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+		ret = i40e_vc_config_queues_msg(vf, msg);
+		break;
+	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+		ret = i40e_vc_config_irq_map_msg(vf, msg);
+		break;
+	case VIRTCHNL_OP_ENABLE_QUEUES:
+		ret = i40e_vc_enable_queues_msg(vf, msg);
+		i40e_vc_notify_vf_link_state(vf);
+		break;
+	case VIRTCHNL_OP_DISABLE_QUEUES:
+		ret = i40e_vc_disable_queues_msg(vf, msg);
+		break;
+	case VIRTCHNL_OP_ADD_ETH_ADDR:
+		ret = i40e_vc_add_mac_addr_msg(vf, msg);
+		break;
+	case VIRTCHNL_OP_DEL_ETH_ADDR:
+		ret = i40e_vc_del_mac_addr_msg(vf, msg);
+		break;
+	case VIRTCHNL_OP_ADD_VLAN:
+		ret = i40e_vc_add_vlan_msg(vf, msg);
+		break;
+	case VIRTCHNL_OP_DEL_VLAN:
+		ret = i40e_vc_remove_vlan_msg(vf, msg);
+		break;
+	case VIRTCHNL_OP_GET_STATS:
+		ret = i40e_vc_get_stats_msg(vf, msg);
+		break;
+	case VIRTCHNL_OP_CONFIG_RSS_KEY:
+		ret = i40e_vc_config_rss_key(vf, msg);
+		break;
+	case VIRTCHNL_OP_CONFIG_RSS_LUT:
+		ret = i40e_vc_config_rss_lut(vf, msg);
+		break;
+	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+		ret = i40e_vc_get_rss_hena(vf, msg);
+		break;
+	case VIRTCHNL_OP_SET_RSS_HENA:
+		ret = i40e_vc_set_rss_hena(vf, msg);
+		break;
+	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+		ret = i40e_vc_enable_vlan_stripping(vf, msg);
+		break;
+	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+		ret = i40e_vc_disable_vlan_stripping(vf, msg);
+		break;
+	case VIRTCHNL_OP_REQUEST_QUEUES:
+		ret = i40e_vc_request_queues_msg(vf, msg);
+		break;
+#ifdef __TC_MQPRIO_MODE_MAX
+	case VIRTCHNL_OP_ENABLE_CHANNELS:
+		ret = i40e_vc_add_qch_msg(vf, msg);
+		break;
+	case VIRTCHNL_OP_DISABLE_CHANNELS:
+		ret = i40e_vc_del_qch_msg(vf, msg);
+		break;
+	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
+		ret = i40e_vc_add_cloud_filter(vf, msg);
+		break;
+	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
+		ret = i40e_vc_del_cloud_filter(vf, msg);
+		break;
+#endif /* __TC_MQPRIO_MODE_MAX */
+	case VIRTCHNL_OP_UNKNOWN:
+	default:
+		dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
+			v_opcode, local_vf_id);
+		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
+					      I40E_ERR_NOT_IMPLEMENTED);
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * i40e_vc_process_vflr_event
+ * @pf: pointer to the PF structure
+ *
+ * called from the VFLR irq handler to
+ * free up VF resources and state variables
+ **/
+int i40e_vc_process_vflr_event(struct i40e_pf *pf)
+{
+	struct i40e_hw *hw = &pf->hw;
+	u32 reg, reg_idx, bit_idx;
+	struct i40e_vf *vf;
+	int vf_id;
+
+	if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
+		return 0;
+
+	/* Re-enable the VFLR interrupt cause here, before looking for which
+	 * VF got reset. Otherwise, if another VF gets a reset while the
+	 * first one is being processed, that interrupt will be lost, and
+	 * that VF will be stuck in reset forever.
+	 */
+	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
+	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
+	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+	i40e_flush(hw);
+
+	clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
+	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
+		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
+		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
+		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
+		vf = &pf->vf[vf_id];
+		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
+		if (reg & BIT(bit_idx))
+			/* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
+			i40e_reset_vf(vf, true);
+	}
+
+	return 0;
+}
+
+#ifdef IFLA_VF_MAX
+
+/**
+ * i40e_set_vf_mac
+ * @vf: the VF
+ * @vsi: VF VSI to configure
+ * @mac: the mac address
+ *
+ * This function allows the administrator to set the mac address for the VF
+ *
+ * Returns 0 on success, negative on failure
+ *
+ **/
+static int i40e_set_vf_mac(struct i40e_vf *vf, struct i40e_vsi *vsi,
+			   const u8 *mac)
+{
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_mac_filter *f;
+	struct hlist_node *h;
+	int ret = 0;
+	int bkt;
+	u8 i;
+
+	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+		return -EAGAIN;
+	}
+
+	if (is_multicast_ether_addr(mac)) {
+		dev_err(&pf->pdev->dev,
+			"Invalid Ethernet address %pM for VF %d\n",
+			mac, vf->vf_id);
+		ret = -EINVAL;
+		goto error_param;
+	}
+
+	/* When the VF is resetting, wait until it is done.
+	 * It can take up to 200 milliseconds,
+	 * but wait for up to 300 milliseconds to be safe.
+	 * If the VF is indeed in reset, the vsi pointer has
+	 * to point at the newly loaded VSI under pf->vsi[id].
+	 */
+	for (i = 0; i < 15; i++) {
+		if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+			if (i > 0)
+				vsi = pf->vsi[vf->lan_vsi_idx];
+			break;
+		}
+		msleep(20);
+	}
+	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
+			vf->vf_id);
+		ret = -EAGAIN;
+		goto error_param;
+	}
+
+	/* Lock once, because the add/del_filter functions invoked below
+	 * require mac_filter_hash_lock to be held
+	 */
+	spin_lock_bh(&vsi->mac_filter_hash_lock);
+
+	/* delete the temporary mac address */
+	if (!is_zero_ether_addr(vf->default_lan_addr.addr))
+		i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
+
+	/* Delete all the filters for this VSI - we're going to kill it
+	 * anyway.
+	 */
+	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
+		__i40e_del_filter(vsi, f);
+
+	spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+	/* program mac filter */
+	if (i40e_sync_vsi_filters(vsi)) {
+		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
+		ret = -EIO;
+		goto error_param;
+	}
+	ether_addr_copy(vf->default_lan_addr.addr, mac);
+
+	if (is_zero_ether_addr(mac)) {
+		vf->pf_set_mac = false;
+		dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf->vf_id);
+	} else {
+		vf->pf_set_mac = true;
+		dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
+			 mac, vf->vf_id);
+	}
+
+	/* Force the VF interface down so it has to come back up with the
+	 * new MAC address
+	 */
+	i40e_vc_reset_vf(vf, true);
+	dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
+error_param:
+	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
+	return ret;
+}
+
+/**
+ * i40e_ndo_set_vf_mac
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @mac: mac address
+ *
+ * program VF mac address
+ **/
+int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_vf *vf;
+	int ret = 0;
+
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto error_param;
+
+	vf = &pf->vf[vf_id];
+	vsi = pf->vsi[vf->lan_vsi_idx];
+	ret = i40e_set_vf_mac(vf, vsi, mac);
+error_param:
+	return ret;
+}
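+
+/* Illustrative admin usage (not part of the driver): i40e_ndo_set_vf_mac
+ * backs the rtnetlink IFLA_VF_MAC attribute, e.g.
+ *
+ *   ip link set ethX vf 0 mac 02:11:22:33:44:55
+ *
+ * Setting an all-zero MAC clears the PF-administered address again.
+ */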
+
+/**
+ * i40e_vsi_has_vlans - True if VSI has configured VLANs
+ * @vsi: pointer to the vsi
+ *
+ * Check if a VSI has configured any VLANs. False if we have a port VLAN or if
+ * we have no configured VLANs. Do not call while holding the
+ * mac_filter_hash_lock.
+ */
+static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi)
+{
+	bool have_vlans;
+
+	/* If we have a port VLAN, then the VSI cannot have any VLANs
+	 * configured, as all MAC/VLAN filters will be assigned to the PVID.
+	 */
+	if (vsi->info.pvid)
+		return false;
+
+	/* Since we don't have a PVID, we know that if the device is in VLAN
+	 * mode it must be because of a VLAN filter configured on this VSI.
+	 */
+	spin_lock_bh(&vsi->mac_filter_hash_lock);
+	have_vlans = i40e_is_vsi_in_vlan(vsi);
+	spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+	return have_vlans;
+}
+
+/**
+ * i40e_ndo_set_vf_port_vlan
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @vlan_id: VLAN identifier
+ * @qos: priority setting
+ * @vlan_proto: vlan protocol
+ *
+ * program VF vlan id and/or qos
+ **/
+#ifdef IFLA_VF_VLAN_INFO_MAX
+int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
+			      u16 vlan_id, u8 qos, __be16 vlan_proto)
+#else
+int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
+			      int vf_id, u16 vlan_id, u8 qos)
+#endif /* IFLA_VF_VLAN_INFO_MAX */
+{
+	u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	bool allmulti = false, alluni = false;
+	struct i40e_pf *pf = np->vsi->back;
+	struct i40e_vsi *vsi;
+	struct i40e_vf *vf;
+	int ret;
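+
+	/* Illustrative example of the vlanprio encoding above, assuming
+	 * I40E_VLAN_PRIORITY_SHIFT puts qos in the 802.1p PCP bits (15:13)
+	 * of the VLAN TCI: vlan_id = 100, qos = 5 gives
+	 * vlanprio = 100 | (5 << 13) = 0xa064.
+	 */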
+
+	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+		return -EAGAIN;
+	}
+
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto error_pvid;
+
+	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
+		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
+		ret = -EINVAL;
+		goto error_pvid;
+	}
+#ifdef IFLA_VF_VLAN_INFO_MAX
+
+	if (vlan_proto != htons(ETH_P_8021Q)) {
+		dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
+		ret = -EPROTONOSUPPORT;
+		goto error_pvid;
+	}
+#endif
+
+	vf = &pf->vf[vf_id];
+	vsi = pf->vsi[vf->lan_vsi_idx];
+	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
+			vf_id);
+		ret = -EAGAIN;
+		goto error_pvid;
+	}
+
+	if (le16_to_cpu(vsi->info.pvid) == vlanprio) {
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+		/* if the VLAN is being removed then clear trunk_vlans */
+		if (!vsi->info.pvid)
+			memset(vf->trunk_vlans, 0,
+			       BITS_TO_LONGS(VLAN_N_VID) * sizeof(long));
+#endif /* HAVE_NDO_SET_VF_LINK_STATE */
+		goto error_pvid;
+	}
+
+	if (i40e_vsi_has_vlans(vsi)) {
+		dev_err(&pf->pdev->dev,
+			"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
+			vf_id);
+		/* Administrator error - knock the VF offline until the
+		 * network is reconfigured correctly and the VF driver is
+		 * reloaded.
+		 */
+		i40e_vc_reset_vf(vf, true);
+		/* During reset the VF got a new VSI, so refresh the pointer. */
+		vsi = pf->vsi[vf->lan_vsi_idx];
+	}
+
+	/* Locked once because multiple functions below iterate list */
+	spin_lock_bh(&vsi->mac_filter_hash_lock);
+
+	/* Check for condition where there was already a port VLAN ID
+	 * filter set and now it is being deleted by setting it to zero.
+	 * Additionally check for the condition where there was a port
+	 * VLAN but now there is a new and different port VLAN being set.
+	 * Before deleting all the old VLAN filters we must add new ones
+	 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
+	 * MAC addresses deleted.
+	 */
+	if ((!(vlan_id || qos) ||
+	    vlanprio != le16_to_cpu(vsi->info.pvid)) &&
+	    vsi->info.pvid) {
+		ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
+		if (ret) {
+			dev_info(&vsi->back->pdev->dev,
+				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
+				 vsi->back->hw.aq.asq_last_status);
+			spin_unlock_bh(&vsi->mac_filter_hash_lock);
+			goto error_pvid;
+		}
+	}
+
+	if (vsi->info.pvid) {
+		/* remove all filters on the old VLAN */
+		i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
+					   VLAN_VID_MASK));
+	}
+
+	spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+	/* disable promisc modes in case they were enabled */
+	ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
+					      allmulti, alluni);
+	if (ret) {
+		dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
+		goto error_pvid;
+	}
+
+	if (vlan_id || qos) {
+		ret = i40e_vsi_add_pvid(vsi, vlanprio);
+		if (ret) {
+			dev_info(&vsi->back->pdev->dev,
+				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
+				 vsi->back->hw.aq.asq_last_status);
+			goto error_pvid;
+		}
+	} else {
+		i40e_vsi_remove_pvid(vsi);
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+		/* if vlan is being removed then clear also trunk_vlan */
+		if (!vsi->info.pvid)
+			memset(vf->trunk_vlans, 0,
+			       BITS_TO_LONGS(VLAN_N_VID) * sizeof(long));
+#endif /* HAVE_NDO_SET_VF_LINK_STATE */
+	}
+	spin_lock_bh(&vsi->mac_filter_hash_lock);
+
+	if (vlan_id) {
+		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
+			 vlan_id, qos, vf_id);
+
+		/* add new VLAN filter for each MAC */
+		ret = i40e_add_vlan_all_mac(vsi, vlan_id);
+		if (ret) {
+			dev_info(&vsi->back->pdev->dev,
+				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
+				 vsi->back->hw.aq.asq_last_status);
+			spin_unlock_bh(&vsi->mac_filter_hash_lock);
+			goto error_pvid;
+		}
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+		/* only pvid should be present in trunk */
+		memset(vf->trunk_vlans, 0,
+		       BITS_TO_LONGS(VLAN_N_VID) * sizeof(long));
+		set_bit(vsi->info.pvid, vf->trunk_vlans);
+#endif /* HAVE_NDO_SET_VF_LINK_STATE */
+
+		/* remove the previously added non-VLAN MAC filters */
+		i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
+	}
+
+	spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+	if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
+		alluni = true;
+
+	if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
+		allmulti = true;
+
+	/* The Port VLAN needs to be saved across resets the same as the
+	 * default LAN MAC address.
+	 */
+	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
+	if (vsi->info.pvid) {
+		ret = i40e_config_vf_promiscuous_mode(vf,
+						      vsi->id,
+						      allmulti,
+						      alluni);
+		if (ret) {
+			dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
+			goto error_pvid;
+		}
+	}
+
+	/* Schedule the worker thread to take care of applying changes */
+	i40e_service_event_schedule(vsi->back);
+
+error_pvid:
+	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
+	return ret;
+}
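+
+/* Illustrative admin usage (not part of the driver): this ndo backs the
+ * rtnetlink IFLA_VF_VLAN attribute, e.g.
+ *
+ *   ip link set ethX vf 0 vlan 100 qos 5    (set port VLAN 100, PCP 5)
+ *   ip link set ethX vf 0 vlan 0            (remove the port VLAN)
+ */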
+
+/**
+ * i40e_ndo_set_vf_bw
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @min_tx_rate: Minimum Tx rate
+ * @max_tx_rate: Maximum Tx rate
+ *
+ * configure VF Tx rate
+ **/
+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+		       int max_tx_rate)
+#else
+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int max_tx_rate)
+#endif
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf *pf = np->vsi->back;
+	struct i40e_vsi *vsi;
+	struct i40e_vf *vf;
+	int ret = 0;
+
+	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+		return -EAGAIN;
+	}
+
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto error;
+
+	vf = &pf->vf[vf_id];
+	vsi = pf->vsi[vf->lan_vsi_idx];
+	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
+			vf_id);
+		ret = -EAGAIN;
+		goto error;
+	}
+
+	ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
+	if (ret)
+		goto error;
+
+	vf->tx_rate = max_tx_rate;
+error:
+	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
+	return ret;
+}
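+
+/* Illustrative admin usage (not part of the driver): this ndo backs
+ * IFLA_VF_RATE / IFLA_VF_TX_RATE, e.g.
+ *
+ *   ip link set ethX vf 0 max_tx_rate 1000  (cap VF 0 Tx at 1000 Mbps)
+ *
+ * Note that min_tx_rate is accepted by the signature but not applied in
+ * this version.
+ */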
+
+/**
+ * i40e_ndo_enable_vf
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @enable: true to enable & false to disable
+ *
+ * enable/disable VF
+ **/
+int i40e_ndo_enable_vf(struct net_device *netdev, int vf_id, bool enable)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
+ * i40e_ndo_get_vf_config
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @ivi: VF configuration structure
+ *
+ * return VF configuration
+ **/
+int i40e_ndo_get_vf_config(struct net_device *netdev,
+			   int vf_id, struct ifla_vf_info *ivi)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_vf *vf;
+	int ret = 0;
+
+	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+		return -EAGAIN;
 	}
-	set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
 
-err:
-	/* send the response back to the VF */
-	ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
-				     aq_ret, (u8 *)vfres, len);
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto error_param;
 
-	kfree(vfres);
+	vf = &pf->vf[vf_id];
+	/* first vsi is always the LAN vsi */
+	vsi = pf->vsi[vf->lan_vsi_idx];
+	if (!vsi) {
+		ret = -ENOENT;
+		goto error_param;
+	}
+
+	ivi->vf = vf_id;
+
+	ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
+
+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+	ivi->max_tx_rate = vf->tx_rate;
+	ivi->min_tx_rate = 0;
+#else
+	ivi->tx_rate = vf->tx_rate;
+#endif
+	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
+	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
+		   I40E_VLAN_PRIORITY_SHIFT;
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+	if (!vf->link_forced)
+		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+	else if (vf->link_up)
+		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+	else
+		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+#endif
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+	ivi->spoofchk = vf->mac_anti_spoof;
+#endif
+#ifdef HAVE_NDO_SET_VF_TRUST
+	ivi->trusted = vf->trusted;
+#endif /* HAVE_NDO_SET_VF_TRUST */
+	ret = 0;
+
+error_param:
+	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
+	return ret;
+}
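+
+/* Illustrative: the fields populated above (MAC, VLAN/QoS, Tx rate, link
+ * state, spoofchk, trust) are what "ip link show" prints per VF.
+ */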
+
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+/**
+ * i40e_ndo_set_vf_link_state
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @link: required link state
+ *
+ * Set the link state of a specified VF, regardless of physical link state
+ **/
+int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf *pf = np->vsi->back;
+	struct i40e_link_status *ls = &pf->hw.phy.link_info;
+	struct virtchnl_pf_event pfe;
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_vf *vf;
+	int abs_vf_id;
+	int ret = 0;
+
+	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+		return -EAGAIN;
+	}
+
+	/* validate the request */
+	if (vf_id >= pf->num_alloc_vfs) {
+		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+		ret = -EINVAL;
+		goto error_out;
+	}
+
+	vf = &pf->vf[vf_id];
+	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
+	pfe.severity = PF_EVENT_SEVERITY_INFO;
+
+	switch (link) {
+	case IFLA_VF_LINK_STATE_AUTO:
+		vf->link_forced = false;
+#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED
+		pfe.event_data.link_event_adv.link_status =
+			ls->link_info & I40E_AQ_LINK_UP;
+		pfe.event_data.link_event_adv.link_speed =
+			i40e_vc_link_speed2mbps(ls->link_speed);
+#else /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
+		pfe.event_data.link_event.link_status =
+			ls->link_info & I40E_AQ_LINK_UP;
+		pfe.event_data.link_event.link_speed =
+			i40e_virtchnl_link_speed(ls->link_speed);
+#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
+		break;
+	case IFLA_VF_LINK_STATE_ENABLE:
+		vf->link_forced = true;
+		vf->link_up = true;
+#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED
+		pfe.event_data.link_event_adv.link_status = true;
+		pfe.event_data.link_event_adv.link_speed =
+			i40e_vc_link_speed2mbps(ls->link_speed);
+#else /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
+		pfe.event_data.link_event.link_status = true;
+		pfe.event_data.link_event.link_speed =
+			i40e_virtchnl_link_speed(ls->link_speed);
+#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
+		break;
+	case IFLA_VF_LINK_STATE_DISABLE:
+		vf->link_forced = true;
+		vf->link_up = false;
+#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED
+		pfe.event_data.link_event_adv.link_status = false;
+		pfe.event_data.link_event_adv.link_speed = 0;
+#else /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
+		pfe.event_data.link_event.link_status = false;
+		pfe.event_data.link_event.link_speed = 0;
+#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
+		break;
+	default:
+		ret = -EINVAL;
+		goto error_out;
+	}
+	/* Do not allow changing the link state while the VF is disabled.
+	 * Skip this when the requested state is IFLA_VF_LINK_STATE_DISABLE,
+	 * to prevent a false-positive warning when the driver is reloaded.
+	 */
+	if (vf->pf_ctrl_disable && link != IFLA_VF_LINK_STATE_DISABLE) {
+		vf->link_up = false;
+#ifdef VIRTCHNL_VF_CAP_ADV_LINK_SPEED
+		pfe.event_data.link_event_adv.link_status = false;
+		pfe.event_data.link_event_adv.link_speed = 0;
+#else /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
+		pfe.event_data.link_event.link_status = false;
+		pfe.event_data.link_event.link_speed = 0;
+#endif /* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
+		dev_warn(&pf->pdev->dev,
+			 "Not possible to change VF link state, please enable it first\n");
+	}
+
+	/* Notify the VF of its new link state */
+	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
+			       I40E_SUCCESS, (u8 *)&pfe, sizeof(pfe), NULL);
+
+error_out:
+	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
+	return ret;
+}
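+
+/* Illustrative admin usage (not part of the driver):
+ *
+ *   ip link set ethX vf 0 state auto     (follow physical link)
+ *   ip link set ethX vf 0 state enable   (force link up)
+ *   ip link set ethX vf 0 state disable  (force link down)
+ */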
+
+#endif /* HAVE_NDO_SET_VF_LINK_STATE */
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+/**
+ * i40e_ndo_set_vf_spoofchk
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @enable: flag to enable or disable feature
+ *
+ * Enable or disable VF spoof checking
+ **/
+int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_vsi_context ctxt;
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_vf *vf;
+	int ret = 0;
+
+	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+		return -EAGAIN;
+	}
+
+	/* validate the request */
+	if (vf_id >= pf->num_alloc_vfs) {
+		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	vf = &pf->vf[vf_id];
+	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
+			vf_id);
+		ret = -EAGAIN;
+		goto out;
+	}
+
+	if (enable == vf->mac_anti_spoof)
+		goto out;
+
+	vf->mac_anti_spoof = enable;
+	memset(&ctxt, 0, sizeof(ctxt));
+	ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
+	ctxt.pf_num = pf->hw.pf_id;
+	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+	if (enable)
+		ctxt.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
+	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+	if (ret) {
+		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
+			ret);
+		ret = -EIO;
+	}
+out:
+	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
+	return ret;
+}
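+
+/* Illustrative admin usage (not part of the driver):
+ * ip link set ethX vf 0 spoofchk on|off
+ */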
+
+#endif /* HAVE_VF_SPOOFCHK_CONFIGURE */
+#ifdef HAVE_NDO_SET_VF_TRUST
+/**
+ * i40e_ndo_set_vf_trust
+ * @netdev: network interface device structure of the pf
+ * @vf_id: VF identifier
+ * @setting: trust setting
+ *
+ * Enable or disable VF trust setting
+ **/
+int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf *pf = np->vsi->back;
+	struct i40e_vf *vf;
+	int ret = 0;
+
+	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+		return -EAGAIN;
+	}
+
+	/* validate the request */
+	if (vf_id >= pf->num_alloc_vfs) {
+		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+		dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	vf = &pf->vf[vf_id];
+
+	/* if vf is in base mode, make it untrusted */
+	if (pf->vf_base_mode_only)
+		setting = false;
+	if (setting == vf->trusted)
+		goto out;
+
+	vf->trusted = setting;
+	i40e_vc_reset_vf(vf, true);
+	dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
+		 vf_id, setting ? "" : "un");
+
+#ifdef __TC_MQPRIO_MODE_MAX
+	if (vf->adq_enabled) {
+		if (!vf->trusted) {
+			dev_info(&pf->pdev->dev,
+				 "VF %u no longer Trusted, deleting all cloud filters\n",
+				 vf_id);
+			i40e_del_all_cloud_filters(vf);
+		}
+	}
+#endif /* __TC_MQPRIO_MODE_MAX */
+
+out:
+	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
 	return ret;
 }
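+
+/* Illustrative admin usage (not part of the driver):
+ * ip link set ethX vf 0 trust on|off
+ */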
+#endif /* HAVE_NDO_SET_VF_TRUST */
+#ifdef HAVE_VF_STATS
 
 /**
- * i40e_vc_reset_vf_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- *
- * called from the VF to reset itself,
- * unlike other virtchnl messages, PF driver
- * doesn't send the response back to the VF
- **/
-static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
+ * i40e_get_vf_stats - populate some stats for the VF
+ * @netdev: the netdev of the PF
+ * @vf_id: the host OS identifier (0-127)
+ * @vf_stats: pointer to the OS memory to be initialized
+ */
+int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
+		      struct ifla_vf_stats *vf_stats)
 {
-	if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
-		i40e_reset_vf(vf, false);
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf *pf = np->vsi->back;
+	struct i40e_eth_stats *stats;
+	struct i40e_vsi *vsi;
+	struct i40e_vf *vf;
+
+	/* validate the request */
+	if (i40e_validate_vf(pf, vf_id))
+		return -EINVAL;
+
+	vf = &pf->vf[vf_id];
+	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+		dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
+		return -EBUSY;
+	}
+
+	vsi = pf->vsi[vf->lan_vsi_idx];
+	if (!vsi)
+		return -EINVAL;
+
+	i40e_update_eth_stats(vsi);
+	stats = &vsi->eth_stats;
+
+	memset(vf_stats, 0, sizeof(*vf_stats));
+
+	vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
+		stats->rx_multicast;
+	vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
+		stats->tx_multicast;
+	vf_stats->rx_bytes   = stats->rx_bytes;
+	vf_stats->tx_bytes   = stats->tx_bytes;
+	vf_stats->broadcast  = stats->rx_broadcast;
+	vf_stats->multicast  = stats->rx_multicast;
+#ifdef HAVE_VF_STATS_DROPPED
+	vf_stats->rx_dropped = stats->rx_discards;
+	vf_stats->tx_dropped = stats->tx_discards;
+#endif
+
+	return 0;
 }
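+
+/* Illustrative: these counters surface through the ndo_get_vf_stats
+ * rtnetlink path as the per-VF statistics block of "ip link show".
+ */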
+#endif /* HAVE_VF_STATS */
+#endif /* IFLA_VF_MAX */
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
 
 /**
- * i40e_getnum_vf_vsi_vlan_filters
- * @vsi: pointer to the vsi
+ * i40e_get_trunk - Gets the configured VLAN filters
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @trunk_vlans: bitmap of trunk VLANs
  *
- * called to get the number of VLANs offloaded on this VF
+ * Gets the active trunk VLANs
+ *
+ * Returns the number of active VLAN filters on success,
+ * negative on failure
  **/
-static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
+static int i40e_get_trunk(struct pci_dev *pdev, int vf_id,
+			  unsigned long *trunk_vlans)
 {
-	struct i40e_mac_filter *f;
-	int num_vlans = 0, bkt;
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vsi *vsi;
+	struct i40e_vf *vf;
+	int ret;
 
-	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
-		if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
-			num_vlans++;
+	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+		dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+		return -EAGAIN;
 	}
 
-	return num_vlans;
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto out;
+	vf = &pf->vf[vf_id];
+	/* checking if pvid has been set through netdev */
+	vsi = pf->vsi[vf->lan_vsi_idx];
+	if (vsi->info.pvid) {
+		memset(trunk_vlans, 0,
+		       BITS_TO_LONGS(VLAN_N_VID) * sizeof(long));
+		set_bit(vsi->info.pvid, trunk_vlans);
+	} else {
+		bitmap_copy(trunk_vlans, vf->trunk_vlans, VLAN_N_VID);
+	}
+
+	ret = bitmap_weight(trunk_vlans, VLAN_N_VID);
+out:
+	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
+	return ret;
 }
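+
+/* Note: i40e_get_trunk and the related callbacks below are not ndo ops;
+ * in the out-of-tree driver they are typically exposed through a per-VF
+ * sysfs interface (a path such as /sys/class/net/<pf>/device/sriov/<vf>/
+ * is illustrative, not guaranteed by this patch).
+ */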
 
 /**
- * i40e_vc_config_promiscuous_mode_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- * @msglen: msg length
+ * i40e_set_trunk - Configure VLAN filters
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @vlan_bitmap: vlans to filter on
  *
- * called from the VF to configure the promiscuous mode of
- * VF vsis
+ * Applies the VLAN filters
+ *
+ * Returns 0 on success, negative on failure
  **/
-static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
-					       u8 *msg, u16 msglen)
+static int i40e_set_trunk(struct pci_dev *pdev, int vf_id,
+			  const unsigned long *vlan_bitmap)
 {
-	struct virtchnl_promisc_info *info =
-	    (struct virtchnl_promisc_info *)msg;
-	struct i40e_pf *pf = vf->pf;
-	struct i40e_hw *hw = &pf->hw;
-	struct i40e_mac_filter *f;
-	i40e_status aq_ret = 0;
-	bool allmulti = false;
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
 	struct i40e_vsi *vsi;
-	bool alluni = false;
-	int aq_err = 0;
-	int bkt;
+	struct i40e_vf *vf;
+	int ret;
+	u16 vid;
 
-	vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
-	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
-	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
-	    !vsi) {
-		aq_ret = I40E_ERR_PARAM;
-		goto error_param;
-	}
-	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
-		dev_err(&pf->pdev->dev,
-			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
-			vf->vf_id);
-		/* Lie to the VF on purpose. */
-		aq_ret = 0;
-		goto error_param;
+	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+		dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+		return -EAGAIN;
 	}
-	/* Multicast promiscuous handling*/
-	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
-		allmulti = true;
 
-	if (vf->port_vlan_id) {
-		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
-							    allmulti,
-							    vf->port_vlan_id,
-							    NULL);
-	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
-		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
-			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
-				continue;
-			aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
-								    vsi->seid,
-								    allmulti,
-								    f->vlan,
-								    NULL);
-			aq_err = pf->hw.aq.asq_last_status;
-			if (aq_ret) {
-				dev_err(&pf->pdev->dev,
-					"Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
-					f->vlan,
-					i40e_stat_str(&pf->hw, aq_ret),
-					i40e_aq_str(&pf->hw, aq_err));
-				break;
-			}
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto out;
+	vf = &pf->vf[vf_id];
+	vsi = pf->vsi[vf->lan_vsi_idx];
+	i40e_vlan_stripping_enable(vsi);
+
+	/* checking if pvid has been set through netdev */
+	vid = vsi->info.pvid;
+	if (vid) {
+		struct i40e_vsi *pf_vsi = pf->vsi[pf->lan_vsi];
+
+		clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
+#ifdef IFLA_VF_VLAN_INFO_MAX
+#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN
+		pf_vsi->netdev->netdev_ops->extended.ndo_set_vf_vlan
+			(pf_vsi->netdev, vf_id, 0, 0, htons(ETH_P_8021Q));
+#else
+		pf_vsi->netdev->netdev_ops->ndo_set_vf_vlan
+			(pf_vsi->netdev, vf_id, 0, 0, htons(ETH_P_8021Q));
+#endif /* HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN */
+#else /* IFLA_VF_VLAN_INFO_MAX */
+#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN
+		pf_vsi->netdev->netdev_ops->extended.ndo_set_vf_vlan
+			(pf_vsi->netdev, vf_id, 0, 0);
+#else
+		pf_vsi->netdev->netdev_ops->ndo_set_vf_vlan
+			(pf_vsi->netdev, vf_id, 0, 0);
+#endif /* HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN */
+#endif /* IFLA_VF_VLAN_INFO_MAX */
+		if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+			dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+			return -EAGAIN;
 		}
-	} else {
-		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
-							       allmulti, NULL);
-		aq_err = pf->hw.aq.asq_last_status;
-		if (aq_ret) {
-			dev_err(&pf->pdev->dev,
-				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
-				vf->vf_id,
-				i40e_stat_str(&pf->hw, aq_ret),
-				i40e_aq_str(&pf->hw, aq_err));
-			goto error_param;
+		if (i40e_vsi_add_vlan(vsi, vid)) {
+			dev_warn(&pdev->dev, "Unable to restore Port VLAN for trunking.\n");
+			goto out;
 		}
 	}
 
-	if (!aq_ret) {
-		dev_info(&pf->pdev->dev,
-			 "VF %d successfully set multicast promiscuous mode\n",
-			 vf->vf_id);
-		if (allmulti)
-			set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
-		else
-			clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
+	/* Add vlans */
+	for_each_set_bit(vid, vlan_bitmap, VLAN_N_VID) {
+		if (!test_bit(vid, vf->trunk_vlans)) {
+			ret = i40e_vsi_add_vlan(vsi, vid);
+			if (ret)
+				goto out;
+		}
 	}
 
-	if (info->flags & FLAG_VF_UNICAST_PROMISC)
-		alluni = true;
-	if (vf->port_vlan_id) {
-		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
-							    alluni,
-							    vf->port_vlan_id,
-							    NULL);
-	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
-		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
-			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
-				continue;
-			aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
-								    vsi->seid,
-								    alluni,
-								    f->vlan,
-								    NULL);
-			aq_err = pf->hw.aq.asq_last_status;
-			if (aq_ret)
-				dev_err(&pf->pdev->dev,
-					"Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
-					f->vlan,
-					i40e_stat_str(&pf->hw, aq_ret),
-					i40e_aq_str(&pf->hw, aq_err));
-		}
-	} else {
-		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
-							     alluni, NULL,
-							     true);
-		aq_err = pf->hw.aq.asq_last_status;
-		if (aq_ret) {
-			dev_err(&pf->pdev->dev,
-				"VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n",
-				vf->vf_id, info->flags,
-				i40e_stat_str(&pf->hw, aq_ret),
-				i40e_aq_str(&pf->hw, aq_err));
-			goto error_param;
-		}
+	/* If a filter is added to a previously empty trunk, remove
+	 * I40E_VLAN_ANY. Removal of this filter sets allow_untagged
+	 * to false.
+	 */
+	if (bitmap_weight(vlan_bitmap, VLAN_N_VID) &&
+	    !bitmap_weight(vf->trunk_vlans, VLAN_N_VID)) {
+		i40e_vsi_kill_vlan(vsi, I40E_VLAN_ANY);
+		vf->allow_untagged = false;
 	}
 
-	if (!aq_ret) {
-		dev_info(&pf->pdev->dev,
-			 "VF %d successfully set unicast promiscuous mode\n",
-			 vf->vf_id);
-		if (alluni)
-			set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
-		else
-			clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
+	/* If deleting all VLAN filters, check whether VLAN 0 filters
+	 * exist. If they don't, add a filter to allow all traffic (VLAN
+	 * tag = -1) before deleting, because the delete-all-filters flow
+	 * checks for VLAN 0 filters and replaces them with filters of
+	 * VLAN id = -1
+	 */
+	if (!bitmap_weight(vlan_bitmap, VLAN_N_VID) && !vf->allow_untagged) {
+		ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);
+		if (ret)
+			goto out;
+		vf->allow_untagged = true;
 	}
 
-error_param:
-	/* send the response to the VF */
-	return i40e_vc_send_resp_to_vf(vf,
-				       VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
-				       aq_ret);
+	/* Del vlans */
+	for_each_set_bit(vid, vf->trunk_vlans, VLAN_N_VID) {
+		if (!test_bit(vid, vlan_bitmap))
+			i40e_vsi_kill_vlan(vsi, vid);
+	}
+	/* Copy over the updated bitmap */
+	bitmap_copy(vf->trunk_vlans, vlan_bitmap, VLAN_N_VID);
+out:
+	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
+	return ret;
 }
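+
+/* Illustrative example of the reconciliation above: if the VF currently
+ * trunks {10, 20} and the requested bitmap is {20, 30}, VLAN 30 is added,
+ * VLAN 10 is killed, and VLAN 20 is left untouched before the final
+ * bitmap_copy().
+ */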
 
 /**
- * i40e_vc_config_queues_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- * @msglen: msg length
+ * i40e_get_mirror - Gets the configured VLAN mirrors
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @mirror_vlans: bitmap of mirror VLANs
  *
- * called from the VF to configure the rx/tx
- * queues
+ * Gets the active mirror VLANs
+ *
+ * Returns the number of active mirror VLANs on success,
+ * negative on failure
  **/
-static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_get_mirror(struct pci_dev *pdev, int vf_id,
+			   unsigned long *mirror_vlans)
 {
-	struct virtchnl_vsi_queue_config_info *qci =
-	    (struct virtchnl_vsi_queue_config_info *)msg;
-	struct virtchnl_queue_pair_info *qpi;
-	struct i40e_pf *pf = vf->pf;
-	u16 vsi_id, vsi_queue_id;
-	i40e_status aq_ret = 0;
-	int i;
-
-	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
-		aq_ret = I40E_ERR_PARAM;
-		goto error_param;
-	}
-
-	vsi_id = qci->vsi_id;
-	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
-		aq_ret = I40E_ERR_PARAM;
-		goto error_param;
-	}
-	for (i = 0; i < qci->num_queue_pairs; i++) {
-		qpi = &qci->qpair[i];
-		vsi_queue_id = qpi->txq.queue_id;
-		if ((qpi->txq.vsi_id != vsi_id) ||
-		    (qpi->rxq.vsi_id != vsi_id) ||
-		    (qpi->rxq.queue_id != vsi_queue_id) ||
-		    !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
-			aq_ret = I40E_ERR_PARAM;
-			goto error_param;
-		}
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vf *vf;
+	int ret;
 
-		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
-					     &qpi->rxq) ||
-		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
-					     &qpi->txq)) {
-			aq_ret = I40E_ERR_PARAM;
-			goto error_param;
-		}
+	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+		dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+		return -EAGAIN;
 	}
-	/* set vsi num_queue_pairs in use to num configured by VF */
-	pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs;
 
-error_param:
-	/* send the response to the VF */
-	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
-				       aq_ret);
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto out;
+	vf = &pf->vf[vf_id];
+	bitmap_copy(mirror_vlans, vf->mirror_vlans, VLAN_N_VID);
+	ret = bitmap_weight(mirror_vlans, VLAN_N_VID);
+out:
+	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
+	return ret;
 }
 
 /**
- * i40e_vc_config_irq_map_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- * @msglen: msg length
+ * i40e_set_mirror - Configure VLAN mirrors
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @vlan_bitmap: vlans to configure as mirrors
  *
- * called from the VF to configure the irq to
- * queue map
+ * Configures the mirror vlans
+ *
+ * Returns 0 on success, negative on failure
  **/
-static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_set_mirror(struct pci_dev *pdev, int vf_id,
+			   const unsigned long *vlan_bitmap)
 {
-	struct virtchnl_irq_map_info *irqmap_info =
-	    (struct virtchnl_irq_map_info *)msg;
-	struct virtchnl_vector_map *map;
-	u16 vsi_id, vsi_queue_id, vector_id;
-	i40e_status aq_ret = 0;
-	unsigned long tempmap;
-	int i;
+	u16 vid, sw_seid, dst_seid, rule_id, rule_type;
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	int ret = 0, num = 0, cnt = 0, add = 0;
+	u16 rules_used, rules_free;
+	struct i40e_vsi *vsi;
+	struct i40e_vf *vf;
+	__le16 *mr_list;
 
-	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
-		aq_ret = I40E_ERR_PARAM;
-		goto error_param;
-	}
+	DECLARE_BITMAP(num_vlans, VLAN_N_VID);
 
-	for (i = 0; i < irqmap_info->num_vectors; i++) {
-		map = &irqmap_info->vecmap[i];
+	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+		dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+		return -EAGAIN;
+	}
 
-		vector_id = map->vector_id;
-		vsi_id = map->vsi_id;
-		/* validate msg params */
-		if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
-		    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
-			aq_ret = I40E_ERR_PARAM;
-			goto error_param;
-		}
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto out;
+	vf = &pf->vf[vf_id];
+	vsi = pf->vsi[vf->lan_vsi_idx];
+	sw_seid = vsi->uplink_seid;
+	dst_seid = vsi->seid;
+	rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
+	bitmap_xor(num_vlans, vf->mirror_vlans, vlan_bitmap, VLAN_N_VID);
+	cnt = bitmap_weight(num_vlans, VLAN_N_VID);
+	if (!cnt)
+		goto out;
+	mr_list = kcalloc(cnt, sizeof(__le16), GFP_KERNEL);
+	if (!mr_list) {
+		ret = -ENOMEM;
+		goto out;
+	}
 
-		/* lookout for the invalid queue index */
-		tempmap = map->rxq_map;
-		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
-			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
-						      vsi_queue_id)) {
-				aq_ret = I40E_ERR_PARAM;
-				goto error_param;
+	/* figure out if adding or deleting */
+	bitmap_and(num_vlans, vlan_bitmap, num_vlans, VLAN_N_VID);
+	add = bitmap_weight(num_vlans, VLAN_N_VID);
+	if (add) {
+		/* Add mirrors */
+		for_each_set_bit(vid, vlan_bitmap, VLAN_N_VID) {
+			if (!test_bit(vid, vf->mirror_vlans)) {
+				mr_list[num] = CPU_TO_LE16(vid);
+				num++;
 			}
 		}
-
-		tempmap = map->txq_map;
-		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
-			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
-						      vsi_queue_id)) {
-				aq_ret = I40E_ERR_PARAM;
-				goto error_param;
+		ret = i40e_aq_add_mirrorrule(&pf->hw, sw_seid,
+					     rule_type, dst_seid,
+					     cnt, mr_list, NULL,
+					     &rule_id, &rules_used,
+					     &rules_free);
+		if (ret)
+			goto err_free;
+		vf->vlan_rule_id = rule_id;
+	} else {
+		/* Del mirrors */
+		for_each_set_bit(vid, vf->mirror_vlans, VLAN_N_VID) {
+			if (!test_bit(vid, vlan_bitmap)) {
+				mr_list[num] = CPU_TO_LE16(vid);
+				num++;
 			}
 		}
+		ret = i40e_aq_delete_mirrorrule(&pf->hw, sw_seid, rule_type,
+						vf->vlan_rule_id, cnt, mr_list,
+						NULL, &rules_used,
+						&rules_free);
+		if (ret)
+			goto err_free;
+	}
 
-		i40e_config_irq_link_list(vf, vsi_id, map);
+	/* Copy over the updated bitmap */
+	bitmap_copy(vf->mirror_vlans, vlan_bitmap, VLAN_N_VID);
+err_free:
+	kfree(mr_list);
+out:
+	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
+	return ret;
+}
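+
+/* Illustrative example of the bitmap arithmetic above: with current
+ * mirrors {100} and a requested bitmap {100, 200, 300}, the XOR yields
+ * {200, 300} (cnt = 2); ANDing that with the request keeps {200, 300}
+ * (add = 2), so a single i40e_aq_add_mirrorrule() call covers both
+ * new VLANs.
+ */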
+
+/**
+ * i40e_get_allow_untagged
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @on: on or off
+ *
+ * This function checks whether untagged packets
+ * are allowed on the VF.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_get_allow_untagged(struct pci_dev *pdev, int vf_id,
+				   bool *on)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vf *vf;
+	int ret;
+
+	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+		dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+		return -EAGAIN;
 	}
-error_param:
-	/* send the response to the VF */
-	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
-				       aq_ret);
+
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto out;
+	vf = &pf->vf[vf_id];
+	*on = vf->allow_untagged;
+out:
+	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
+	return ret;
 }
 
 /**
- * i40e_vc_enable_queues_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- * @msglen: msg length
+ * i40e_set_allow_untagged
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @on: on or off
  *
- * called from the VF to enable all or specific queue(s)
+ * This function allows or blocks untagged packets
+ * on the VF.
+ *
+ * Returns 0 on success, negative on failure
  **/
-static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_set_allow_untagged(struct pci_dev *pdev, int vf_id,
+				   const bool on)
 {
-	struct virtchnl_queue_select *vqs =
-	    (struct virtchnl_queue_select *)msg;
-	struct i40e_pf *pf = vf->pf;
-	u16 vsi_id = vqs->vsi_id;
-	i40e_status aq_ret = 0;
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vsi *vsi;
+	struct i40e_vf *vf;
+	int ret;
 
-	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
-		aq_ret = I40E_ERR_PARAM;
-		goto error_param;
+	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+		dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+		return -EAGAIN;
 	}
 
-	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
-		aq_ret = I40E_ERR_PARAM;
-		goto error_param;
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto out;
+	vf = &pf->vf[vf_id];
+	vsi = pf->vsi[vf->lan_vsi_idx];
+	if (vsi->info.pvid) {
+		ret = -EINVAL;
+		goto out;
 	}
-
-	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
-		aq_ret = I40E_ERR_PARAM;
-		goto error_param;
+	spin_lock_bh(&vsi->mac_filter_hash_lock);
+	if (!on) {
+		i40e_rm_vlan_all_mac(vsi, 0);
+		i40e_service_event_schedule(vsi->back);
+	} else {
+		ret = i40e_add_vlan_all_mac(vsi, 0);
+		i40e_service_event_schedule(vsi->back);
 	}
-
-	if (i40e_vsi_start_rings(pf->vsi[vf->lan_vsi_idx]))
-		aq_ret = I40E_ERR_TIMEOUT;
-error_param:
-	/* send the response to the VF */
-	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
-				       aq_ret);
+	spin_unlock_bh(&vsi->mac_filter_hash_lock);
+	if (!ret)
+		vf->allow_untagged = on;
+out:
+	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
+	return ret;
 }
 
 /**
- * i40e_vc_disable_queues_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- * @msglen: msg length
+ * i40e_get_loopback
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @enable: enable or disable
  *
- * called from the VF to disable all or specific
- * queue(s)
+ * This function checks whether loopback is enabled
+ *
+ * Returns 0 on success, negative on failure
  **/
-static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_get_loopback(struct pci_dev *pdev, int vf_id, bool *enable)
 {
-	struct virtchnl_queue_select *vqs =
-	    (struct virtchnl_queue_select *)msg;
-	struct i40e_pf *pf = vf->pf;
-	i40e_status aq_ret = 0;
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vf *vf;
+	int ret = 0;
 
-	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
-		aq_ret = I40E_ERR_PARAM;
-		goto error_param;
-	}
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto err_out;
+	vf = &pf->vf[vf_id];
+	*enable = vf->loopback;
+err_out:
+	return ret;
+}
 
-	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
-		aq_ret = I40E_ERR_PARAM;
-		goto error_param;
-	}
+/**
+ * i40e_set_loopback
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @enable: enable or disable
+ *
+ * This function enables or disables loopback
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_set_loopback(struct pci_dev *pdev, int vf_id,
+			     const bool enable)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vsi *vsi;
+	struct i40e_vf *vf;
+	int ret;
 
-	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
-		aq_ret = I40E_ERR_PARAM;
-		goto error_param;
-	}
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto err_out;
+	vf = &pf->vf[vf_id];
+	vsi = pf->vsi[vf->lan_vsi_idx];
+	ret = i40e_configure_vf_loopback(vsi, vf_id, enable);
+	if (!ret)
+		vf->loopback = enable;
+err_out:
+	return ret;
+}
 
-	i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
+/**
+ * i40e_get_vlan_strip
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @enable: enable or disable
+ *
+ * This function checks whether VLAN stripping is enabled
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_get_vlan_strip(struct pci_dev *pdev, int vf_id, bool *enable)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vf *vf;
+	int ret;
 
-error_param:
-	/* send the response to the VF */
-	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
-				       aq_ret);
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto err_out;
+	vf = &pf->vf[vf_id];
+	*enable = vf->vlan_stripping;
+err_out:
+	return ret;
 }
 
 /**
- * i40e_vc_get_stats_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- * @msglen: msg length
+ * i40e_set_vlan_strip
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @enable: enable/disable
  *
- * called from the VF to get vsi stats
+ * This function enables or disables VLAN stripping on a VF
+ *
+ * Returns 0 on success, negative on failure
  **/
-static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_set_vlan_strip(struct pci_dev *pdev, int vf_id,
+			       const bool enable)
 {
-	struct virtchnl_queue_select *vqs =
-	    (struct virtchnl_queue_select *)msg;
-	struct i40e_pf *pf = vf->pf;
-	struct i40e_eth_stats stats;
-	i40e_status aq_ret = 0;
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
 	struct i40e_vsi *vsi;
+	struct i40e_vf *vf;
+	int ret;
 
-	memset(&stats, 0, sizeof(struct i40e_eth_stats));
-
-	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
-		aq_ret = I40E_ERR_PARAM;
-		goto error_param;
-	}
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto err_out;
+	vf = &pf->vf[vf_id];
+	vsi = pf->vsi[vf->lan_vsi_idx];
+	ret = i40e_configure_vf_vlan_stripping(vsi, vf_id, enable);
+	if (!ret)
+		vf->vlan_stripping = enable;
+err_out:
+	return ret;
+}
 
-	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
-		aq_ret = I40E_ERR_PARAM;
-		goto error_param;
-	}
+/**
+ * i40e_reset_vf_stats
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ *
+ * This function resets all the stats for the VF
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_reset_vf_stats(struct pci_dev *pdev, int vf_id)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vsi *vsi;
+	struct i40e_vf *vf;
+	int ret = 0;
 
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto err_out;
+	vf = &pf->vf[vf_id];
 	vsi = pf->vsi[vf->lan_vsi_idx];
-	if (!vsi) {
-		aq_ret = I40E_ERR_PARAM;
-		goto error_param;
-	}
-	i40e_update_eth_stats(vsi);
-	stats = vsi->eth_stats;
+	i40e_vsi_reset_stats(vsi);
 
-error_param:
-	/* send the response back to the VF */
-	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
-				      (u8 *)&stats, sizeof(stats));
+err_out:
+	return ret;
 }
 
-/* If the VF is not trusted restrict the number of MAC/VLAN it can program */
-#define I40E_VC_MAX_MAC_ADDR_PER_VF 12
-#define I40E_VC_MAX_VLAN_PER_VF 8
+/**
+ * i40e_get_vf_bw_share
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @bw_share: bw share of the VF
+ *
+ * This function retrieves the bw share configured for the VF
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_get_vf_bw_share(struct pci_dev *pdev, int vf_id, u8 *bw_share)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vf *vf;
+	int ret = 0;
+
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto err_out;
+	vf = &pf->vf[vf_id];
+	if (vf->bw_share_applied)
+		*bw_share = vf->bw_share;
+	else
+		ret = -EINVAL;
+
+err_out:
+	return ret;
+}
 
 /**
- * i40e_check_vf_permission
- * @vf: pointer to the VF info
- * @macaddr: pointer to the MAC Address being checked
+ * i40e_store_vf_bw_share
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @bw_share: bw share of the VF
+ *
+ * This function stores the BW share configured for the VF
  *
- * Check if the VF has permission to add or delete unicast MAC address
- * filters and return error code -EPERM if not.  Then check if the
- * address filter requested is broadcast or zero and if so return
- * an invalid MAC address error code.
+ * Returns 0 on success, negative on failure
  **/
-static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
+static int i40e_store_vf_bw_share(struct pci_dev *pdev, int vf_id, u8 bw_share)
 {
-	struct i40e_pf *pf = vf->pf;
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vf *vf;
 	int ret = 0;
 
-	if (is_broadcast_ether_addr(macaddr) ||
-		   is_zero_ether_addr(macaddr)) {
-		dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
-		ret = I40E_ERR_INVALID_MAC_ADDR;
-	} else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
-		   !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
-		   !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
-		/* If the host VMM administrator has set the VF MAC address
-		 * administratively via the ndo_set_vf_mac command then deny
-		 * permission to the VF to add or delete unicast MAC addresses.
-		 * Unless the VF is privileged and then it can do whatever.
-		 * The VF may request to set the MAC address filter already
-		 * assigned to it so do not return an error in that case.
-		 */
-		dev_err(&pf->pdev->dev,
-			"VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n");
-		ret = -EPERM;
-	} else if ((vf->num_mac >= I40E_VC_MAX_MAC_ADDR_PER_VF) &&
-		   !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
-		dev_err(&pf->pdev->dev,
-			"VF is not trusted, switch the VF to trusted to add more functionality\n");
-		ret = -EPERM;
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto err_out;
+	vf = &pf->vf[vf_id];
+	vf->bw_share = bw_share;
+
+	/* this tracking bool is set to true when 'apply' attribute is used */
+	vf->bw_share_applied = false;
+	pf->vf_bw_applied = false;
+err_out:
+	return ret;
+}
+
+/**
+ * i40e_get_link_state
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @enabled: link state
+ * @link_speed: link speed of the VF
+ *
+ * Gets the status of link and the link speed
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_get_link_state(struct pci_dev *pdev, int vf_id, bool *enabled,
+			       enum vfd_link_speed *link_speed)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_link_status *ls;
+	struct i40e_vf *vf;
+	int ret;
+
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto err_out;
+	vf = &pf->vf[vf_id];
+	ls = &pf->hw.phy.link_info;
+	if (vf->link_forced)
+		*enabled = vf->link_up;
+	else
+		*enabled = ls->link_info & I40E_AQ_LINK_UP;
+	switch (ls->link_speed) {
+	case I40E_LINK_SPEED_UNKNOWN:
+		*link_speed = VFD_LINK_SPEED_UNKNOWN;
+		break;
+	case I40E_LINK_SPEED_100MB:
+		*link_speed = VFD_LINK_SPEED_100MB;
+		break;
+	case I40E_LINK_SPEED_1GB:
+		*link_speed = VFD_LINK_SPEED_1GB;
+		break;
+	case I40E_LINK_SPEED_2_5GB:
+		*link_speed = VFD_LINK_SPEED_2_5GB;
+		break;
+	case I40E_LINK_SPEED_5GB:
+		*link_speed = VFD_LINK_SPEED_5GB;
+		break;
+	case I40E_LINK_SPEED_10GB:
+		*link_speed = VFD_LINK_SPEED_10GB;
+		break;
+	case I40E_LINK_SPEED_20GB:
+		*link_speed = VFD_LINK_SPEED_20GB;
+		break;
+	case I40E_LINK_SPEED_25GB:
+		*link_speed = VFD_LINK_SPEED_25GB;
+		break;
+	case I40E_LINK_SPEED_40GB:
+		*link_speed = VFD_LINK_SPEED_40GB;
+		break;
+	default:
+		*link_speed = VFD_LINK_SPEED_UNKNOWN;
 	}
+err_out:
 	return ret;
 }
 
 /**
- * i40e_vc_add_mac_addr_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- * @msglen: msg length
+ * i40e_set_link_state
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @link: the link state to configure
  *
- * add guest mac address filter
+ * Configures the link state for a VF
+ *
+ * Returns 0 on success, negative on failure
  **/
-static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_set_link_state(struct pci_dev *pdev, int vf_id, const u8 link)
 {
-	struct virtchnl_ether_addr_list *al =
-	    (struct virtchnl_ether_addr_list *)msg;
-	struct i40e_pf *pf = vf->pf;
-	struct i40e_vsi *vsi = NULL;
-	u16 vsi_id = al->vsi_id;
-	i40e_status ret = 0;
-	int i;
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vf *vf;
+	int ret;
 
-	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
-	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
-		ret = I40E_ERR_PARAM;
-		goto error_param;
-	}
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto error_out;
+	vf = &pf->vf[vf_id];
+	ret = i40e_configure_vf_link(vf, link);
+error_out:
+	return ret;
+}
 
-	for (i = 0; i < al->num_elements; i++) {
-		ret = i40e_check_vf_permission(vf, al->list[i].addr);
-		if (ret)
-			goto error_param;
-	}
-	vsi = pf->vsi[vf->lan_vsi_idx];
+/**
+ * i40e_set_vf_enable
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @enable: enable/disable
+ *
+ * This function enables or disables a VF
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_set_vf_enable(struct pci_dev *pdev, int vf_id,
+			      const bool enable)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vsi *vsi;
+	unsigned long q_map;
+	struct i40e_vf *vf;
+	int ret;
 
-	/* Lock once, because all function inside for loop accesses VSI's
-	 * MAC filter list which needs to be protected using same lock.
-	 */
-	spin_lock_bh(&vsi->mac_filter_hash_lock);
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto err_out;
+	vf = &pf->vf[vf_id];
+	vsi = pf->vsi[vf->lan_vsi_idx];
+	q_map = BIT(vsi->num_queue_pairs) - 1;
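+	/* e.g. with vsi->num_queue_pairs = 4, BIT(4) - 1 = 0xf selects
+	 * queues 0-3 (illustrative values)
+	 */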
 
-	/* add new addresses to the list */
-	for (i = 0; i < al->num_elements; i++) {
-		struct i40e_mac_filter *f;
+	/* force link down to prevent tx hangs */
+	if (!enable) {
+		ret = i40e_set_link_state(pdev, vf_id, VFD_LINKSTATE_OFF);
+		if (ret)
+			goto err_out;
+		vf->pf_ctrl_disable = true;
+		ret = i40e_ctrl_vf_tx_rings(vsi, q_map, enable);
+		if (ret)
+			goto err_out;
+		ret = i40e_ctrl_vf_rx_rings(vsi, q_map, enable);
+		if (ret)
+			goto err_out;
+		vf->queues_enabled = false;
+	} else {
+		/* Do nothing when there is no iavf driver loaded */
+		if (!test_bit(I40E_VF_STATE_LOADED_VF_DRIVER, &vf->vf_states))
+			goto err_out;
+		ret = i40e_ctrl_vf_rx_rings(vsi, q_map, enable);
+		if (ret)
+			goto err_out;
+		ret = i40e_ctrl_vf_tx_rings(vsi, q_map, enable);
+		if (ret)
+			goto err_out;
+		vf->queues_enabled = true;
+		vf->pf_ctrl_disable = false;
+		/* a reset is needed to reinit VF resources */
+		i40e_vc_notify_vf_reset(vf);
+		i40e_reset_vf(vf, false);
+		ret = i40e_set_link_state(pdev, vf_id, VFD_LINKSTATE_AUTO);
+	}
 
-		f = i40e_find_mac(vsi, al->list[i].addr);
-		if (!f)
-			f = i40e_add_mac_filter(vsi, al->list[i].addr);
+err_out:
+	return ret;
+}
 
-		if (!f) {
-			dev_err(&pf->pdev->dev,
-				"Unable to add MAC filter %pM for VF %d\n",
-				 al->list[i].addr, vf->vf_id);
-			ret = I40E_ERR_PARAM;
-			spin_unlock_bh(&vsi->mac_filter_hash_lock);
-			goto error_param;
-		} else {
-			vf->num_mac++;
-		}
-	}
-	spin_unlock_bh(&vsi->mac_filter_hash_lock);
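+/**
+ * i40e_get_vf_enable
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @enable: pointer to the caller's enable variable
+ *
+ * This function reports whether the VF has been disabled via
+ * i40e_set_vf_enable
+ *
+ * Returns 0 on success, negative on failure
+ **/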
+static int i40e_get_vf_enable(struct pci_dev *pdev, int vf_id, bool *enable)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vf *vf;
+	int ret;
 
-	/* program the updated filter list */
-	ret = i40e_sync_vsi_filters(vsi);
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
 	if (ret)
-		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
-			vf->vf_id, ret);
-
-error_param:
-	/* send the response to the VF */
-	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
-				       ret);
+		return ret;
+	vf = &pf->vf[vf_id];
+	*enable = !vf->pf_ctrl_disable;
+	return 0;
 }
 
 /**
- * i40e_vc_del_mac_addr_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- * @msglen: msg length
+ * i40e_get_rx_bytes
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @rx_bytes: pointer to the caller's rx_bytes variable
  *
- * remove guest mac address filter
+ * This function gets the received bytes on the VF
+ *
+ * Returns 0 on success, negative on failure
  **/
-static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_get_rx_bytes(struct pci_dev *pdev, int vf_id,
+			     u64 *rx_bytes)
 {
-	struct virtchnl_ether_addr_list *al =
-	    (struct virtchnl_ether_addr_list *)msg;
-	struct i40e_pf *pf = vf->pf;
-	struct i40e_vsi *vsi = NULL;
-	u16 vsi_id = al->vsi_id;
-	i40e_status ret = 0;
-	int i;
-
-	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
-	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
-		ret = I40E_ERR_PARAM;
-		goto error_param;
-	}
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vsi *vsi;
+	struct i40e_vf *vf;
+	int ret;
 
-	for (i = 0; i < al->num_elements; i++) {
-		if (is_broadcast_ether_addr(al->list[i].addr) ||
-		    is_zero_ether_addr(al->list[i].addr)) {
-			dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
-				al->list[i].addr, vf->vf_id);
-			ret = I40E_ERR_INVALID_MAC_ADDR;
-			goto error_param;
-		}
-	}
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto err_out;
+	vf = &pf->vf[vf_id];
 	vsi = pf->vsi[vf->lan_vsi_idx];
+	i40e_update_eth_stats(vsi);
+	*rx_bytes = vsi->eth_stats.rx_bytes;
+err_out:
+	return ret;
+}
 
-	spin_lock_bh(&vsi->mac_filter_hash_lock);
-	/* delete addresses from the list */
-	for (i = 0; i < al->num_elements; i++)
-		if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
-			ret = I40E_ERR_INVALID_MAC_ADDR;
-			spin_unlock_bh(&vsi->mac_filter_hash_lock);
-			goto error_param;
-		} else {
-			vf->num_mac--;
-		}
-
-	spin_unlock_bh(&vsi->mac_filter_hash_lock);
+/**
+ * i40e_get_rx_dropped
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @rx_dropped: pointer to the caller's rx_dropped variable
+ *
+ * This function gets the dropped received bytes on the VF
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_get_rx_dropped(struct pci_dev *pdev, int vf_id,
+			       u64 *rx_dropped)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vsi *vsi;
+	struct i40e_vf *vf;
+	int ret;
 
-	/* program the updated filter list */
-	ret = i40e_sync_vsi_filters(vsi);
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
 	if (ret)
-		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
-			vf->vf_id, ret);
-
-error_param:
-	/* send the response to the VF */
-	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
-				       ret);
+		goto err_out;
+	vf = &pf->vf[vf_id];
+	vsi = pf->vsi[vf->lan_vsi_idx];
+	i40e_update_eth_stats(vsi);
+	*rx_dropped = vsi->eth_stats.rx_discards;
+err_out:
+	return ret;
 }
 
 /**
- * i40e_vc_add_vlan_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- * @msglen: msg length
+ * i40e_get_rx_packets
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @rx_packets: pointer to the caller's rx_packets variable
  *
- * program guest vlan id
+ * This function gets the number of packets received on the VF
+ *
+ * Returns 0 on success, negative on failure
  **/
-static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_get_rx_packets(struct pci_dev *pdev, int vf_id,
+			       u64 *rx_packets)
 {
-	struct virtchnl_vlan_filter_list *vfl =
-	    (struct virtchnl_vlan_filter_list *)msg;
-	struct i40e_pf *pf = vf->pf;
-	struct i40e_vsi *vsi = NULL;
-	u16 vsi_id = vfl->vsi_id;
-	i40e_status aq_ret = 0;
-	int i;
-
-	if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
-	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
-		dev_err(&pf->pdev->dev,
-			"VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
-		goto error_param;
-	}
-	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
-	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
-		aq_ret = I40E_ERR_PARAM;
-		goto error_param;
-	}
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vsi *vsi;
+	struct i40e_vf *vf;
+	int ret;
 
-	for (i = 0; i < vfl->num_elements; i++) {
-		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
-			aq_ret = I40E_ERR_PARAM;
-			dev_err(&pf->pdev->dev,
-				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
-			goto error_param;
-		}
-	}
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto err_out;
+	vf = &pf->vf[vf_id];
 	vsi = pf->vsi[vf->lan_vsi_idx];
-	if (vsi->info.pvid) {
-		aq_ret = I40E_ERR_PARAM;
-		goto error_param;
-	}
-
-	i40e_vlan_stripping_enable(vsi);
-	for (i = 0; i < vfl->num_elements; i++) {
-		/* add new VLAN filter */
-		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
-		if (!ret)
-			vf->num_vlan++;
-
-		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
-			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
-							   true,
-							   vfl->vlan_id[i],
-							   NULL);
-		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
-			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
-							   true,
-							   vfl->vlan_id[i],
-							   NULL);
+	i40e_update_eth_stats(vsi);
+	*rx_packets = vsi->eth_stats.rx_unicast + vsi->eth_stats.rx_multicast +
+		      vsi->eth_stats.rx_broadcast;
+err_out:
+	return ret;
+}
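[The rx_packets value has no single hardware counter behind it; it is the sum of the VSI's unicast, multicast and broadcast receive counters. A sketch of the same aggregation in isolation, assuming the stats were refreshed first:]

	/* Sketch: aggregate packet count from the three address-type
	 * counters kept in struct i40e_eth_stats.
	 */
	static u64 vsi_rx_packet_total(const struct i40e_eth_stats *es)
	{
		return es->rx_unicast + es->rx_multicast + es->rx_broadcast;
	}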
 
-		if (ret)
-			dev_err(&pf->pdev->dev,
-				"Unable to add VLAN filter %d for VF %d, error %d\n",
-				vfl->vlan_id[i], vf->vf_id, ret);
-	}
+/**
+ * i40e_get_tx_bytes
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @tx_bytes: pointer to the caller's tx_bytes variable
+ *
+ * This function gets the transmitted bytes by the VF
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_get_tx_bytes(struct pci_dev *pdev, int vf_id,
+			     u64 *tx_bytes)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vsi *vsi;
+	struct i40e_vf *vf;
+	int ret;
 
-error_param:
-	/* send the response to the VF */
-	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto err_out;
+	vf = &pf->vf[vf_id];
+	vsi = pf->vsi[vf->lan_vsi_idx];
+	i40e_update_eth_stats(vsi);
+	*tx_bytes = vsi->eth_stats.tx_bytes;
+err_out:
+	return ret;
 }
 
 /**
- * i40e_vc_remove_vlan_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- * @msglen: msg length
+ * i40e_get_tx_dropped
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @tx_dropped: pointer to the caller's tx_dropped variable
  *
- * remove programmed guest vlan id
+ * This function gets the number of dropped transmit packets on the VF
+ *
+ * Returns 0 on success, negative on failure
  **/
-static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_get_tx_dropped(struct pci_dev *pdev, int vf_id,
+			       u64 *tx_dropped)
 {
-	struct virtchnl_vlan_filter_list *vfl =
-	    (struct virtchnl_vlan_filter_list *)msg;
-	struct i40e_pf *pf = vf->pf;
-	struct i40e_vsi *vsi = NULL;
-	u16 vsi_id = vfl->vsi_id;
-	i40e_status aq_ret = 0;
-	int i;
-
-	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
-	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
-		aq_ret = I40E_ERR_PARAM;
-		goto error_param;
-	}
-
-	for (i = 0; i < vfl->num_elements; i++) {
-		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
-			aq_ret = I40E_ERR_PARAM;
-			goto error_param;
-		}
-	}
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vsi *vsi;
+	struct i40e_vf *vf;
+	int ret;
 
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto err_out;
+	vf = &pf->vf[vf_id];
 	vsi = pf->vsi[vf->lan_vsi_idx];
-	if (vsi->info.pvid) {
-		aq_ret = I40E_ERR_PARAM;
-		goto error_param;
-	}
-
-	for (i = 0; i < vfl->num_elements; i++) {
-		i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
-		vf->num_vlan--;
-
-		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
-			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
-							   false,
-							   vfl->vlan_id[i],
-							   NULL);
-		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
-			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
-							   false,
-							   vfl->vlan_id[i],
-							   NULL);
-	}
-
-error_param:
-	/* send the response to the VF */
-	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
+	i40e_update_eth_stats(vsi);
+	*tx_dropped = vsi->eth_stats.tx_discards;
+err_out:
+	return ret;
 }
 
 /**
- * i40e_vc_iwarp_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- * @msglen: msg length
+ * i40e_get_tx_packets
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @tx_packets: pointer to the caller's tx_packets variable
  *
- * called from the VF for the iwarp msgs
+ * This function gets the number of packets transmitted by the VF
+ *
+ * Returns 0 on success, negative on failure
  **/
-static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_get_tx_packets(struct pci_dev *pdev, int vf_id,
+			       u64 *tx_packets)
 {
-	struct i40e_pf *pf = vf->pf;
-	int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
-	i40e_status aq_ret = 0;
-
-	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
-	    !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
-		aq_ret = I40E_ERR_PARAM;
-		goto error_param;
-	}
-
-	i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
-				     msg, msglen);
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vsi *vsi;
+	struct i40e_vf *vf;
+	int ret;
 
-error_param:
-	/* send the response to the VF */
-	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
-				       aq_ret);
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto err_out;
+	vf = &pf->vf[vf_id];
+	vsi = pf->vsi[vf->lan_vsi_idx];
+	i40e_update_eth_stats(vsi);
+	*tx_packets = vsi->eth_stats.tx_unicast + vsi->eth_stats.tx_multicast +
+		      vsi->eth_stats.tx_broadcast;
+err_out:
+	return ret;
 }
 
 /**
- * i40e_vc_iwarp_qvmap_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- * @config: config qvmap or release it
+ * i40e_get_tx_errors
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @tx_errors: pointer to the caller's tx_errors variable
  *
- * called from the VF for the iwarp msgs
+ * This function gets the number of transmit errors on the VF
+ *
+ * Returns 0 on success, negative on failure
  **/
-static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
-				   bool config)
+static int i40e_get_tx_errors(struct pci_dev *pdev, int vf_id,
+			      u64 *tx_errors)
 {
-	struct virtchnl_iwarp_qvlist_info *qvlist_info =
-				(struct virtchnl_iwarp_qvlist_info *)msg;
-	i40e_status aq_ret = 0;
-
-	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
-	    !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
-		aq_ret = I40E_ERR_PARAM;
-		goto error_param;
-	}
-
-	if (config) {
-		if (i40e_config_iwarp_qvlist(vf, qvlist_info))
-			aq_ret = I40E_ERR_PARAM;
-	} else {
-		i40e_release_iwarp_qvlist(vf);
-	}
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vsi *vsi;
+	struct i40e_vf *vf;
+	int ret;
 
-error_param:
-	/* send the response to the VF */
-	return i40e_vc_send_resp_to_vf(vf,
-			       config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
-			       VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
-			       aq_ret);
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto err_out;
+	vf = &pf->vf[vf_id];
+	vsi = pf->vsi[vf->lan_vsi_idx];
+	i40e_update_eth_stats(vsi);
+	*tx_errors = vsi->eth_stats.tx_errors;
+err_out:
+	return ret;
 }
 
 /**
- * i40e_vc_config_rss_key
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- * @msglen: msg length
+ * i40e_get_mac
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @mac: buffer to return the VF's default MAC address
  *
- * Configure the VF's RSS key
+ * This function gets the VF's default MAC address
+ *
+ * Returns 0 on success, negative on failure
  **/
-static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_get_mac(struct pci_dev *pdev, int vf_id, u8 *mac)
 {
-	struct virtchnl_rss_key *vrk =
-		(struct virtchnl_rss_key *)msg;
-	struct i40e_pf *pf = vf->pf;
-	struct i40e_vsi *vsi = NULL;
-	u16 vsi_id = vrk->vsi_id;
-	i40e_status aq_ret = 0;
-
-	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
-	    !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
-	    (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
-		aq_ret = I40E_ERR_PARAM;
-		goto err;
-	}
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vf *vf;
+	int ret;
 
-	vsi = pf->vsi[vf->lan_vsi_idx];
-	aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
-err:
-	/* send the response to the VF */
-	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
-				       aq_ret);
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto err_out;
+	vf = &pf->vf[vf_id];
+	ether_addr_copy(mac, vf->default_lan_addr.addr);
+err_out:
+	return ret;
 }
 
 /**
- * i40e_vc_config_rss_lut
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- * @msglen: msg length
+ * i40e_set_mac
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @mac: the default MAC address to set
  *
- * Configure the VF's RSS LUT
+ * This function sets the default MAC address for the VF
+ *
+ * Returns 0 on success, negative on failure
  **/
-static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_set_mac(struct pci_dev *pdev, int vf_id, const u8 *mac)
 {
-	struct virtchnl_rss_lut *vrl =
-		(struct virtchnl_rss_lut *)msg;
-	struct i40e_pf *pf = vf->pf;
-	struct i40e_vsi *vsi = NULL;
-	u16 vsi_id = vrl->vsi_id;
-	i40e_status aq_ret = 0;
-
-	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
-	    !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
-	    (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
-		aq_ret = I40E_ERR_PARAM;
-		goto err;
-	}
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vsi *vsi;
+	struct i40e_vf *vf;
+	int ret;
 
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto err_out;
+	vf = &pf->vf[vf_id];
 	vsi = pf->vsi[vf->lan_vsi_idx];
-	aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
-	/* send the response to the VF */
-err:
-	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
-				       aq_ret);
+	ret = i40e_set_vf_mac(vf, vsi, mac);
+err_out:
+	return ret;
 }
 
 /**
- * i40e_vc_get_rss_hena
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- * @msglen: msg length
+ * i40e_get_promisc
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @promisc_mode: current promiscuous mode
  *
- * Return the RSS HENA bits allowed by the hardware
+ * This function gets the current promiscuous mode configuration.
+ *
+ * Returns 0 on success, negative on failure
  **/
-static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_get_promisc(struct pci_dev *pdev, int vf_id, u8 *promisc_mode)
 {
-	struct virtchnl_rss_hena *vrh = NULL;
-	struct i40e_pf *pf = vf->pf;
-	i40e_status aq_ret = 0;
-	int len = 0;
-
-	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
-		aq_ret = I40E_ERR_PARAM;
-		goto err;
-	}
-	len = sizeof(struct virtchnl_rss_hena);
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vf *vf;
+	int ret;
 
-	vrh = kzalloc(len, GFP_KERNEL);
-	if (!vrh) {
-		aq_ret = I40E_ERR_NO_MEMORY;
-		len = 0;
-		goto err;
-	}
-	vrh->hena = i40e_pf_get_default_rss_hena(pf);
-err:
-	/* send the response back to the VF */
-	aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
-					aq_ret, (u8 *)vrh, len);
-	kfree(vrh);
-	return aq_ret;
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto err_out;
+	vf = &pf->vf[vf_id];
+	*promisc_mode = vf->promisc_mode;
+err_out:
+	return ret;
 }
 
 /**
- * i40e_vc_set_rss_hena
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- * @msglen: msg length
+ * i40e_set_promisc
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @promisc_mode: promiscuous mode to be set
  *
- * Set the RSS HENA bits for the VF
+ * This function sets the promiscuous mode configuration.
+ *
+ * Returns 0 on success, negative on failure
  **/
-static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_set_promisc(struct pci_dev *pdev, int vf_id,
+			    const u8 promisc_mode)
 {
-	struct virtchnl_rss_hena *vrh =
-		(struct virtchnl_rss_hena *)msg;
-	struct i40e_pf *pf = vf->pf;
-	struct i40e_hw *hw = &pf->hw;
-	i40e_status aq_ret = 0;
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vsi *vsi;
+	struct i40e_vf *vf;
+	int ret;
 
-	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
-		aq_ret = I40E_ERR_PARAM;
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
 		goto err;
-	}
-	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
-	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
-			  (u32)(vrh->hena >> 32));
-
-	/* send the response to the VF */
+	vf = &pf->vf[vf_id];
+	vsi = pf->vsi[vf->lan_vsi_idx];
+	ret = i40e_configure_vf_promisc_mode(vf, vsi, promisc_mode);
 err:
-	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
+	return ret;
 }
 
 /**
- * i40e_vc_enable_vlan_stripping
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- * @msglen: msg length
+ * i40e_get_ingress_mirror - Gets the configured ingress mirror
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @mirror: pointer to return the ingress mirror
  *
- * Enable vlan header stripping for the VF
+ * Gets the ingress mirror configured
+ *
+ * Returns 0 on success, negative on failure
  **/
-static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
-					 u16 msglen)
+static int i40e_get_ingress_mirror(struct pci_dev *pdev, int vf_id, int *mirror)
 {
-	struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
-	i40e_status aq_ret = 0;
-
-	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
-		aq_ret = I40E_ERR_PARAM;
-		goto err;
-	}
-
-	i40e_vlan_stripping_enable(vsi);
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vf *vf;
+	int ret;
 
-	/* send the response to the VF */
-err:
-	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
-				       aq_ret);
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto err_out;
+	vf = &pf->vf[vf_id];
+	*mirror = vf->ingress_vlan;
+err_out:
+	return ret;
 }
 
 /**
- * i40e_vc_disable_vlan_stripping
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- * @msglen: msg length
+ * i40e_set_ingress_mirror - Configure ingress mirror
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @mirror: mirror vf
  *
- * Disable vlan header stripping for the VF
+ * Configures the ingress mirror
+ *
+ * Returns 0 on success, negative on failure
  **/
-static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
-					  u16 msglen)
+static int i40e_set_ingress_mirror(struct pci_dev *pdev, int vf_id,
+				   const int mirror)
 {
-	struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
-	i40e_status aq_ret = 0;
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vsi *src_vsi, *mirror_vsi;
+	struct i40e_vf *vf, *mirror_vf;
+	u16 rule_type, rule_id;
+	int ret;
 
-	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
-		aq_ret = I40E_ERR_PARAM;
-		goto err;
-	}
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto err_out;
+	vf = &pf->vf[vf_id];
 
-	i40e_vlan_stripping_disable(vsi);
+	/* The Admin Queue mirroring rules refer to the traffic
+	 * directions from the perspective of the switch, not the VSI
+	 * we apply the mirroring rule on - so the behaviour of a VSI
+	 * ingress mirror is classified as an egress rule
+	 */
+	rule_type = I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS;
+	src_vsi = pf->vsi[vf->lan_vsi_idx];
+	if (mirror == I40E_NO_VF_MIRROR) {
+		/* Del mirrors */
+		rule_id = vf->ingress_rule_id;
+		ret = i40e_del_ingress_egress_mirror(src_vsi, rule_type,
+						     rule_id);
+		if (ret)
+			goto err_out;
+		vf->ingress_vlan = I40E_NO_VF_MIRROR;
+	} else {
+		/* validate the mirror */
+		ret = i40e_validate_vf(pf, mirror);
+		if (ret)
+			goto err_out;
+		mirror_vf = &pf->vf[mirror];
+		mirror_vsi = pf->vsi[mirror_vf->lan_vsi_idx];
 
-	/* send the response to the VF */
-err:
-	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
-				       aq_ret);
+		/* Add mirrors */
+		ret = i40e_add_ingress_egress_mirror(src_vsi, mirror_vsi,
+						     rule_type, &rule_id);
+		if (ret)
+			goto err_out;
+		vf->ingress_vlan = mirror;
+		vf->ingress_rule_id = rule_id;
+	}
+err_out:
+	return ret;
 }
 
 /**
- * i40e_vc_process_vf_msg
- * @pf: pointer to the PF structure
- * @vf_id: source VF id
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- * @msghndl: msg handle
+ * i40e_get_egress_mirror - Gets the configured egress mirror
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @mirror: pointer to return the egress mirror
  *
- * called from the common aeq/arq handler to
- * process request from VF
+ * Gets the egress mirror configured
+ *
+ * Returns 0 on success, negative on failure
  **/
-int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
-			   u32 v_retval, u8 *msg, u16 msglen)
+static int i40e_get_egress_mirror(struct pci_dev *pdev, int vf_id, int *mirror)
 {
-	struct i40e_hw *hw = &pf->hw;
-	int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
 	struct i40e_vf *vf;
 	int ret;
 
-	pf->vf_aq_requests++;
-	if (local_vf_id >= pf->num_alloc_vfs)
-		return -EINVAL;
-	vf = &(pf->vf[local_vf_id]);
-
-	/* Check if VF is disabled. */
-	if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
-		return I40E_ERR_PARAM;
-
-	/* perform basic checks on the msg */
-	ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto err_out;
+	vf = &pf->vf[vf_id];
+	*mirror = vf->egress_vlan;
+err_out:
+	return ret;
+}
 
-	/* perform additional checks specific to this driver */
-	if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
-		struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;
+/**
+ * i40e_set_egress_mirror - Configure egress mirror
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @mirror: mirror vf
+ *
+ * Configures the egress mirror
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_set_egress_mirror(struct pci_dev *pdev, int vf_id,
+				  const int mirror)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vsi *src_vsi, *mirror_vsi;
+	struct i40e_vf *vf, *mirror_vf;
+	u16 rule_type, rule_id;
+	int ret;
 
-		if (vrk->key_len != I40E_HKEY_ARRAY_SIZE)
-			ret = -EINVAL;
-	} else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
-		struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto err_out;
+	vf = &pf->vf[vf_id];
 
-		if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)
-			ret = -EINVAL;
-	}
+	/* The Admin Queue mirroring rules refer to the traffic
+	 * directions from the perspective of the switch, not the VSI
+	 * we apply the mirroring rule on - so the behaviour of a VSI
+	 * egress mirror is classified as an ingress rule
+	 */
+	rule_type = I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
+	src_vsi = pf->vsi[vf->lan_vsi_idx];
+	if (mirror == I40E_NO_VF_MIRROR) {
+		/* Del mirrors */
+		rule_id = vf->egress_rule_id;
+		ret = i40e_del_ingress_egress_mirror(src_vsi, rule_type,
+						     rule_id);
+		if (ret)
+			goto err_out;
+		vf->egress_vlan = I40E_NO_VF_MIRROR;
+	} else {
+		/* validate the mirror */
+		ret = i40e_validate_vf(pf, mirror);
+		if (ret)
+			goto err_out;
+		mirror_vf = &pf->vf[mirror];
+		mirror_vsi = pf->vsi[mirror_vf->lan_vsi_idx];
 
-	if (ret) {
-		i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
-		dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
-			local_vf_id, v_opcode, msglen);
-		switch (ret) {
-		case VIRTCHNL_ERR_PARAM:
-			return -EPERM;
-		default:
-			return -EINVAL;
-		}
+		/* Add mirrors */
+		ret = i40e_add_ingress_egress_mirror(src_vsi, mirror_vsi,
+						     rule_type, &rule_id);
+		if (ret)
+			goto err_out;
+		vf->egress_vlan = mirror;
+		vf->egress_rule_id = rule_id;
 	}
+err_out:
+	return ret;
+}
 
-	switch (v_opcode) {
-	case VIRTCHNL_OP_VERSION:
-		ret = i40e_vc_get_version_msg(vf, msg);
-		break;
-	case VIRTCHNL_OP_GET_VF_RESOURCES:
-		ret = i40e_vc_get_vf_resources_msg(vf, msg);
-		break;
-	case VIRTCHNL_OP_RESET_VF:
-		i40e_vc_reset_vf_msg(vf);
-		ret = 0;
-		break;
-	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
-		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
-		break;
-	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
-		ret = i40e_vc_config_queues_msg(vf, msg, msglen);
-		break;
-	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
-		ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
-		break;
-	case VIRTCHNL_OP_ENABLE_QUEUES:
-		ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
-		i40e_vc_notify_vf_link_state(vf);
-		break;
-	case VIRTCHNL_OP_DISABLE_QUEUES:
-		ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
-		break;
-	case VIRTCHNL_OP_ADD_ETH_ADDR:
-		ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
-		break;
-	case VIRTCHNL_OP_DEL_ETH_ADDR:
-		ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
-		break;
-	case VIRTCHNL_OP_ADD_VLAN:
-		ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
-		break;
-	case VIRTCHNL_OP_DEL_VLAN:
-		ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
-		break;
-	case VIRTCHNL_OP_GET_STATS:
-		ret = i40e_vc_get_stats_msg(vf, msg, msglen);
-		break;
-	case VIRTCHNL_OP_IWARP:
-		ret = i40e_vc_iwarp_msg(vf, msg, msglen);
-		break;
-	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
-		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true);
-		break;
-	case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
-		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
-		break;
-	case VIRTCHNL_OP_CONFIG_RSS_KEY:
-		ret = i40e_vc_config_rss_key(vf, msg, msglen);
-		break;
-	case VIRTCHNL_OP_CONFIG_RSS_LUT:
-		ret = i40e_vc_config_rss_lut(vf, msg, msglen);
-		break;
-	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
-		ret = i40e_vc_get_rss_hena(vf, msg, msglen);
-		break;
-	case VIRTCHNL_OP_SET_RSS_HENA:
-		ret = i40e_vc_set_rss_hena(vf, msg, msglen);
-		break;
-	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
-		ret = i40e_vc_enable_vlan_stripping(vf, msg, msglen);
-		break;
-	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
-		ret = i40e_vc_disable_vlan_stripping(vf, msg, msglen);
-		break;
+/**
+ * i40e_get_mac_list
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @mac_list: list head to which the VF's MAC addresses are added
+ *
+ * This function returns the list of MAC addresses configured on the VF. It is
+ * the responsibility of the caller to free the allocated list when finished.
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int i40e_get_mac_list(struct pci_dev *pdev, int vf_id,
+			     struct list_head *mac_list)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_mac_filter *f;
+	struct vfd_macaddr *elem;
+	struct i40e_vsi *vsi;
+	struct i40e_vf *vf;
+	int ret, bkt;
 
-	case VIRTCHNL_OP_UNKNOWN:
-	default:
-		dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
-			v_opcode, local_vf_id);
-		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
-					      I40E_ERR_NOT_IMPLEMENTED);
-		break;
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto error_out;
+	vf = &pf->vf[vf_id];
+	vsi = pf->vsi[vf->lan_vsi_idx];
+	spin_lock_bh(&vsi->mac_filter_hash_lock);
+	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
+		elem = kzalloc(sizeof(*elem), GFP_KERNEL);
+		if (!elem) {
+			ret = -ENOMEM;
+			goto error_unlock;
+		}
+		INIT_LIST_HEAD(&elem->list);
+		ether_addr_copy(elem->mac, f->macaddr);
+		list_add_tail(&elem->list, mac_list);
 	}
-
+error_unlock:
+	spin_unlock_bh(&vsi->mac_filter_hash_lock);
+error_out:
 	return ret;
 }
 
-/**
- * i40e_vc_process_vflr_event
- * @pf: pointer to the PF structure
+/**
+ * i40e_add_macs_to_list
+ * @pdev: PCI device information struct
+ * @vf_id: VF identifier
+ * @mac_list: list of MAC addresses to add
  *
- * called from the vlfr irq handler to
- * free up VF resources and state variables
- **/
-int i40e_vc_process_vflr_event(struct i40e_pf *pf)
+ * This function adds a list of MAC addresses to a VF
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int i40e_add_macs_to_list(struct pci_dev *pdev, int vf_id,
+				 struct list_head *mac_list)
 {
-	struct i40e_hw *hw = &pf->hw;
-	u32 reg, reg_idx, bit_idx;
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct vfd_macaddr *tmp;
+	struct i40e_vsi *vsi;
 	struct i40e_vf *vf;
-	int vf_id;
-
-	if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
-		return 0;
+	int ret;
 
-	/* Re-enable the VFLR interrupt cause here, before looking for which
-	 * VF got reset. Otherwise, if another VF gets a reset while the
-	 * first one is being processed, that interrupt will be lost, and
-	 * that VF will be stuck in reset forever.
-	 */
-	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
-	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
-	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
-	i40e_flush(hw);
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto error_out;
+	vf = &pf->vf[vf_id];
+	vsi = pf->vsi[vf->lan_vsi_idx];
+	spin_lock_bh(&vsi->mac_filter_hash_lock);
+	list_for_each_entry(tmp, mac_list, list) {
+		struct i40e_mac_filter *f;
 
-	clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
-	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
-		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
-		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
-		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
-		vf = &pf->vf[vf_id];
-		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
-		if (reg & BIT(bit_idx))
-			/* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
-			i40e_reset_vf(vf, true);
+		f = i40e_find_mac(vsi, tmp->mac);
+		if (!f) {
+			f = i40e_add_mac_filter(vsi, tmp->mac);
+			if (!f) {
+				dev_err(&pf->pdev->dev,
+					"Unable to add MAC filter %pM for VF %d\n",
+					tmp->mac, vf->vf_id);
+				ret = I40E_ERR_PARAM;
+				spin_unlock_bh(&vsi->mac_filter_hash_lock);
+				goto error_out;
+			}
+		}
 	}
+	spin_unlock_bh(&vsi->mac_filter_hash_lock);
 
-	return 0;
+	/* program the updated filter list */
+	ret = i40e_sync_vsi_filters(vsi);
+	if (ret)
+		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
+			vf->vf_id, ret);
+error_out:
+	return ret;
 }
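[The caller owns the input list. A minimal sketch of building a one-entry list and handing it in; the wrapper function and the locally administered address here are illustrative assumptions, not part of this patch:]

	/* Sketch only: construct a one-entry mac_list for
	 * i40e_add_macs_to_list(). The address is illustrative.
	 */
	static int example_add_one_mac(struct pci_dev *pdev, int vf_id)
	{
		struct vfd_macaddr entry = {
			.mac = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
		};
		LIST_HEAD(macs);

		INIT_LIST_HEAD(&entry.list);
		list_add_tail(&entry.list, &macs);
		return i40e_add_macs_to_list(pdev, vf_id, &macs);
	}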
 
-/**
- * i40e_ndo_set_vf_mac
- * @netdev: network interface device structure
+/**
+ * i40e_rem_macs_from_list
+ * @pdev: PCI device information struct
  * @vf_id: VF identifier
- * @mac: mac address
+ * @mac_list: list of MAC addresses to remove
  *
- * program VF mac address
- **/
-int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+ * This function removes a list of MAC addresses from a VF
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int i40e_rem_macs_from_list(struct pci_dev *pdev, int vf_id,
+				   struct list_head *mac_list)
 {
-	struct i40e_netdev_priv *np = netdev_priv(netdev);
-	struct i40e_vsi *vsi = np->vsi;
-	struct i40e_pf *pf = vsi->back;
-	struct i40e_mac_filter *f;
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct vfd_macaddr *tmp;
+	struct i40e_vsi *vsi;
 	struct i40e_vf *vf;
-	int ret = 0;
-	int bkt;
+	int ret;
 
 	/* validate the request */
-	if (vf_id >= pf->num_alloc_vfs) {
-		dev_err(&pf->pdev->dev,
-			"Invalid VF Identifier %d\n", vf_id);
-		ret = -EINVAL;
-		goto error_param;
-	}
-
-	vf = &(pf->vf[vf_id]);
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto error_out;
+	vf = &pf->vf[vf_id];
 	vsi = pf->vsi[vf->lan_vsi_idx];
-	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
-		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
-			vf_id);
-		ret = -EAGAIN;
-		goto error_param;
+	spin_lock_bh(&vsi->mac_filter_hash_lock);
+	list_for_each_entry(tmp, mac_list, list) {
+		if (i40e_del_mac_filter(vsi, tmp->mac)) {
+			ret = I40E_ERR_INVALID_MAC_ADDR;
+			spin_unlock_bh(&vsi->mac_filter_hash_lock);
+			goto error_out;
+		}
 	}
+	spin_unlock_bh(&vsi->mac_filter_hash_lock);
 
-	if (is_multicast_ether_addr(mac)) {
-		dev_err(&pf->pdev->dev,
-			"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
-		ret = -EINVAL;
-		goto error_param;
-	}
+	/* program the updated filter list */
+	ret = i40e_sync_vsi_filters(vsi);
+	if (ret)
+		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
+			vf->vf_id, ret);
+error_out:
+	return ret;
+}
 
-	/* Lock once because below invoked function add/del_filter requires
-	 * mac_filter_hash_lock to be held
-	 */
-	spin_lock_bh(&vsi->mac_filter_hash_lock);
+/**
+ * i40e_set_pf_qos_apply
+ * @pdev: PCI device information struct
+ *
+ * This function applies the stored BW shares to all VFs
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int i40e_set_pf_qos_apply(struct pci_dev *pdev)
+{
+	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	int i, j, ret = 0, total_share = 0;
+	struct i40e_vf *vf = pf->vf;
+	struct i40e_vsi *vsi;
 
-	/* delete the temporary mac address */
-	if (!is_zero_ether_addr(vf->default_lan_addr.addr))
-		i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
+	for (i = 0; i < pf->num_alloc_vfs; i++)
+		total_share += pf->vf[i].bw_share;
 
-	/* Delete all the filters for this VSI - we're going to kill it
-	 * anyway.
-	 */
-	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist)
-		__i40e_del_filter(vsi, f);
+	/* verify BW share distribution */
+	if (total_share > 100) {
+		dev_err(&pdev->dev, "Total share is greater than 100 percent\n");
+		return I40E_ERR_PARAM;
+	}
 
-	spin_unlock_bh(&vsi->mac_filter_hash_lock);
+	memset(&bw_data, 0, sizeof(struct i40e_aqc_configure_vsi_tc_bw_data));
+	for (i = 0; i < pf->num_alloc_vfs; i++) {
+		vf = &pf->vf[i];
+		ret = i40e_validate_vf(pf, vf->vf_id);
+		if (ret)
+			continue;
+		if (!vf->bw_share)
+			continue;
+		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+			dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
+				vf->vf_id);
+			ret = I40E_ERR_PARAM;
+			goto error_param;
+		}
+		vsi = pf->vsi[vf->lan_vsi_idx];
+		bw_data.tc_valid_bits = 1;
+		bw_data.tc_bw_credits[0] = vf->bw_share;
 
-	/* program mac filter */
-	if (i40e_sync_vsi_filters(vsi)) {
-		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
-		ret = -EIO;
-		goto error_param;
-	}
-	ether_addr_copy(vf->default_lan_addr.addr, mac);
+		ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
+		if (ret) {
+			dev_info(&pf->pdev->dev,
+				 "AQ command Config VSI BW allocation per TC failed = %d\n",
+				 pf->hw.aq.asq_last_status);
+			vf->bw_share_applied = false;
+			return -EINVAL;
+		}
 
-	if (is_zero_ether_addr(mac)) {
-		vf->pf_set_mac = false;
-		dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
-	} else {
-		vf->pf_set_mac = true;
-		dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
-			 mac, vf_id);
-	}
+		for (j = 0; j < I40E_MAX_TRAFFIC_CLASS; j++)
+			vsi->info.qs_handle[j] = bw_data.qs_handles[j];
 
-	/* Force the VF driver stop so it has to reload with new MAC address */
-	i40e_vc_disable_vf(pf, vf);
-	dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
+		/* set the tracking bool to true */
+		vf->bw_share_applied = true;
+	}
+	pf->vf_bw_applied = true;
 
 error_param:
 	return ret;
 }
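[The BW-share check above is a plain percentage budget across all VFs. A sketch of that check in isolation, with example share sets in the comment:]

	/* Sketch: bw_share values are percentages of the PF's bandwidth;
	 * e.g. shares {40, 30, 20, 10} fit the budget, {40, 30, 20, 11}
	 * do not. Each applied share lands in bw_data.tc_bw_credits[0].
	 */
	static bool shares_fit_budget(const struct i40e_vf *vfs, int num_vfs)
	{
		int i, total = 0;

		for (i = 0; i < num_vfs; i++)
			total += vfs[i].bw_share;
		return total <= 100;
	}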
 
 /**
- * i40e_ndo_set_vf_port_vlan
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @vlan_id: mac address
- * @qos: priority setting
- * @vlan_proto: vlan protocol
+ * i40e_get_pf_ingress_mirror - Gets the configured ingress mirror for PF
+ * @pdev: PCI device information struct
+ * @mirror: pointer to return the ingress mirror
  *
- * program VF vlan id and/or qos
+ * Gets the ingress mirror configured
+ *
+ * Returns 0 on success, negative on failure
  **/
-int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
-			      u16 vlan_id, u8 qos, __be16 vlan_proto)
+static int i40e_get_pf_ingress_mirror(struct pci_dev *pdev, int *mirror)
 {
-	u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
-	struct i40e_netdev_priv *np = netdev_priv(netdev);
-	struct i40e_pf *pf = np->vsi->back;
-	struct i40e_vsi *vsi;
-	struct i40e_vf *vf;
-	int ret = 0;
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	*mirror = pf->ingress_vlan;
+	return 0;
+}
 
-	/* validate the request */
-	if (vf_id >= pf->num_alloc_vfs) {
-		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
-		ret = -EINVAL;
-		goto error_pvid;
-	}
+/**
+ * i40e_set_pf_ingress_mirror - Sets the configured ingress mirror for PF
+ * @pdev: PCI device information struct
+ * @mirror: mirror VF to set, or I40E_NO_VF_MIRROR to remove the mirror
+ *
+ * Sets the ingress mirror for the PF
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_set_pf_ingress_mirror(struct pci_dev *pdev, const int mirror)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vsi *src_vsi, *mirror_vsi;
+	struct i40e_vf *mirror_vf;
+	u16 rule_type, rule_id;
+	int ret;
 
-	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
-		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
-		ret = -EINVAL;
-		goto error_pvid;
-	}
+	/* The Admin Queue mirroring rules refer to the traffic
+	 * directions from the perspective of the switch, not the VSI
+	 * we apply the mirroring rule on - so the behaviour of a VSI
+	 * ingress mirror is classified as an egress rule
+	 */
+	rule_type = I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS;
+	src_vsi = pf->vsi[pf->lan_vsi];
+	if (mirror == I40E_NO_VF_MIRROR) {
+		/* Del mirrors */
+		rule_id = pf->ingress_rule_id;
+		ret = i40e_del_ingress_egress_mirror(src_vsi, rule_type,
+						     rule_id);
+		if (ret)
+			goto err_out;
+		pf->ingress_vlan = I40E_NO_VF_MIRROR;
+	} else {
+		/* validate the mirror */
+		ret = i40e_validate_vf(pf, mirror);
+		if (ret)
+			goto err_out;
+		mirror_vf = &pf->vf[mirror];
+		mirror_vsi = pf->vsi[mirror_vf->lan_vsi_idx];
 
-	if (vlan_proto != htons(ETH_P_8021Q)) {
-		dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
-		ret = -EPROTONOSUPPORT;
-		goto error_pvid;
+		/* Add mirrors */
+		ret = i40e_add_ingress_egress_mirror(src_vsi, mirror_vsi,
+						     rule_type, &rule_id);
+		if (ret)
+			goto err_out;
+		pf->ingress_vlan = mirror;
+		pf->ingress_rule_id = rule_id;
 	}
+err_out:
+	return ret;
+}
 
-	vf = &(pf->vf[vf_id]);
-	vsi = pf->vsi[vf->lan_vsi_idx];
-	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
-		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
-			vf_id);
-		ret = -EAGAIN;
-		goto error_pvid;
-	}
+/**
+ * i40e_get_pf_egress_mirror - Gets the configured egress mirror for PF
+ * @pdev: PCI device information struct
+ * @mirror: pointer to return the egress mirror
+ *
+ * Gets the egress mirror configured
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_get_pf_egress_mirror(struct pci_dev *pdev, int *mirror)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	*mirror = pf->egress_vlan;
+	return 0;
+}
 
-	if (le16_to_cpu(vsi->info.pvid) == vlanprio)
-		/* duplicate request, so just return success */
-		goto error_pvid;
+/**
+ * i40e_set_pf_egress_mirror - Sets the configured egress mirror for PF
+ * @pdev: PCI device information struct
+ * @mirror: mirror VF to set, or I40E_NO_VF_MIRROR to remove the mirror
+ *
+ * Sets the egress mirror for the PF
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_set_pf_egress_mirror(struct pci_dev *pdev, const int mirror)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vsi *src_vsi, *mirror_vsi;
+	struct i40e_vf *mirror_vf;
+	u16 rule_type, rule_id;
+	int ret;
 
-	/* Locked once because multiple functions below iterate list */
-	spin_lock_bh(&vsi->mac_filter_hash_lock);
+	/* The Admin Queue mirroring rules refer to the traffic
+	 * directions from the perspective of the switch, not the VSI
+	 * we apply the mirroring rule on - so the behaviour of a VSI
+	 * egress mirror is classified as an ingress rule
+	 */
+	rule_type = I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
+	src_vsi = pf->vsi[pf->lan_vsi];
+	if (mirror == I40E_NO_VF_MIRROR) {
+		/* Del mirrors */
+		rule_id = pf->egress_rule_id;
+		ret = i40e_del_ingress_egress_mirror(src_vsi, rule_type,
+						     rule_id);
+		if (ret)
+			goto err_out;
+		pf->egress_vlan = I40E_NO_VF_MIRROR;
+	} else {
+		/* validate the mirror */
+		ret = i40e_validate_vf(pf, mirror);
+		if (ret)
+			goto err_out;
+		mirror_vf = &pf->vf[mirror];
+		mirror_vsi = pf->vsi[mirror_vf->lan_vsi_idx];
 
-	if (le16_to_cpu(vsi->info.pvid) == 0 && i40e_is_vsi_in_vlan(vsi)) {
-		dev_err(&pf->pdev->dev,
-			"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
-			vf_id);
-		/* Administrator Error - knock the VF offline until he does
-		 * the right thing by reconfiguring his network correctly
-		 * and then reloading the VF driver.
-		 */
-		i40e_vc_disable_vf(pf, vf);
-		/* During reset the VF got a new VSI, so refresh the pointer. */
-		vsi = pf->vsi[vf->lan_vsi_idx];
+		/* Add mirrors */
+		ret = i40e_add_ingress_egress_mirror(src_vsi, mirror_vsi,
+						     rule_type, &rule_id);
+		if (ret)
+			goto err_out;
+		pf->egress_vlan = mirror;
+		pf->egress_rule_id = rule_id;
 	}
+err_out:
+	return ret;
+}
 
-	/* Check for condition where there was already a port VLAN ID
-	 * filter set and now it is being deleted by setting it to zero.
-	 * Additionally check for the condition where there was a port
-	 * VLAN but now there is a new and different port VLAN being set.
-	 * Before deleting all the old VLAN filters we must add new ones
-	 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
-	 * MAC addresses deleted.
-	 */
-	if ((!(vlan_id || qos) ||
-	    vlanprio != le16_to_cpu(vsi->info.pvid)) &&
-	    vsi->info.pvid) {
-		ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
-		if (ret) {
-			dev_info(&vsi->back->pdev->dev,
-				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
-				 vsi->back->hw.aq.asq_last_status);
-			spin_unlock_bh(&vsi->mac_filter_hash_lock);
-			goto error_pvid;
-		}
-	}
+#define I40E_GL_SWT_L2TAGCTRL(_i) (0x001C0A70 + ((_i) * 4))
+#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT 16
+#define OUTER_TAG_IDX 2
+static int i40e_get_pf_tpid(struct pci_dev *pdev, u16 *tp_id)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
 
-	if (vsi->info.pvid) {
-		/* remove all filters on the old VLAN */
-		i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
-					   VLAN_VID_MASK));
-	}
+	if (!(pf->hw.flags & I40E_HW_FLAG_802_1AD_CAPABLE))
+		return -EOPNOTSUPP;
 
-	spin_unlock_bh(&vsi->mac_filter_hash_lock);
-	if (vlan_id || qos)
-		ret = i40e_vsi_add_pvid(vsi, vlanprio);
-	else
-		i40e_vsi_remove_pvid(vsi);
-	spin_lock_bh(&vsi->mac_filter_hash_lock);
+	*tp_id = (u16)(rd32(&pf->hw, I40E_GL_SWT_L2TAGCTRL(OUTER_TAG_IDX)) >>
+		      I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
 
-	if (vlan_id) {
-		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
-			 vlan_id, qos, vf_id);
+	return 0;
+}
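[The TPID read is a straight field extraction: the outer tag's ethertype sits in bits 31:16 of GL_SWT_L2TAGCTRL[2]. A worked sketch with an assumed raw register value:]

	/* Sketch: with a raw register value of 0x88A80000, shifting by the
	 * ETHERTYPE field offset (16) recovers the 802.1ad TPID 0x88A8.
	 */
	static u16 example_extract_tpid(u32 raw)
	{
		return (u16)(raw >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
	}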
 
-		/* add new VLAN filter for each MAC */
-		ret = i40e_add_vlan_all_mac(vsi, vlan_id);
-		if (ret) {
-			dev_info(&vsi->back->pdev->dev,
-				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
-				 vsi->back->hw.aq.asq_last_status);
-			spin_unlock_bh(&vsi->mac_filter_hash_lock);
-			goto error_pvid;
-		}
+static int i40e_set_pf_tpid(struct pci_dev *pdev, u16 tp_id)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	int ret;
 
-		/* remove the previously added non-VLAN MAC filters */
-		i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
+	if (!(pf->hw.flags & I40E_HW_FLAG_802_1AD_CAPABLE))
+		return -EOPNOTSUPP;
+
+	if (pf->hw.pf_id != 0) {
+		dev_err(&pdev->dev,
+			"TPID configuration only supported for PF 0\n");
+		return -EOPNOTSUPP;
 	}
 
-	spin_unlock_bh(&vsi->mac_filter_hash_lock);
+	pf->hw.first_tag = tp_id;
+	ret = i40e_aq_set_switch_config(&pf->hw, 0, 0, 0, NULL);
 
-	/* Schedule the worker thread to take care of applying changes */
-	i40e_service_event_schedule(vsi->back);
+	return ret;
+}
 
-	if (ret) {
-		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
-		goto error_pvid;
-	}
+static int i40e_get_num_queues(struct pci_dev *pdev, int vf_id, int *num_queues)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vf *vf;
+	int ret;
+
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		return ret;
+	vf = &pf->vf[vf_id];
 
-	/* The Port VLAN needs to be saved across resets the same as the
-	 * default LAN MAC address.
-	 */
-	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
-	ret = 0;
+	*num_queues = vf->num_queue_pairs;
 
-error_pvid:
 	return ret;
 }
 
-#define I40E_BW_CREDIT_DIVISOR 50     /* 50Mbps per BW credit */
-#define I40E_MAX_BW_INACTIVE_ACCUM 4  /* device can accumulate 4 credits max */
-/**
- * i40e_ndo_set_vf_bw
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @tx_rate: Tx rate
- *
- * configure VF Tx rate
- **/
-int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
-		       int max_tx_rate)
+static int i40e_set_num_queues(struct pci_dev *pdev, int vf_id, int num_queues)
 {
-	struct i40e_netdev_priv *np = netdev_priv(netdev);
-	struct i40e_pf *pf = np->vsi->back;
-	struct i40e_vsi *vsi;
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
 	struct i40e_vf *vf;
-	int speed = 0;
-	int ret = 0;
+	int ret;
 
 	/* validate the request */
-	if (vf_id >= pf->num_alloc_vfs) {
-		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
-		ret = -EINVAL;
-		goto error;
-	}
-
-	if (min_tx_rate) {
-		dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
-			min_tx_rate, vf_id);
-		return -EINVAL;
-	}
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		return ret;
+	vf = &pf->vf[vf_id];
 
-	vf = &(pf->vf[vf_id]);
-	vsi = pf->vsi[vf->lan_vsi_idx];
-	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
-		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
-			vf_id);
-		ret = -EAGAIN;
-		goto error;
+	if (test_bit(I40E_VF_STATE_LOADED_VF_DRIVER, &vf->vf_states)) {
+		dev_err(&pdev->dev,
+			"Unable to configure %d queues, please unbind the driver for VF %d\n",
+			num_queues, vf_id);
+		return -EAGAIN;
 	}
-
-	switch (pf->hw.phy.link_info.link_speed) {
-	case I40E_LINK_SPEED_40GB:
-		speed = 40000;
-		break;
-	case I40E_LINK_SPEED_25GB:
-		speed = 25000;
-		break;
-	case I40E_LINK_SPEED_20GB:
-		speed = 20000;
-		break;
-	case I40E_LINK_SPEED_10GB:
-		speed = 10000;
-		break;
-	case I40E_LINK_SPEED_1GB:
-		speed = 1000;
-		break;
-	default:
-		break;
+	if (num_queues > I40E_MAX_VF_QUEUES) {
+		dev_err(&pdev->dev,
+			"Unable to configure %d VF queues, the maximum is %d\n",
+			num_queues, I40E_MAX_VF_QUEUES);
+		return -EINVAL;
 	}
-
-	if (max_tx_rate > speed) {
-		dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.\n",
-			max_tx_rate, vf->vf_id);
-		ret = -EINVAL;
-		goto error;
+	if (num_queues - vf->num_queue_pairs > pf->queues_left) {
+		dev_err(&pdev->dev,
+			"Unable to configure %d VF queues, only %d available\n",
+			num_queues, vf->num_queue_pairs + pf->queues_left);
+		return -EINVAL;
 	}
 
-	if ((max_tx_rate < 50) && (max_tx_rate > 0)) {
-		dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n");
-		max_tx_rate = 50;
-	}
+	/* Set vf->num_req_queues to the desired value and reset the VF. When
+	 * the VSI is reallocated it will be configured with the new queue
+	 * count.
+	 */
+	vf->num_req_queues = num_queues;
+	i40e_vc_notify_vf_reset(vf);
+	i40e_reset_vf(vf, false);
 
-	/* Tx rate credits are in values of 50Mbps, 0 is disabled*/
-	ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
-					  max_tx_rate / I40E_BW_CREDIT_DIVISOR,
-					  I40E_MAX_BW_INACTIVE_ACCUM, NULL);
-	if (ret) {
-		dev_err(&pf->pdev->dev, "Unable to set max tx rate, error code %d.\n",
-			ret);
-		ret = -EIO;
-		goto error;
-	}
-	vf->tx_rate = max_tx_rate;
-error:
-	return ret;
+	return 0;
 }
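[Note that i40e_set_num_queues only records the request and resets the VF; nothing is reprogrammed synchronously. A sketch of the deferred sequence the setter relies on:]

	/* Sketch of the deferred reconfiguration this setter triggers:
	 *  1. reject if a VF driver is bound (I40E_VF_STATE_LOADED_VF_DRIVER)
	 *  2. bounds-check against I40E_MAX_VF_QUEUES and pf->queues_left
	 *  3. record the request in vf->num_req_queues
	 *  4. i40e_reset_vf() rebuilds the VF's VSI, which picks up the
	 *     requested queue count when it is reallocated
	 */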
 
 /**
- * i40e_ndo_get_vf_config
- * @netdev: network interface device structure
+ * i40e_get_max_tx_rate
+ * @pdev: PCI device information struct
  * @vf_id: VF identifier
- * @ivi: VF configuration structure
+ * @max_tx_rate: max transmit bandwidth rate
  *
- * return VF configuration
- **/
-int i40e_ndo_get_vf_config(struct net_device *netdev,
-			   int vf_id, struct ifla_vf_info *ivi)
+ * This function returns the transmit bandwidth limit, in Mbps, for the
+ * specified VF; a value of 0 means rate limiting is disabled.
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int i40e_get_max_tx_rate(struct pci_dev *pdev, int vf_id,
+				unsigned int *max_tx_rate)
 {
-	struct i40e_netdev_priv *np = netdev_priv(netdev);
-	struct i40e_vsi *vsi = np->vsi;
-	struct i40e_pf *pf = vsi->back;
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vsi *vsi;
 	struct i40e_vf *vf;
-	int ret = 0;
+	int ret;
+
+	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+		dev_warn(&pf->pdev->dev,
+			 "Unable to configure VFs, other operation is pending.\n");
+		return -EAGAIN;
+	}
 
 	/* validate the request */
-	if (vf_id >= pf->num_alloc_vfs) {
-		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
-		ret = -EINVAL;
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
 		goto error_param;
-	}
 
-	vf = &(pf->vf[vf_id]);
-	/* first vsi is always the LAN vsi */
+	vf = &pf->vf[vf_id];
 	vsi = pf->vsi[vf->lan_vsi_idx];
-	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
-		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
-			vf_id);
-		ret = -EAGAIN;
+	if (!vsi) {
+		ret = -ENOENT;
 		goto error_param;
 	}
 
-	ivi->vf = vf_id;
-
-	ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
-
-	ivi->max_tx_rate = vf->tx_rate;
-	ivi->min_tx_rate = 0;
-	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
-	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
-		   I40E_VLAN_PRIORITY_SHIFT;
-	if (vf->link_forced == false)
-		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
-	else if (vf->link_up == true)
-		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
-	else
-		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
-	ivi->spoofchk = vf->spoofchk;
-	ivi->trusted = vf->trusted;
-	ret = 0;
+	*max_tx_rate = vf->tx_rate;
 
 error_param:
+	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
 	return ret;
 }
 
 /**
- * i40e_ndo_set_vf_link_state
- * @netdev: network interface device structure
+ * i40e_set_max_tx_rate
+ * @pdev: PCI device information struct
  * @vf_id: VF identifier
- * @link: required link state
+ * @max_tx_rate: max transmit bandwidth rate to set
  *
- * Set the link state of a specified VF, regardless of physical link state
+ * This function sets the maximum transmit bandwidth, in Mbps, for the
+ * specified VF; a value of 0 disables rate limiting.
+ *
+ * Returns 0 on success, negative on failure
  **/
-int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
+static int i40e_set_max_tx_rate(struct pci_dev *pdev, int vf_id,
+				unsigned int *max_tx_rate)
 {
-	struct i40e_netdev_priv *np = netdev_priv(netdev);
-	struct i40e_pf *pf = np->vsi->back;
-	struct virtchnl_pf_event pfe;
-	struct i40e_hw *hw = &pf->hw;
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_vsi *vsi;
 	struct i40e_vf *vf;
-	int abs_vf_id;
-	int ret = 0;
+	int ret;
 
-	/* validate the request */
-	if (vf_id >= pf->num_alloc_vfs) {
-		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
-		ret = -EINVAL;
-		goto error_out;
+	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+		dev_warn(&pf->pdev->dev,
+			 "Unable to configure VFs, other operation is pending.\n");
+		return -EAGAIN;
 	}
 
-	vf = &pf->vf[vf_id];
-	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
-
-	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
-	pfe.severity = PF_EVENT_SEVERITY_INFO;
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto error;
 
-	switch (link) {
-	case IFLA_VF_LINK_STATE_AUTO:
-		vf->link_forced = false;
-		pfe.event_data.link_event.link_status =
-			pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
-		pfe.event_data.link_event.link_speed =
-			(enum virtchnl_link_speed)
-			pf->hw.phy.link_info.link_speed;
-		break;
-	case IFLA_VF_LINK_STATE_ENABLE:
-		vf->link_forced = true;
-		vf->link_up = true;
-		pfe.event_data.link_event.link_status = true;
-		pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
-		break;
-	case IFLA_VF_LINK_STATE_DISABLE:
-		vf->link_forced = true;
-		vf->link_up = false;
-		pfe.event_data.link_event.link_status = false;
-		pfe.event_data.link_event.link_speed = 0;
-		break;
-	default:
-		ret = -EINVAL;
-		goto error_out;
+	vf = &pf->vf[vf_id];
+	vsi = pf->vsi[vf->lan_vsi_idx];
+	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
+			vf_id);
+		ret = -EAGAIN;
+		goto error;
 	}
-	/* Notify the VF of its new link state */
-	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
-			       0, (u8 *)&pfe, sizeof(pfe), NULL);
 
-error_out:
+	ret = i40e_set_bw_limit(vsi, vsi->seid, *max_tx_rate);
+	if (ret)
+		goto error;
+
+	vf->tx_rate = *max_tx_rate;
+error:
+	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
 	return ret;
 }
 
 /**
- * i40e_ndo_set_vf_spoofchk
- * @netdev: network interface device structure
+ * i40e_get_trust_state
+ * @pdev: PCI device information struct
  * @vf_id: VF identifier
- * @enable: flag to enable or disable feature
+ * @enable: on success, true if enabled, false if not
  *
- * Enable or disable VF spoof checking
+ * Gets the VF trust configuration.
+ *
+ * Returns 0 on success, negative on failure
  **/
-int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
+static int i40e_get_trust_state(struct pci_dev *pdev, int vf_id, bool *enable)
 {
-	struct i40e_netdev_priv *np = netdev_priv(netdev);
-	struct i40e_vsi *vsi = np->vsi;
-	struct i40e_pf *pf = vsi->back;
-	struct i40e_vsi_context ctxt;
-	struct i40e_hw *hw = &pf->hw;
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
 	struct i40e_vf *vf;
-	int ret = 0;
-
-	/* validate the request */
-	if (vf_id >= pf->num_alloc_vfs) {
-		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
-		ret = -EINVAL;
-		goto out;
-	}
+	int ret;
 
-	vf = &(pf->vf[vf_id]);
-	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
-		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
-			vf_id);
-		ret = -EAGAIN;
-		goto out;
-	}
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		return ret;
+	vf = &pf->vf[vf_id];
 
-	if (enable == vf->spoofchk)
-		goto out;
+	*enable = vf->trusted;
 
-	vf->spoofchk = enable;
-	memset(&ctxt, 0, sizeof(ctxt));
-	ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
-	ctxt.pf_num = pf->hw.pf_id;
-	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
-	if (enable)
-		ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
-					I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
-	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
-	if (ret) {
-		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
-			ret);
-		ret = -EIO;
-	}
-out:
 	return ret;
 }
 
 /**
- * i40e_ndo_set_vf_trust
- * @netdev: network interface device structure of the pf
+ * i40e_set_trust_state
+ * @pdev: PCI device information struct
  * @vf_id: VF identifier
- * @setting: trust setting
+ * @enable: enable or disable trust
  *
- * Enable or disable VF trust setting
+ * Sets the VF trust configuration
+ *
+ * Returns 0 on success, negative on failure
  **/
-int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
+static int i40e_set_trust_state(struct pci_dev *pdev, int vf_id, bool enable)
 {
-	struct i40e_netdev_priv *np = netdev_priv(netdev);
-	struct i40e_pf *pf = np->vsi->back;
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
 	struct i40e_vf *vf;
-	int ret = 0;
+	int ret;
 
-	/* validate the request */
-	if (vf_id >= pf->num_alloc_vfs) {
-		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
-		return -EINVAL;
+	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+		return -EAGAIN;
 	}
 
 	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
 		dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 
-	vf = &pf->vf[vf_id];
+	/* validate the request */
+	ret = i40e_validate_vf(pf, vf_id);
+	if (ret)
+		goto out;
 
-	if (!vf)
-		return -EINVAL;
-	if (setting == vf->trusted)
+	vf = &pf->vf[vf_id];
+	/* if vf is in base mode, make it untrusted */
+	if (pf->vf_base_mode_only)
+		enable = false;
+	if (enable == vf->trusted)
 		goto out;
 
-	vf->trusted = setting;
-	i40e_vc_notify_vf_reset(vf);
-	i40e_reset_vf(vf, false);
+	vf->trusted = enable;
+	i40e_vc_reset_vf(vf, true);
 	dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
-		 vf_id, setting ? "" : "un");
+		 vf_id, enable ? "" : "un");
+
+#ifdef __TC_MQPRIO_MODE_MAX
+	if (vf->adq_enabled) {
+		if (!vf->trusted) {
+			dev_info(&pf->pdev->dev,
+				 "VF %u no longer Trusted, deleting all cloud filters\n",
+				 vf_id);
+			i40e_del_all_cloud_filters(vf);
+		}
+	}
+#endif /* __TC_MQPRIO_MODE_MAX */
+
 out:
+	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
 	return ret;
 }
+
+const struct vfd_ops i40e_vfd_ops = {
+	.get_trunk		= i40e_get_trunk,
+	.set_trunk		= i40e_set_trunk,
+	.get_vlan_mirror	= i40e_get_mirror,
+	.set_vlan_mirror	= i40e_set_mirror,
+	.set_allow_untagged	= i40e_set_allow_untagged,
+	.get_allow_untagged	= i40e_get_allow_untagged,
+	.get_loopback		= i40e_get_loopback,
+	.set_loopback		= i40e_set_loopback,
+	.get_vlan_strip		= i40e_get_vlan_strip,
+	.set_vlan_strip		= i40e_set_vlan_strip,
+	.get_rx_bytes		= i40e_get_rx_bytes,
+	.get_rx_dropped		= i40e_get_rx_dropped,
+	.get_rx_packets		= i40e_get_rx_packets,
+	.get_tx_bytes		= i40e_get_tx_bytes,
+	.get_tx_dropped		= i40e_get_tx_dropped,
+	.get_tx_packets		= i40e_get_tx_packets,
+	.get_tx_errors		= i40e_get_tx_errors,
+	.get_mac		= i40e_get_mac,
+	.set_mac		= i40e_set_mac,
+	.get_promisc		= i40e_get_promisc,
+	.set_promisc		= i40e_set_promisc,
+	.get_ingress_mirror	= i40e_get_ingress_mirror,
+	.set_ingress_mirror	= i40e_set_ingress_mirror,
+	.get_egress_mirror	= i40e_get_egress_mirror,
+	.set_egress_mirror	= i40e_set_egress_mirror,
+	.get_link_state		= i40e_get_link_state,
+	.set_link_state		= i40e_set_link_state,
+	.get_mac_list		= i40e_get_mac_list,
+	.add_macs_to_list	= i40e_add_macs_to_list,
+	.rem_macs_from_list	= i40e_rem_macs_from_list,
+	.get_vf_enable		= i40e_get_vf_enable,
+	.set_vf_enable		= i40e_set_vf_enable,
+	.reset_stats		= i40e_reset_vf_stats,
+	.set_vf_bw_share	= i40e_store_vf_bw_share,
+	.get_vf_bw_share	= i40e_get_vf_bw_share,
+	.set_pf_qos_apply	= i40e_set_pf_qos_apply,
+	.get_pf_ingress_mirror	= i40e_get_pf_ingress_mirror,
+	.set_pf_ingress_mirror	= i40e_set_pf_ingress_mirror,
+	.get_pf_egress_mirror	= i40e_get_pf_egress_mirror,
+	.set_pf_egress_mirror	= i40e_set_pf_egress_mirror,
+	.get_pf_tpid		= i40e_get_pf_tpid,
+	.set_pf_tpid		= i40e_set_pf_tpid,
+	.get_num_queues		= i40e_get_num_queues,
+	.set_num_queues		= i40e_set_num_queues,
+	.get_max_tx_rate	= i40e_get_max_tx_rate,
+	.set_max_tx_rate	= i40e_set_max_tx_rate,
+	.get_trust_state	= i40e_get_trust_state,
+	.set_trust_state	= i40e_set_trust_state,
+};
+#endif /* HAVE_NDO_SET_VF_LINK_STATE */
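[The vfd_ops table is the only non-static surface of this file; the out-of-tree vfd layer dispatches per-VF sysfs reads and writes through it. A hedged sketch of such a consumer; the dispatcher shown here is an assumption, not part of this patch:]

	/* Hypothetical consumer: format a VF's MAC through the ops table. */
	static ssize_t example_mac_show(struct pci_dev *pdev, int vf_id, char *buf)
	{
		u8 mac[ETH_ALEN];
		int err = i40e_vfd_ops.get_mac(pdev, vf_id, mac);

		if (err)
			return err;
		return scnprintf(buf, PAGE_SIZE, "%pM\n", mac);
	}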
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 1f4b0c504..77fb5cfef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -1,28 +1,5 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
 
 #ifndef _I40E_VIRTCHNL_PF_H_
 #define _I40E_VIRTCHNL_PF_H_
@@ -36,9 +13,11 @@
 #define I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED	3
 #define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED	10
 
-#define I40E_VLAN_PRIORITY_SHIFT	12
+#define I40E_VLAN_PRIORITY_SHIFT	13
 #define I40E_VLAN_MASK			0xFFF
-#define I40E_PRIORITY_MASK		0x7000
+#define I40E_PRIORITY_MASK		0xE000
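+/* The new values follow the 802.1Q TCI layout: PCP lives in bits 15:13
+ * (shift 13, mask 0xE000) while the VLAN ID occupies bits 11:0 (0xFFF).
+ */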
+
+#define I40E_MAX_VF_PROMISC_FLAGS	3
 
 /* Various queue ctrls */
 enum i40e_queue_ctrl {
@@ -55,19 +34,30 @@ enum i40e_queue_ctrl {
 enum i40e_vf_states {
 	I40E_VF_STATE_INIT = 0,
 	I40E_VF_STATE_ACTIVE,
-	I40E_VF_STATE_IWARPENA,
-	I40E_VF_STATE_FCOEENA,
 	I40E_VF_STATE_DISABLED,
 	I40E_VF_STATE_MC_PROMISC,
 	I40E_VF_STATE_UC_PROMISC,
 	I40E_VF_STATE_PRE_ENABLE,
+	I40E_VF_STATE_LOADED_VF_DRIVER,
 };
 
 /* VF capabilities */
 enum i40e_vf_capabilities {
 	I40E_VIRTCHNL_VF_CAP_PRIVILEGE = 0,
 	I40E_VIRTCHNL_VF_CAP_L2,
-	I40E_VIRTCHNL_VF_CAP_IWARP,
+};
+
+/* In ADq, a maximum of 4 VSIs can be allocated per VF, including the
+ * primary VF VSI. These fields store the indices, IDs and number of
+ * queues for each VSI, including the primary VF VSI. Each traffic class
+ * is termed a channel, and each channel can in turn have 4 queues, which
+ * means a maximum of 16 queues overall per VF.
+ */
+struct i40evf_channel {
+	u16 vsi_idx; /* index in PF struct for all channel VSIs */
+	u16 vsi_id; /* VSI ID used by firmware */
+	u16 num_qps; /* number of queue pairs requested by user */
+	u64 max_tx_rate; /* bandwidth rate allocation for VSIs */
 };
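+
+/* Illustrative layout implied by the comment above: with 4 traffic
+ * classes, ch[0] describes the primary VF VSI and ch[1..3] the extra ADq
+ * VSIs, each carrying up to 4 queue pairs, i.e. at most 16 queues per VF.
+ */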
 
 /* VF information structure */
@@ -97,6 +87,7 @@ struct i40e_vf {
 	u16 lan_vsi_id;		/* ID as used by firmware */
 
 	u8 num_queue_pairs;	/* num of qps assigned to VF vsis */
+	u8 num_req_queues;	/* num of requested qps */
 	u64 num_mdd_events;	/* num of mdd events detected */
 	/* num of continuous malformed or invalid msgs detected */
 	u64 num_invalid_msgs;
@@ -105,39 +96,87 @@ struct i40e_vf {
 	unsigned long vf_caps;	/* vf's adv. capabilities */
 	unsigned long vf_states;	/* vf's runtime states */
 	unsigned int tx_rate;	/* Tx bandwidth limit in Mbps */
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
 	bool link_forced;
 	bool link_up;		/* only valid if VF link is forced */
-	bool spoofchk;
-	u16 num_mac;
+#endif
+	bool queues_enabled;	/* true if the VF queues are enabled */
+	bool mac_anti_spoof;
 	u16 num_vlan;
-
-	/* RDMA Client */
-	struct virtchnl_iwarp_qvlist_info *qvlist_info;
+	DECLARE_BITMAP(mirror_vlans, VLAN_N_VID);
+	u16 vlan_rule_id;
+#define I40E_NO_VF_MIRROR	-1
+/* assuming the VF ID range is <0..max_supported> */
+#define I40E_IS_MIRROR_VLAN_ID_VALID(id) ((id) >= 0)
+	u16 ingress_rule_id;
+	int ingress_vlan;
+	u16 egress_rule_id;
+	int egress_vlan;
+	DECLARE_BITMAP(trunk_vlans, VLAN_N_VID);
+	bool allow_untagged;
+	bool loopback;
+	bool vlan_stripping;
+	u8 promisc_mode;
+	u8 bw_share;
+	bool bw_share_applied; /* true if config is applied to the device */
+	bool pf_ctrl_disable; /* tracking bool for PF ctrl of VF enable/disable */
+
+	/* ADq related variables */
+	bool adq_enabled; /* flag to enable adq */
+	u8 num_tc;
+	struct i40evf_channel ch[I40E_MAX_VF_VSI];
+	struct hlist_head cloud_filter_list;
+	u16 num_cloud_filters;
 };
 
 void i40e_free_vfs(struct i40e_pf *pf);
+#if defined(HAVE_SRIOV_CONFIGURE) || defined(HAVE_RHEL6_SRIOV_CONFIGURE)
 int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+#endif
 int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);
 int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
 			   u32 v_retval, u8 *msg, u16 msglen);
 int i40e_vc_process_vflr_event(struct i40e_pf *pf);
-void i40e_reset_vf(struct i40e_vf *vf, bool flr);
-void i40e_reset_all_vfs(struct i40e_pf *pf, bool flr);
+bool i40e_reset_vf(struct i40e_vf *vf, bool flr);
+bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr);
 void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
 
 /* VF configuration related iplink handlers */
 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
+#ifdef IFLA_VF_VLAN_INFO_MAX
 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
 			      u16 vlan_id, u8 qos, __be16 vlan_proto);
+#else
+int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
+			      int vf_id, u16 vlan_id, u8 qos);
+#endif
+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
 		       int max_tx_rate);
+#else
+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate);
+#endif
+#ifdef HAVE_NDO_SET_VF_TRUST
 int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting);
+#endif
+int i40e_ndo_enable_vf(struct net_device *netdev, int vf_id, bool enable);
+#ifdef IFLA_VF_MAX
 int i40e_ndo_get_vf_config(struct net_device *netdev,
 			   int vf_id, struct ifla_vf_info *ivi);
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
 int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
+#endif
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
 int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
+#endif
+#endif
 
 void i40e_vc_notify_link_state(struct i40e_pf *pf);
 void i40e_vc_notify_reset(struct i40e_pf *pf);
+#ifdef HAVE_VF_STATS
+int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
+		      struct ifla_vf_stats *vf_stats);
+#endif
+extern const struct vfd_ops i40e_vfd_ops;
 
 #endif /* _I40E_VIRTCHNL_PF_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/kcompat.c b/drivers/net/ethernet/intel/i40e/kcompat.c
new file mode 100644
index 000000000..eb2ba6bd1
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/kcompat.c
@@ -0,0 +1,2761 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
+
+#include "i40e.h"
+#include "kcompat.h"
+
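+/* Every LINUX_VERSION_CODE-guarded section below backfills a kernel API
+ * that older kernels lack; kcompat.h remaps the expected symbol onto the
+ * _kc_-prefixed fallback defined here when the build target predates the
+ * real implementation (the usual Intel kcompat pattern).
+ */
+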
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) || defined __VMKLNX__
+/* From lib/vsprintf.c */
+#include <asm/div64.h>
+
+static int skip_atoi(const char **s)
+{
+	int i=0;
+
+	while (isdigit(**s))
+		i = i*10 + *((*s)++) - '0';
+	return i;
+}
+
+#define _kc_ZEROPAD	1		/* pad with zero */
+#define _kc_SIGN	2		/* unsigned/signed long */
+#define _kc_PLUS	4		/* show plus */
+#define _kc_SPACE	8		/* space if plus */
+#define _kc_LEFT	16		/* left justified */
+#define _kc_SPECIAL	32		/* 0x */
+#define _kc_LARGE	64		/* use 'ABCDEF' instead of 'abcdef' */
+
+static char * number(char * buf, char * end, long long num, int base, int size, int precision, int type)
+{
+	char c,sign,tmp[66];
+	const char *digits;
+	const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
+	const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+	int i;
+
+	digits = (type & _kc_LARGE) ? large_digits : small_digits;
+	if (type & _kc_LEFT)
+		type &= ~_kc_ZEROPAD;
+	if (base < 2 || base > 36)
+		return 0;
+	c = (type & _kc_ZEROPAD) ? '0' : ' ';
+	sign = 0;
+	if (type & _kc_SIGN) {
+		if (num < 0) {
+			sign = '-';
+			num = -num;
+			size--;
+		} else if (type & _kc_PLUS) {
+			sign = '+';
+			size--;
+		} else if (type & _kc_SPACE) {
+			sign = ' ';
+			size--;
+		}
+	}
+	if (type & _kc_SPECIAL) {
+		if (base == 16)
+			size -= 2;
+		else if (base == 8)
+			size--;
+	}
+	i = 0;
+	if (num == 0)
+		tmp[i++]='0';
+	else while (num != 0)
+		tmp[i++] = digits[do_div(num,base)];
+	if (i > precision)
+		precision = i;
+	size -= precision;
+	if (!(type&(_kc_ZEROPAD+_kc_LEFT))) {
+		while(size-->0) {
+			if (buf <= end)
+				*buf = ' ';
+			++buf;
+		}
+	}
+	if (sign) {
+		if (buf <= end)
+			*buf = sign;
+		++buf;
+	}
+	if (type & _kc_SPECIAL) {
+		if (base==8) {
+			if (buf <= end)
+				*buf = '0';
+			++buf;
+		} else if (base==16) {
+			if (buf <= end)
+				*buf = '0';
+			++buf;
+			if (buf <= end)
+				*buf = digits[33];
+			++buf;
+		}
+	}
+	if (!(type & _kc_LEFT)) {
+		while (size-- > 0) {
+			if (buf <= end)
+				*buf = c;
+			++buf;
+		}
+	}
+	while (i < precision--) {
+		if (buf <= end)
+			*buf = '0';
+		++buf;
+	}
+	while (i-- > 0) {
+		if (buf <= end)
+			*buf = tmp[i];
+		++buf;
+	}
+	while (size-- > 0) {
+		if (buf <= end)
+			*buf = ' ';
+		++buf;
+	}
+	return buf;
+}
+
+int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
+{
+	int len;
+	unsigned long long num;
+	int i, base;
+	char *str, *end, c;
+	const char *s;
+
+	int flags;		/* flags to number() */
+
+	int field_width;	/* width of output field */
+	int precision;		/* min. # of digits for integers; max
+				   number of chars for string */
+	int qualifier;		/* 'h', 'l', or 'L' for integer fields */
+				/* 'z' support added 23/7/1999 S.H.    */
+				/* 'z' changed to 'Z' --davidm 1/25/99 */
+
+	str = buf;
+	end = buf + size - 1;
+
+	if (end < buf - 1) {
+		end = ((void *) -1);
+		size = end - buf + 1;
+	}
+
+	for (; *fmt ; ++fmt) {
+		if (*fmt != '%') {
+			if (str <= end)
+				*str = *fmt;
+			++str;
+			continue;
+		}
+
+		/* process flags */
+		flags = 0;
+		repeat:
+			++fmt;		/* this also skips first '%' */
+			switch (*fmt) {
+				case '-': flags |= _kc_LEFT; goto repeat;
+				case '+': flags |= _kc_PLUS; goto repeat;
+				case ' ': flags |= _kc_SPACE; goto repeat;
+				case '#': flags |= _kc_SPECIAL; goto repeat;
+				case '0': flags |= _kc_ZEROPAD; goto repeat;
+			}
+
+		/* get field width */
+		field_width = -1;
+		if (isdigit(*fmt))
+			field_width = skip_atoi(&fmt);
+		else if (*fmt == '*') {
+			++fmt;
+			/* it's the next argument */
+			field_width = va_arg(args, int);
+			if (field_width < 0) {
+				field_width = -field_width;
+				flags |= _kc_LEFT;
+			}
+		}
+
+		/* get the precision */
+		precision = -1;
+		if (*fmt == '.') {
+			++fmt;
+			if (isdigit(*fmt))
+				precision = skip_atoi(&fmt);
+			else if (*fmt == '*') {
+				++fmt;
+				/* it's the next argument */
+				precision = va_arg(args, int);
+			}
+			if (precision < 0)
+				precision = 0;
+		}
+
+		/* get the conversion qualifier */
+		qualifier = -1;
+		if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') {
+			qualifier = *fmt;
+			++fmt;
+		}
+
+		/* default base */
+		base = 10;
+
+		switch (*fmt) {
+			case 'c':
+				if (!(flags & _kc_LEFT)) {
+					while (--field_width > 0) {
+						if (str <= end)
+							*str = ' ';
+						++str;
+					}
+				}
+				c = (unsigned char) va_arg(args, int);
+				if (str <= end)
+					*str = c;
+				++str;
+				while (--field_width > 0) {
+					if (str <= end)
+						*str = ' ';
+					++str;
+				}
+				continue;
+
+			case 's':
+				s = va_arg(args, char *);
+				if (!s)
+					s = "<NULL>";
+
+				len = strnlen(s, precision);
+
+				if (!(flags & _kc_LEFT)) {
+					while (len < field_width--) {
+						if (str <= end)
+							*str = ' ';
+						++str;
+					}
+				}
+				for (i = 0; i < len; ++i) {
+					if (str <= end)
+						*str = *s;
+					++str; ++s;
+				}
+				while (len < field_width--) {
+					if (str <= end)
+						*str = ' ';
+					++str;
+				}
+				continue;
+
+			case 'p':
+				if ('M' == *(fmt+1)) {
+					str = get_mac(str, end, va_arg(args, unsigned char *));
+					fmt++;
+				} else	{
+					if (field_width == -1) {
+						field_width = 2*sizeof(void *);
+						flags |= _kc_ZEROPAD;
+					}
+					str = number(str, end,
+							(unsigned long) va_arg(args, void *),
+							16, field_width, precision, flags);
+				}
+				continue;
+
+			case 'n':
+				/* FIXME:
+				 * What does C99 say about the overflow
+				 * case here?
+				 */
+				if (qualifier == 'l') {
+					long * ip = va_arg(args, long *);
+					*ip = (str - buf);
+				} else if (qualifier == 'Z') {
+					size_t * ip = va_arg(args, size_t *);
+					*ip = (str - buf);
+				} else {
+					int * ip = va_arg(args, int *);
+					*ip = (str - buf);
+				}
+				continue;
+
+			case '%':
+				if (str <= end)
+					*str = '%';
+				++str;
+				continue;
+
+				/* integer number formats - set up the flags and "break" */
+			case 'o':
+				base = 8;
+				break;
+
+			case 'X':
+				flags |= _kc_LARGE;
+			case 'x':
+				base = 16;
+				break;
+
+			case 'd':
+			case 'i':
+				flags |= _kc_SIGN;
+			case 'u':
+				break;
+
+			default:
+				if (str <= end)
+					*str = '%';
+				++str;
+				if (*fmt) {
+					if (str <= end)
+						*str = *fmt;
+					++str;
+				} else {
+					--fmt;
+				}
+				continue;
+		}
+		if (qualifier == 'L')
+			num = va_arg(args, long long);
+		else if (qualifier == 'l') {
+			num = va_arg(args, unsigned long);
+			if (flags & _kc_SIGN)
+				num = (signed long) num;
+		} else if (qualifier == 'Z') {
+			num = va_arg(args, size_t);
+		} else if (qualifier == 'h') {
+			num = (unsigned short) va_arg(args, int);
+			if (flags & _kc_SIGN)
+				num = (signed short) num;
+		} else {
+			num = va_arg(args, unsigned int);
+			if (flags & _kc_SIGN)
+				num = (signed int) num;
+		}
+		str = number(str, end, num, base,
+				field_width, precision, flags);
+	}
+	if (str <= end)
+		*str = '\0';
+	else if (size > 0)
+		/* don't write out a null byte if the buf size is zero */
+		*end = '\0';
+	/* the trailing null byte doesn't count towards the total
+	 * ++str;
+	 */
+	return str-buf;
+}
+
+int _kc_snprintf(char * buf, size_t size, const char *fmt, ...)
+{
+	va_list args;
+	int i;
+
+	va_start(args, fmt);
+	i = _kc_vsnprintf(buf,size,fmt,args);
+	va_end(args);
+	return i;
+}
+#endif /* < 2.4.8 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
+
+/**************************************/
+/* PCI DMA MAPPING */
+
+#if defined(CONFIG_HIGHMEM)
+
+#ifndef PCI_DRAM_OFFSET
+#define PCI_DRAM_OFFSET 0
+#endif
+
+u64
+_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
+                 size_t size, int direction)
+{
+	return (((u64) (page - mem_map) << PAGE_SHIFT) + offset +
+		PCI_DRAM_OFFSET);
+}
+
+#else /* CONFIG_HIGHMEM */
+
+u64
+_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
+                 size_t size, int direction)
+{
+	return pci_map_single(dev, (void *)page_address(page) + offset, size,
+			      direction);
+}
+
+#endif /* CONFIG_HIGHMEM */
+
+void
+_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size,
+                   int direction)
+{
+	return pci_unmap_single(dev, dma_addr, size, direction);
+}
+
+#endif /* 2.4.13 => 2.4.3 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
+
+/**************************************/
+/* PCI DRIVER API */
+
+int
+_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
+{
+	if (!pci_dma_supported(dev, mask))
+		return -EIO;
+	dev->dma_mask = mask;
+	return 0;
+}
+
+int
+_kc_pci_request_regions(struct pci_dev *dev, char *res_name)
+{
+	int i;
+
+	for (i = 0; i < 6; i++) {
+		if (pci_resource_len(dev, i) == 0)
+			continue;
+
+		if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
+			if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
+				pci_release_regions(dev);
+				return -EBUSY;
+			}
+		} else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
+			if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
+				pci_release_regions(dev);
+				return -EBUSY;
+			}
+		}
+	}
+	return 0;
+}
+
+void
+_kc_pci_release_regions(struct pci_dev *dev)
+{
+	int i;
+
+	for (i = 0; i < 6; i++) {
+		if (pci_resource_len(dev, i) == 0)
+			continue;
+
+		if (pci_resource_flags(dev, i) & IORESOURCE_IO)
+			release_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
+
+		else if (pci_resource_flags(dev, i) & IORESOURCE_MEM)
+			release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
+	}
+}
+
+/**************************************/
+/* NETWORK DRIVER API */
+
+struct net_device *
+_kc_alloc_etherdev(int sizeof_priv)
+{
+	struct net_device *dev;
+	int alloc_size;
+
+	alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31;
+	dev = kzalloc(alloc_size, GFP_KERNEL);
+	if (!dev)
+		return NULL;
+
+	if (sizeof_priv)
+		dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31);
+	dev->name[0] = '\0';
+	ether_setup(dev);
+
+	return dev;
+}
+
+int
+_kc_is_valid_ether_addr(u8 *addr)
+{
+	const char zaddr[6] = { 0, };
+
+	return !(addr[0] & 1) && memcmp(addr, zaddr, 6);
+}
+
+#endif /* 2.4.3 => 2.4.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
+
+int
+_kc_pci_set_power_state(struct pci_dev *dev, int state)
+{
+	return 0;
+}
+
+int
+_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable)
+{
+	return 0;
+}
+
+#endif /* 2.4.6 => 2.4.3 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page,
+                            int off, int size)
+{
+	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+	frag->page = page;
+	frag->page_offset = off;
+	frag->size = size;
+	skb_shinfo(skb)->nr_frags = i + 1;
+}
+
+/*
+ * Original Copyright:
+ * find_next_bit.c: fallback find next bit implementation
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The maximum size to search
+ */
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+                            unsigned long offset)
+{
+	const unsigned long *p = addr + BITOP_WORD(offset);
+	unsigned long result = offset & ~(BITS_PER_LONG-1);
+	unsigned long tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset %= BITS_PER_LONG;
+	if (offset) {
+		tmp = *(p++);
+		tmp &= (~0UL << offset);
+		if (size < BITS_PER_LONG)
+			goto found_first;
+		if (tmp)
+			goto found_middle;
+		size -= BITS_PER_LONG;
+		result += BITS_PER_LONG;
+	}
+	while (size & ~(BITS_PER_LONG-1)) {
+		if ((tmp = *(p++)))
+			goto found_middle;
+		result += BITS_PER_LONG;
+		size -= BITS_PER_LONG;
+	}
+	if (!size)
+		return result;
+	tmp = *p;
+
+found_first:
+	tmp &= (~0UL >> (BITS_PER_LONG - size));
+	if (tmp == 0UL)		/* Are any bits set? */
+		return result + size;	/* Nope. */
+found_middle:
+	return result + ffs(tmp);
+}
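+
+/* Caller pattern (sketch) for this fallback: walk every set bit in a
+ * bitmap; process_bit() stands in for the caller's per-bit work:
+ *	for (bit = find_next_bit(map, nbits, 0); bit < nbits;
+ *	     bit = find_next_bit(map, nbits, bit + 1))
+ *		process_bit(bit);
+ */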
+
+size_t _kc_strlcpy(char *dest, const char *src, size_t size)
+{
+	size_t ret = strlen(src);
+
+	if (size) {
+		size_t len = (ret >= size) ? size - 1 : ret;
+		memcpy(dest, src, len);
+		dest[len] = '\0';
+	}
+	return ret;
+}
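+
+/* Usage note: like strlcpy(), this always NUL-terminates when size > 0
+ * and returns strlen(src), so a return value >= size signals truncation:
+ *	if (_kc_strlcpy(buf, name, sizeof(buf)) >= sizeof(buf))
+ *		... name was truncated ...
+ */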
+
+#ifndef do_div
+#if BITS_PER_LONG == 32
+uint32_t __attribute__((weak)) _kc__div64_32(uint64_t *n, uint32_t base)
+{
+	uint64_t rem = *n;
+	uint64_t b = base;
+	uint64_t res, d = 1;
+	uint32_t high = rem >> 32;
+
+	/* Reduce the thing a bit first */
+	res = 0;
+	if (high >= base) {
+		high /= base;
+		res = (uint64_t) high << 32;
+		rem -= (uint64_t) (high*base) << 32;
+	}
+
+	while ((int64_t)b > 0 && b < rem) {
+		b = b+b;
+		d = d+d;
+	}
+
+	do {
+		if (rem >= b) {
+			rem -= b;
+			res += d;
+		}
+		b >>= 1;
+		d >>= 1;
+	} while (d);
+
+	*n = res;
+	return rem;
+}
+#endif /* BITS_PER_LONG == 32 */
+#endif /* do_div */
+#endif /* 2.6.0 => 2.4.6 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
+int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...)
+{
+	va_list args;
+	int i;
+
+	va_start(args, fmt);
+	i = vsnprintf(buf, size, fmt, args);
+	va_end(args);
+	return (i >= size) ? (size - 1) : i;
+}
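+
+/* Unlike vsnprintf(), which reports the length that would have been
+ * written, this fallback returns the number of characters actually
+ * stored in buf (at most size - 1), matching the scnprintf() contract.
+ */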
+#endif /* < 2.6.4 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
+DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1};
+#endif /* < 2.6.10 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) )
+char *_kc_kstrdup(const char *s, unsigned int gfp)
+{
+	size_t len;
+	char *buf;
+
+	if (!s)
+		return NULL;
+
+	len = strlen(s) + 1;
+	buf = kmalloc(len, gfp);
+	if (buf)
+		memcpy(buf, s, len);
+	return buf;
+}
+#endif /* < 2.6.13 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
+void *_kc_kzalloc(size_t size, int flags)
+{
+	void *ret = kmalloc(size, flags);
+	if (ret)
+		memset(ret, 0, size);
+	return ret;
+}
+#endif /* <= 2.6.13 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
+int _kc_skb_pad(struct sk_buff *skb, int pad)
+{
+	int ntail;
+
+	/* If the skbuff is nonlinear, tailroom is always zero. */
+	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
+		memset(skb->data + skb->len, 0, pad);
+		return 0;
+	}
+
+	ntail = skb->data_len + pad - (skb->end - skb->tail);
+	if (likely(skb_cloned(skb) || ntail > 0)) {
+		if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC))
+			goto free_skb;
+	}
+
+#ifdef MAX_SKB_FRAGS
+	if (skb_is_nonlinear(skb) &&
+	    !__pskb_pull_tail(skb, skb->data_len))
+		goto free_skb;
+
+#endif
+	memset(skb->data + skb->len, 0, pad);
+	return 0;
+
+free_skb:
+	kfree_skb(skb);
+	return -ENOMEM;
+}
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
+int _kc_pci_save_state(struct pci_dev *pdev)
+{
+	struct adapter_struct *adapter = pci_get_drvdata(pdev);
+	int size = PCI_CONFIG_SPACE_LEN, i;
+	u16 pcie_cap_offset, pcie_link_status;
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
+	/* no ->dev for 2.4 kernels */
+	WARN_ON(pdev->dev.driver_data == NULL);
+#endif
+	pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+	if (pcie_cap_offset) {
+		if (!pci_read_config_word(pdev,
+					  pcie_cap_offset + PCIE_LINK_STATUS,
+					  &pcie_link_status))
+			size = PCIE_CONFIG_SPACE_LEN;
+	}
+	pci_config_space_ich8lan();
+#ifdef HAVE_PCI_ERS
+	if (adapter->config_space == NULL)
+#else
+	WARN_ON(adapter->config_space != NULL);
+#endif
+		adapter->config_space = kmalloc(size, GFP_KERNEL);
+	if (!adapter->config_space) {
+		printk(KERN_ERR "Out of memory in pci_save_state\n");
+		return -ENOMEM;
+	}
+	for (i = 0; i < (size / 4); i++)
+		pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]);
+	return 0;
+}
+
+void _kc_pci_restore_state(struct pci_dev *pdev)
+{
+	struct adapter_struct *adapter = pci_get_drvdata(pdev);
+	int size = PCI_CONFIG_SPACE_LEN, i;
+	u16 pcie_cap_offset;
+	u16 pcie_link_status;
+
+	if (adapter->config_space != NULL) {
+		pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+		if (pcie_cap_offset &&
+		    !pci_read_config_word(pdev,
+		                          pcie_cap_offset + PCIE_LINK_STATUS,
+		                          &pcie_link_status))
+			size = PCIE_CONFIG_SPACE_LEN;
+
+		pci_config_space_ich8lan();
+		for (i = 0; i < (size / 4); i++)
+			pci_write_config_dword(pdev, i * 4,
+					       adapter->config_space[i]);
+#ifndef HAVE_PCI_ERS
+		kfree(adapter->config_space);
+		adapter->config_space = NULL;
+#endif
+	}
+}
+#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
+
+#ifdef HAVE_PCI_ERS
+void _kc_free_netdev(struct net_device *netdev)
+{
+	struct adapter_struct *adapter = netdev_priv(netdev);
+
+	kfree(adapter->config_space);
+#ifdef CONFIG_SYSFS
+	if (netdev->reg_state == NETREG_UNINITIALIZED) {
+		kfree((char *)netdev - netdev->padded);
+	} else {
+		BUG_ON(netdev->reg_state != NETREG_UNREGISTERED);
+		netdev->reg_state = NETREG_RELEASED;
+		class_device_put(&netdev->class_dev);
+	}
+#else
+	kfree((char *)netdev - netdev->padded);
+#endif
+}
+#endif
+
+void *_kc_kmemdup(const void *src, size_t len, unsigned gfp)
+{
+	void *p;
+
+	p = kzalloc(len, gfp);
+	if (p)
+		memcpy(p, src, len);
+	return p;
+}
+#endif /* < 2.6.19 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
+struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev)
+{
+	return ((struct adapter_struct *)netdev_priv(netdev))->pdev;
+}
+#endif /* < 2.6.21 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
+/* hexdump code taken from lib/hexdump.c */
+static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
+			int groupsize, unsigned char *linebuf,
+			size_t linebuflen, bool ascii)
+{
+	const u8 *ptr = buf;
+	u8 ch;
+	int j, lx = 0;
+	int ascii_column;
+
+	if (rowsize != 16 && rowsize != 32)
+		rowsize = 16;
+
+	if (!len)
+		goto nil;
+	if (len > rowsize)		/* limit to one line at a time */
+		len = rowsize;
+	if ((len % groupsize) != 0)	/* no mixed size output */
+		groupsize = 1;
+
+	switch (groupsize) {
+	case 8: {
+		const u64 *ptr8 = buf;
+		int ngroups = len / groupsize;
+
+		for (j = 0; j < ngroups; j++)
+			lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
+				"%s%16.16llx", j ? " " : "",
+				(unsigned long long)*(ptr8 + j));
+		ascii_column = 17 * ngroups + 2;
+		break;
+	}
+
+	case 4: {
+		const u32 *ptr4 = buf;
+		int ngroups = len / groupsize;
+
+		for (j = 0; j < ngroups; j++)
+			lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
+				"%s%8.8x", j ? " " : "", *(ptr4 + j));
+		ascii_column = 9 * ngroups + 2;
+		break;
+	}
+
+	case 2: {
+		const u16 *ptr2 = buf;
+		int ngroups = len / groupsize;
+
+		for (j = 0; j < ngroups; j++)
+			lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
+				"%s%4.4x", j ? " " : "", *(ptr2 + j));
+		ascii_column = 5 * ngroups + 2;
+		break;
+	}
+
+	default:
+		for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
+			ch = ptr[j];
+			linebuf[lx++] = hex_asc(ch >> 4);
+			linebuf[lx++] = hex_asc(ch & 0x0f);
+			linebuf[lx++] = ' ';
+		}
+		if (j)
+			lx--;
+
+		ascii_column = 3 * rowsize + 2;
+		break;
+	}
+	if (!ascii)
+		goto nil;
+
+	while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
+		linebuf[lx++] = ' ';
+	for (j = 0; (j < len) && (lx + 2) < linebuflen; j++)
+		linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j]
+				: '.';
+nil:
+	linebuf[lx++] = '\0';
+}
+
+void _kc_print_hex_dump(const char *level,
+			const char *prefix_str, int prefix_type,
+			int rowsize, int groupsize,
+			const void *buf, size_t len, bool ascii)
+{
+	const u8 *ptr = buf;
+	int i, linelen, remaining = len;
+	unsigned char linebuf[200];
+
+	if (rowsize != 16 && rowsize != 32)
+		rowsize = 16;
+
+	for (i = 0; i < len; i += rowsize) {
+		linelen = min(remaining, rowsize);
+		remaining -= rowsize;
+		_kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
+				linebuf, sizeof(linebuf), ascii);
+
+		switch (prefix_type) {
+		case DUMP_PREFIX_ADDRESS:
+			printk("%s%s%*p: %s\n", level, prefix_str,
+				(int)(2 * sizeof(void *)), ptr + i, linebuf);
+			break;
+		case DUMP_PREFIX_OFFSET:
+			printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
+			break;
+		default:
+			printk("%s%s%s\n", level, prefix_str, linebuf);
+			break;
+		}
+	}
+}
+
+#endif /* < 2.6.22 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
+#ifdef NAPI
+struct net_device *napi_to_poll_dev(const struct napi_struct *napi)
+{
+	struct adapter_q_vector *q_vector = container_of(napi,
+	                                                struct adapter_q_vector,
+	                                                napi);
+	return &q_vector->poll_dev;
+}
+
+int __kc_adapter_clean(struct net_device *netdev, int *budget)
+{
+	int work_done;
+	int work_to_do = min(*budget, netdev->quota);
+	/* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */
+	struct napi_struct *napi = netdev->priv;
+	work_done = napi->poll(napi, work_to_do);
+	*budget -= work_done;
+	netdev->quota -= work_done;
+	return (work_done >= work_to_do) ? 1 : 0;
+}
+#endif /* NAPI */
+#endif /* < 2.6.24 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
+void _kc_pci_disable_link_state(struct pci_dev *pdev, int state)
+{
+	struct pci_dev *parent = pdev->bus->self;
+	u16 link_state;
+	int pos;
+
+	if (!parent)
+		return;
+
+	pos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+	if (pos) {
+		pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state);
+		link_state &= ~state;
+		pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state);
+	}
+}
+#endif /* < 2.6.26 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
+#ifdef HAVE_TX_MQ
+void _kc_netif_tx_stop_all_queues(struct net_device *netdev)
+{
+	struct adapter_struct *adapter = netdev_priv(netdev);
+	int i;
+
+	netif_stop_queue(netdev);
+	if (netif_is_multiqueue(netdev))
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			netif_stop_subqueue(netdev, i);
+}
+void _kc_netif_tx_wake_all_queues(struct net_device *netdev)
+{
+	struct adapter_struct *adapter = netdev_priv(netdev);
+	int i;
+
+	netif_wake_queue(netdev);
+	if (netif_is_multiqueue(netdev))
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			netif_wake_subqueue(netdev, i);
+}
+void _kc_netif_tx_start_all_queues(struct net_device *netdev)
+{
+	struct adapter_struct *adapter = netdev_priv(netdev);
+	int i;
+
+	netif_start_queue(netdev);
+	if (netif_is_multiqueue(netdev))
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			netif_start_subqueue(netdev, i);
+}
+#endif /* HAVE_TX_MQ */
+
+void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...)
+{
+	va_list args;
+
+	printk(KERN_WARNING "------------[ cut here ]------------\n");
+	printk(KERN_WARNING "WARNING: at %s:%d \n", file, line);
+	va_start(args, fmt);
+	vprintk(fmt, args);
+	va_end(args);
+
+	dump_stack();
+}
+#endif /* < 2.6.27 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
+
+int
+_kc_pci_prepare_to_sleep(struct pci_dev *dev)
+{
+	pci_power_t target_state;
+	int error;
+
+	target_state = pci_choose_state(dev, PMSG_SUSPEND);
+
+	pci_enable_wake(dev, target_state, true);
+
+	error = pci_set_power_state(dev, target_state);
+
+	if (error)
+		pci_enable_wake(dev, target_state, false);
+
+	return error;
+}
+
+int
+_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable)
+{
+	int err;
+
+	err = pci_enable_wake(dev, PCI_D3cold, enable);
+	if (err)
+		goto out;
+
+	err = pci_enable_wake(dev, PCI_D3hot, enable);
+
+out:
+	return err;
+}
+#endif /* < 2.6.28 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
+static void __kc_pci_set_master(struct pci_dev *pdev, bool enable)
+{
+	u16 old_cmd, cmd;
+
+	pci_read_config_word(pdev, PCI_COMMAND, &old_cmd);
+	if (enable)
+		cmd = old_cmd | PCI_COMMAND_MASTER;
+	else
+		cmd = old_cmd & ~PCI_COMMAND_MASTER;
+	if (cmd != old_cmd) {
+		dev_dbg(pci_dev_to_dev(pdev), "%s bus mastering\n",
+			enable ? "enabling" : "disabling");
+		pci_write_config_word(pdev, PCI_COMMAND, cmd);
+	}
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,7) )
+	pdev->is_busmaster = enable;
+#endif
+}
+
+void _kc_pci_clear_master(struct pci_dev *dev)
+{
+	__kc_pci_set_master(dev, false);
+}
+#endif /* < 2.6.29 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
+int _kc_pci_num_vf(struct pci_dev __maybe_unused *dev)
+{
+	int num_vf = 0;
+#ifdef CONFIG_PCI_IOV
+	struct pci_dev *vfdev;
+
+	/* loop through all ethernet devices starting at PF dev */
+	vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL);
+	while (vfdev) {
+		if (vfdev->is_virtfn && vfdev->physfn == dev)
+			num_vf++;
+
+		vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev);
+	}
+
+#endif
+	return num_vf;
+}
+#endif /* RHEL_RELEASE_CODE */
+#endif /* < 2.6.34 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
+#ifdef HAVE_TX_MQ
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
+#ifndef CONFIG_NETDEVICES_MULTIQUEUE
+int _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
+{
+	unsigned int real_num = dev->real_num_tx_queues;
+	struct Qdisc *qdisc;
+	int i;
+
+	if (txq < 1 || txq > dev->num_tx_queues)
+		return -EINVAL;
+
+	else if (txq > real_num)
+		dev->real_num_tx_queues = txq;
+	else if (txq < real_num) {
+		dev->real_num_tx_queues = txq;
+		for (i = txq; i < dev->num_tx_queues; i++) {
+			qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+			if (qdisc) {
+				spin_lock_bh(qdisc_lock(qdisc));
+				qdisc_reset(qdisc);
+				spin_unlock_bh(qdisc_lock(qdisc));
+			}
+		}
+	}
+
+	return 0;
+}
+#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
+#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
+#endif /* HAVE_TX_MQ */
+
+ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
+				   const void __user *from, size_t count)
+{
+	loff_t pos = *ppos;
+	size_t res;
+
+	if (pos < 0)
+		return -EINVAL;
+	if (pos >= available || !count)
+		return 0;
+	if (count > available - pos)
+		count = available - pos;
+	res = copy_from_user(to + pos, from, count);
+	if (res == count)
+		return -EFAULT;
+	count -= res;
+	*ppos = pos + count;
+	return count;
+}
+
+#endif /* < 2.6.35 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
+static const u32 _kc_flags_dup_features =
+	(ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH);
+
+u32 _kc_ethtool_op_get_flags(struct net_device *dev)
+{
+	return dev->features & _kc_flags_dup_features;
+}
+
+int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported)
+{
+	if (data & ~supported)
+		return -EINVAL;
+
+	dev->features = ((dev->features & ~_kc_flags_dup_features) |
+			 (data & _kc_flags_dup_features));
+	return 0;
+}
+#endif /* < 2.6.36 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)))
+#ifdef HAVE_NETDEV_SELECT_QUEUE
+#include <net/ip.h>
+#include <linux/pkt_sched.h>
+
+u16 ___kc_skb_tx_hash(struct net_device *dev, const struct sk_buff *skb,
+		      u16 num_tx_queues)
+{
+	u32 hash;
+	u16 qoffset = 0;
+	u16 qcount = num_tx_queues;
+
+	if (skb_rx_queue_recorded(skb)) {
+		hash = skb_get_rx_queue(skb);
+		while (unlikely(hash >= num_tx_queues))
+			hash -= num_tx_queues;
+		return hash;
+	}
+
+	if (skb->sk && skb->sk->sk_hash)
+		hash = skb->sk->sk_hash;
+	else
+#ifdef NETIF_F_RXHASH
+		hash = (__force u16) skb->protocol ^ skb->rxhash;
+#else
+		hash = skb->protocol;
+#endif
+
+	hash = jhash_1word(hash, _kc_hashrnd);
+
+	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
+}
+#endif /* HAVE_NETDEV_SELECT_QUEUE */
+
+u8 _kc_netdev_get_num_tc(struct net_device *dev)
+{
+	struct i40e_netdev_priv *np = netdev_priv(dev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	if (pf->flags & I40E_FLAG_DCB_ENABLED)
+		return vsi->tc_config.numtc;
+
+	return 0;
+}
+
+int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc)
+{
+	struct i40e_netdev_priv *np = netdev_priv(dev);
+	struct i40e_vsi *vsi = np->vsi;
+
+	if (num_tc > I40E_MAX_TRAFFIC_CLASS)
+		return -EINVAL;
+
+	vsi->tc_config.numtc = num_tc;
+
+	return 0;
+}
+
+u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up)
+{
+	struct i40e_netdev_priv *np = netdev_priv(dev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
+
+	return dcbcfg->etscfg.prioritytable[up];
+}
+
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */
+#endif /* < 2.6.39 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
+void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
+			 int off, int size, unsigned int truesize)
+{
+	skb_fill_page_desc(skb, i, page, off, size);
+	skb->len += size;
+	skb->data_len += size;
+	skb->truesize += truesize;
+}
+
+#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
+int _kc_simple_open(struct inode *inode, struct file *file)
+{
+	if (inode->i_private)
+		file->private_data = inode->i_private;
+
+	return 0;
+}
+#endif /* SLE_VERSION < 11,3,0 */
+
+#endif /* < 3.4.0 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) )
+static inline int __kc_pcie_cap_version(struct pci_dev *dev)
+{
+	int pos;
+	u16 reg16;
+
+	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+	if (!pos)
+		return 0;
+	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
+	return reg16 & PCI_EXP_FLAGS_VERS;
+}
+
+static inline bool __kc_pcie_cap_has_devctl(const struct pci_dev __always_unused *dev)
+{
+	return true;
+}
+
+static inline bool __kc_pcie_cap_has_lnkctl(struct pci_dev *dev)
+{
+	int type = pci_pcie_type(dev);
+
+	return __kc_pcie_cap_version(dev) > 1 ||
+	       type == PCI_EXP_TYPE_ROOT_PORT ||
+	       type == PCI_EXP_TYPE_ENDPOINT ||
+	       type == PCI_EXP_TYPE_LEG_END;
+}
+
+static inline bool __kc_pcie_cap_has_sltctl(struct pci_dev *dev)
+{
+	int type = pci_pcie_type(dev);
+	int pos;
+	u16 pcie_flags_reg;
+
+	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+	if (!pos)
+		return false;
+	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &pcie_flags_reg);
+
+	return __kc_pcie_cap_version(dev) > 1 ||
+	       type == PCI_EXP_TYPE_ROOT_PORT ||
+	       (type == PCI_EXP_TYPE_DOWNSTREAM &&
+		pcie_flags_reg & PCI_EXP_FLAGS_SLOT);
+}
+
+static inline bool __kc_pcie_cap_has_rtctl(struct pci_dev *dev)
+{
+	int type = pci_pcie_type(dev);
+
+	return __kc_pcie_cap_version(dev) > 1 ||
+	       type == PCI_EXP_TYPE_ROOT_PORT ||
+	       type == PCI_EXP_TYPE_RC_EC;
+}
+
+static bool __kc_pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
+{
+	if (!pci_is_pcie(dev))
+		return false;
+
+	switch (pos) {
+	case PCI_EXP_FLAGS_TYPE:
+		return true;
+	case PCI_EXP_DEVCAP:
+	case PCI_EXP_DEVCTL:
+	case PCI_EXP_DEVSTA:
+		return __kc_pcie_cap_has_devctl(dev);
+	case PCI_EXP_LNKCAP:
+	case PCI_EXP_LNKCTL:
+	case PCI_EXP_LNKSTA:
+		return __kc_pcie_cap_has_lnkctl(dev);
+	case PCI_EXP_SLTCAP:
+	case PCI_EXP_SLTCTL:
+	case PCI_EXP_SLTSTA:
+		return __kc_pcie_cap_has_sltctl(dev);
+	case PCI_EXP_RTCTL:
+	case PCI_EXP_RTCAP:
+	case PCI_EXP_RTSTA:
+		return __kc_pcie_cap_has_rtctl(dev);
+	case PCI_EXP_DEVCAP2:
+	case PCI_EXP_DEVCTL2:
+	case PCI_EXP_LNKCAP2:
+	case PCI_EXP_LNKCTL2:
+	case PCI_EXP_LNKSTA2:
+		return __kc_pcie_cap_version(dev) > 1;
+	default:
+		return false;
+	}
+}
+
+/*
+ * Note that these accessor functions are only for the "PCI Express
+ * Capability" (see PCIe spec r3.0, sec 7.8).  They do not apply to the
+ * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
+ */
+int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
+{
+	int ret;
+
+	*val = 0;
+	if (pos & 1)
+		return -EINVAL;
+
+	if (__kc_pcie_capability_reg_implemented(dev, pos)) {
+		ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
+		/*
+		 * Reset *val to 0 if pci_read_config_word() fails; it may
+		 * have been written as 0xFFFF if a hardware error occurred
+		 * during pci_read_config_word().
+		 */
+		if (ret)
+			*val = 0;
+		return ret;
+	}
+
+	/*
+	 * For Functions that do not implement the Slot Capabilities,
+	 * Slot Status, and Slot Control registers, these spaces must
+	 * be hardwired to 0b, with the exception of the Presence Detect
+	 * State bit in the Slot Status register of Downstream Ports,
+	 * which must be hardwired to 1b.  (PCIe Base Spec 3.0, sec 7.8)
+	 */
+	if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
+	    pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
+		*val = PCI_EXP_SLTSTA_PDS;
+	}
+
+	return 0;
+}
+
+int __kc_pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
+{
+	int ret;
+
+	*val = 0;
+	if (pos & 3)
+		return -EINVAL;
+
+	if (__kc_pcie_capability_reg_implemented(dev, pos)) {
+		ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
+		/*
+		 * Reset *val to 0 if pci_read_config_dword() fails; it may
+		 * have been written as 0xFFFFFFFF if a hardware error
+		 * occurred during pci_read_config_dword().
+		 */
+		if (ret)
+			*val = 0;
+		return ret;
+	}
+
+	if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
+	    pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
+		*val = PCI_EXP_SLTSTA_PDS;
+	}
+
+	return 0;
+}
+
+int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
+{
+	if (pos & 1)
+		return -EINVAL;
+
+	if (!__kc_pcie_capability_reg_implemented(dev, pos))
+		return 0;
+
+	return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
+}
+
+int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
+					    u16 clear, u16 set)
+{
+	int ret;
+	u16 val;
+
+	ret = __kc_pcie_capability_read_word(dev, pos, &val);
+	if (!ret) {
+		val &= ~clear;
+		val |= set;
+		ret = __kc_pcie_capability_write_word(dev, pos, val);
+	}
+
+	return ret;
+}
+
+int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos,
+					     u16 clear)
+{
+	return __kc_pcie_capability_clear_and_set_word(dev, pos, clear, 0);
+}
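+
+/* Illustrative read-modify-write through these accessors (hypothetical
+ * caller clearing the Extended Tag enable in Device Control):
+ *	__kc_pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
+ *						PCI_EXP_DEVCTL_EXT_TAG, 0);
+ * One call reads DEVCTL, clears the bit and writes the register back.
+ */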
+#endif /* < 3.7.0 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) )
+#ifdef CONFIG_XPS
+#if NR_CPUS < 64
+#define _KC_MAX_XPS_CPUS	NR_CPUS
+#else
+#define _KC_MAX_XPS_CPUS	64
+#endif
+
+/*
+ * netdev_queue sysfs structures and functions.
+ */
+struct _kc_netdev_queue_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct netdev_queue *queue,
+	    struct _kc_netdev_queue_attribute *attr, char *buf);
+	ssize_t (*store)(struct netdev_queue *queue,
+	    struct _kc_netdev_queue_attribute *attr, const char *buf, size_t len);
+};
+
+#define to_kc_netdev_queue_attr(_attr) container_of(_attr,		\
+    struct _kc_netdev_queue_attribute, attr)
+
+int __kc_netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
+			     u16 index)
+{
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, index);
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) )
+	/* Redhat requires some odd extended netdev structures */
+	struct netdev_tx_queue_extended *txq_ext =
+					netdev_extended(dev)->_tx_ext + index;
+	struct kobj_type *ktype = txq_ext->kobj.ktype;
+#else
+	struct kobj_type *ktype = txq->kobj.ktype;
+#endif
+	struct _kc_netdev_queue_attribute *xps_attr;
+	struct attribute *attr = NULL;
+	int i, len, err;
+#define _KC_XPS_BUFLEN	(DIV_ROUND_UP(_KC_MAX_XPS_CPUS, 32) * 9)
+	char buf[_KC_XPS_BUFLEN];
+
+	if (!ktype)
+		return -ENOMEM;
+
+	/* attempt to locate the XPS attribute in the Tx queue */
+	for (i = 0; (attr = ktype->default_attrs[i]); i++) {
+		if (!strcmp("xps_cpus", attr->name))
+			break;
+	}
+
+	/* if we did not find it return an error */
+	if (!attr)
+		return -EINVAL;
+
+	/* copy the mask into a string */
+	len = bitmap_scnprintf(buf, _KC_XPS_BUFLEN,
+			       cpumask_bits(mask), _KC_MAX_XPS_CPUS);
+	if (!len)
+		return -ENOMEM;
+
+	xps_attr = to_kc_netdev_queue_attr(attr);
+
+	/* Store the XPS value using the SYSFS store call */
+	err = xps_attr->store(txq, xps_attr, buf, len);
+
+	/* we only had an error on err < 0 */
+	return (err < 0) ? err : 0;
+}
+#endif /* CONFIG_XPS */
+#ifdef HAVE_NETDEV_SELECT_QUEUE
+static inline int kc_get_xps_queue(struct net_device *dev, struct sk_buff *skb)
+{
+#ifdef CONFIG_XPS
+	struct xps_dev_maps *dev_maps;
+	struct xps_map *map;
+	int queue_index = -1;
+
+	rcu_read_lock();
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) )
+	/* Redhat requires some odd extended netdev structures */
+	dev_maps = rcu_dereference(netdev_extended(dev)->xps_maps);
+#else
+	dev_maps = rcu_dereference(dev->xps_maps);
+#endif
+	if (dev_maps) {
+		map = rcu_dereference(
+		    dev_maps->cpu_map[raw_smp_processor_id()]);
+		if (map) {
+			if (map->len == 1)
+				queue_index = map->queues[0];
+			else {
+				u32 hash;
+				if (skb->sk && skb->sk->sk_hash)
+					hash = skb->sk->sk_hash;
+				else
+					hash = (__force u16) skb->protocol ^
+					    skb->rxhash;
+				hash = jhash_1word(hash, _kc_hashrnd);
+				queue_index = map->queues[
+				    ((u64)hash * map->len) >> 32];
+			}
+			if (unlikely(queue_index >= dev->real_num_tx_queues))
+				queue_index = -1;
+		}
+	}
+	rcu_read_unlock();
+
+	return queue_index;
+#else
+	return -1;
+#endif
+}
+
+u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
+{
+	struct sock *sk = skb->sk;
+	int queue_index = sk_tx_queue_get(sk);
+	int new_index;
+
+	if (queue_index >= 0 && queue_index < dev->real_num_tx_queues) {
+#ifdef CONFIG_XPS
+		if (!skb->ooo_okay)
+#endif
+			return queue_index;
+	}
+
+	new_index = kc_get_xps_queue(dev, skb);
+	if (new_index < 0)
+		new_index = skb_tx_hash(dev, skb);
+
+	if (queue_index != new_index && sk) {
+		struct dst_entry *dst =
+			    rcu_dereference(sk->sk_dst_cache);
+
+		if (dst && skb_dst(skb) == dst)
+			sk_tx_queue_set(sk, new_index);
+
+	}
+
+	return new_index;
+}
+
+#endif /* HAVE_NETDEV_SELECT_QUEUE */
+#endif /* 3.9.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
+#ifdef HAVE_FDB_OPS
+#ifdef USE_CONST_DEV_UC_CHAR
+int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+			  struct net_device *dev, const unsigned char *addr,
+			  u16 flags)
+#else
+int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev,
+			  unsigned char *addr, u16 flags)
+#endif
+{
+	int err = -EINVAL;
+
+	/* If aging addresses are supported, the device will need to
+	 * implement its own handler for this.
+	 */
+	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+		pr_info("%s: FDB only supports static addresses\n", dev->name);
+		return err;
+	}
+
+	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+		err = dev_uc_add_excl(dev, addr);
+	else if (is_multicast_ether_addr(addr))
+		err = dev_mc_add_excl(dev, addr);
+
+	/* Only return duplicate errors if NLM_F_EXCL is set */
+	if (err == -EEXIST && !(flags & NLM_F_EXCL))
+		err = 0;
+
+	return err;
+}
+
+#ifdef USE_CONST_DEV_UC_CHAR
+#ifdef HAVE_FDB_DEL_NLATTR
+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+			  struct net_device *dev, const unsigned char *addr)
+#else
+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev,
+			  const unsigned char *addr)
+#endif
+#else
+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev,
+			  unsigned char *addr)
+#endif
+{
+	int err = -EINVAL;
+
+	/* If aging addresses are supported, the device will need to
+	 * implement its own handler for this.
+	 */
+	if (!(ndm->ndm_state & NUD_PERMANENT)) {
+		pr_info("%s: FDB only supports static addresses\n", dev->name);
+		return err;
+	}
+
+	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+		err = dev_uc_del(dev, addr);
+	else if (is_multicast_ether_addr(addr))
+		err = dev_mc_del(dev, addr);
+
+	return err;
+}
+
+#endif /* HAVE_FDB_OPS */
+#ifdef CONFIG_PCI_IOV
+int __kc_pci_vfs_assigned(struct pci_dev __maybe_unused *dev)
+{
+	unsigned int vfs_assigned = 0;
+#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED
+	int pos;
+	struct pci_dev *vfdev;
+	unsigned short dev_id;
+
+	/* only search if we are a PF */
+	if (!dev->is_physfn)
+		return 0;
+
+	/* find SR-IOV capability */
+	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+	if (!pos)
+		return 0;
+
+	/*
+	 * determine the device ID for the VFs; the vendor ID will be the
+	 * same as the PF's, so there is no need to check it
+	 */
+	pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id);
+
+	/* loop through all the VFs to see if we own any that are assigned */
+	vfdev = pci_get_device(dev->vendor, dev_id, NULL);
+	while (vfdev) {
+		/*
+		 * It is considered assigned if it is a virtual function with
+		 * our dev as the physical function and the assigned bit is set
+		 */
+		if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
+		    (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED))
+			vfs_assigned++;
+
+		vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
+	}
+
+#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */
+	return vfs_assigned;
+}
+
+#endif /* CONFIG_PCI_IOV */
+#endif /* 3.10.0 */
+
+static const unsigned char __maybe_unused pcie_link_speed[] = {
+	PCI_SPEED_UNKNOWN,      /* 0 */
+	PCIE_SPEED_2_5GT,       /* 1 */
+	PCIE_SPEED_5_0GT,       /* 2 */
+	PCIE_SPEED_8_0GT,       /* 3 */
+	PCIE_SPEED_16_0GT,      /* 4 */
+	PCI_SPEED_UNKNOWN,      /* 5 */
+	PCI_SPEED_UNKNOWN,      /* 6 */
+	PCI_SPEED_UNKNOWN,      /* 7 */
+	PCI_SPEED_UNKNOWN,      /* 8 */
+	PCI_SPEED_UNKNOWN,      /* 9 */
+	PCI_SPEED_UNKNOWN,      /* A */
+	PCI_SPEED_UNKNOWN,      /* B */
+	PCI_SPEED_UNKNOWN,      /* C */
+	PCI_SPEED_UNKNOWN,      /* D */
+	PCI_SPEED_UNKNOWN,      /* E */
+	PCI_SPEED_UNKNOWN       /* F */
+};
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) )
+int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
+			       enum pcie_link_width *width)
+{
+	int ret;
+
+	*speed = PCI_SPEED_UNKNOWN;
+	*width = PCIE_LNK_WIDTH_UNKNOWN;
+
+	while (dev) {
+		u16 lnksta;
+		enum pci_bus_speed next_speed;
+		enum pcie_link_width next_width;
+
+		ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
+		if (ret)
+			return ret;
+
+		next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
+		next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
+			PCI_EXP_LNKSTA_NLW_SHIFT;
+
+		if (next_speed < *speed)
+			*speed = next_speed;
+
+		if (next_width < *width)
+			*width = next_width;
+
+		dev = dev->bus->self;
+	}
+
+	return 0;
+}
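+
+/* This walks from the device up through every upstream bridge and keeps
+ * the slowest speed and narrowest width seen, i.e. the effective
+ * end-to-end link rather than just the endpoint's own link registers.
+ */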
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,7))
+int _kc_pci_wait_for_pending_transaction(struct pci_dev *dev)
+{
+	int i;
+	u16 status;
+
+	/* Wait for Transaction Pending bit clean */
+	for (i = 0; i < 4; i++) {
+		if (i)
+			msleep((1 << (i - 1)) * 100);
+
+		pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
+		if (!(status & PCI_EXP_DEVSTA_TRPND))
+			return 1;
+	}
+
+	return 0;
+}
+#endif /* <RHEL6.7 */
+
+#endif /* <3.12 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) )
+int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask)
+{
+	int err = dma_set_mask(dev, mask);
+
+	if (!err)
+		/* A coherent mask of the same size will always succeed if
+		 * dma_set_mask() does. However, we store the error anyway,
+		 * because some kernels apply gcc's warn_unused_result to
+		 * their definition of dma_set_coherent_mask().
+		 */
+		err = dma_set_coherent_mask(dev, mask);
+	return err;
+}
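+
+/* Usage sketch, the common probe-time pattern this helper supports:
+ * try a 64-bit DMA mask first and fall back to 32-bit:
+ *	if (__kc_dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
+ *		err = __kc_dma_set_mask_and_coherent(&pdev->dev,
+ *						     DMA_BIT_MASK(32));
+ */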
+#endif /* 3.13.0 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) )
+/******************************************************************************
+ * ripped from linux/net/ipv6/exthdrs_core.c, GPL2, no direct copyright,
+ * inferred copyright from kernel
+ */
+int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
+		       int target, unsigned short *fragoff, int *flags)
+{
+	unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
+	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
+	unsigned int len;
+	bool found;
+
+#define __KC_IP6_FH_F_FRAG	BIT(0)
+#define __KC_IP6_FH_F_AUTH	BIT(1)
+#define __KC_IP6_FH_F_SKIP_RH	BIT(2)
+
+	if (fragoff)
+		*fragoff = 0;
+
+	if (*offset) {
+		struct ipv6hdr _ip6, *ip6;
+
+		ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6);
+		if (!ip6 || (ip6->version != 6)) {
+			printk(KERN_ERR "IPv6 header not found\n");
+			return -EBADMSG;
+		}
+		start = *offset + sizeof(struct ipv6hdr);
+		nexthdr = ip6->nexthdr;
+	}
+	len = skb->len - start;
+
+	do {
+		struct ipv6_opt_hdr _hdr, *hp;
+		unsigned int hdrlen;
+		found = (nexthdr == target);
+
+		if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
+			if (target < 0 || found)
+				break;
+			return -ENOENT;
+		}
+
+		hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
+		if (!hp)
+			return -EBADMSG;
+
+		if (nexthdr == NEXTHDR_ROUTING) {
+			struct ipv6_rt_hdr _rh, *rh;
+
+			rh = skb_header_pointer(skb, start, sizeof(_rh),
+						&_rh);
+			if (!rh)
+				return -EBADMSG;
+
+			if (flags && (*flags & __KC_IP6_FH_F_SKIP_RH) &&
+			    rh->segments_left == 0)
+				found = false;
+		}
+
+		if (nexthdr == NEXTHDR_FRAGMENT) {
+			unsigned short _frag_off;
+			__be16 *fp;
+
+			if (flags)	/* Indicate that this is a fragment */
+				*flags |= __KC_IP6_FH_F_FRAG;
+			fp = skb_header_pointer(skb,
+						start+offsetof(struct frag_hdr,
+							       frag_off),
+						sizeof(_frag_off),
+						&_frag_off);
+			if (!fp)
+				return -EBADMSG;
+
+			_frag_off = ntohs(*fp) & ~0x7;
+			if (_frag_off) {
+				if (target < 0 &&
+				    ((!ipv6_ext_hdr(hp->nexthdr)) ||
+				     hp->nexthdr == NEXTHDR_NONE)) {
+					if (fragoff)
+						*fragoff = _frag_off;
+					return hp->nexthdr;
+				}
+				return -ENOENT;
+			}
+			hdrlen = 8;
+		} else if (nexthdr == NEXTHDR_AUTH) {
+			if (flags && (*flags & __KC_IP6_FH_F_AUTH) && (target < 0))
+				break;
+			hdrlen = (hp->hdrlen + 2) << 2;
+		} else
+			hdrlen = ipv6_optlen(hp);
+
+		if (!found) {
+			nexthdr = hp->nexthdr;
+			len -= hdrlen;
+			start += hdrlen;
+		}
+	} while (!found);
+
+	*offset = start;
+	return nexthdr;
+}
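+
+/* Usage sketch: with a negative target this returns the first
+ * non-extension protocol number of an IPv6 skb and leaves its offset in
+ * *offset:
+ *	unsigned int offset = 0;
+ *	int nexthdr = __kc_ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
+ */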
+
+int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
+			       int minvec, int maxvec)
+{
+	int nvec = maxvec;
+	int rc;
+
+	if (maxvec < minvec)
+		return -ERANGE;
+
+	do {
+		rc = pci_enable_msix(dev, entries, nvec);
+		if (rc < 0) {
+			return rc;
+		} else if (rc > 0) {
+			if (rc < minvec)
+				return -ENOSPC;
+			nvec = rc;
+		}
+	} while (rc);
+
+	return nvec;
+}
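+
+/* Usage sketch: ask for between minvec and maxvec MSI-X vectors; the
+ * helper retries pci_enable_msix() with whatever count the PCI core
+ * reports as available until it succeeds or falls below minvec
+ * (num_q_vectors is a placeholder for the driver's desired count):
+ *	nvec = __kc_pci_enable_msix_range(pdev, entries, 1, num_q_vectors);
+ *	if (nvec < 0)
+ *		... fall back to MSI or legacy interrupts ...
+ */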
+#endif /* 3.14.0 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0))
+char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
+{
+	size_t size;
+	char *buf;
+
+	if (!s)
+		return NULL;
+
+	size = strlen(s) + 1;
+	buf = devm_kzalloc(dev, size, gfp);
+	if (buf)
+		memcpy(buf, s, size);
+	return buf;
+}
+
+void __kc_netdev_rss_key_fill(void *buffer, size_t len)
+{
+	/* Set of random keys generated using kernel random number generator */
+	static const u8 seed[NETDEV_RSS_KEY_LEN] = {0xE6, 0xFA, 0x35, 0x62,
+				0x95, 0x12, 0x3E, 0xA3, 0xFB, 0x46, 0xC1, 0x5F,
+				0xB1, 0x43, 0x82, 0x5B, 0x6A, 0x49, 0x50, 0x95,
+				0xCD, 0xAB, 0xD8, 0x11, 0x8F, 0xC5, 0xBD, 0xBC,
+				0x6A, 0x4A, 0xB2, 0xD4, 0x1F, 0xFE, 0xBC, 0x41,
+				0xBF, 0xAC, 0xB2, 0x9A, 0x8F, 0x70, 0xE9, 0x2A,
+				0xD7, 0xB2, 0x80, 0xB6, 0x5B, 0xAA, 0x9D, 0x20};
+
+	BUG_ON(len > NETDEV_RSS_KEY_LEN);
+	memcpy(buffer, seed, len);
+}
+#endif /* 3.15.0 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) )
+#ifdef HAVE_SET_RX_MODE
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list,
+		struct net_device *dev,
+		int (*sync)(struct net_device *, const unsigned char *),
+		int (*unsync)(struct net_device *, const unsigned char *))
+{
+	struct netdev_hw_addr *ha, *tmp;
+	int err;
+
+	/* first go through and flush out any stale entries */
+	list_for_each_entry_safe(ha, tmp, &list->list, list) {
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
+		if (!ha->synced || ha->refcount != 1)
+#else
+		if (!ha->sync_cnt || ha->refcount != 1)
+#endif
+			continue;
+
+		if (unsync && unsync(dev, ha->addr))
+			continue;
+
+		list_del_rcu(&ha->list);
+		kfree_rcu(ha, rcu_head);
+		list->count--;
+	}
+
+	/* go through and sync new entries to the list */
+	list_for_each_entry_safe(ha, tmp, &list->list, list) {
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
+		if (ha->synced)
+#else
+		if (ha->sync_cnt)
+#endif
+			continue;
+
+		err = sync(dev, ha->addr);
+		if (err)
+			return err;
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
+		ha->synced = true;
+#else
+		ha->sync_cnt++;
+#endif
+		ha->refcount++;
+	}
+
+	return 0;
+}
+
+void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
+		struct net_device *dev,
+		int (*unsync)(struct net_device *, const unsigned char *))
+{
+	struct netdev_hw_addr *ha, *tmp;
+
+	list_for_each_entry_safe(ha, tmp, &list->list, list) {
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
+		if (!ha->synced)
+#else
+		if (!ha->sync_cnt)
+#endif
+			continue;
+
+		if (unsync && unsync(dev, ha->addr))
+			continue;
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
+		ha->synced = false;
+#else
+		ha->sync_cnt--;
+#endif
+		if (--ha->refcount)
+			continue;
+
+		list_del_rcu(&ha->list);
+		kfree_rcu(ha, rcu_head);
+		list->count--;
+	}
+}
+
+#endif /* NETDEV_HW_ADDR_T_UNICAST */
+#ifndef NETDEV_HW_ADDR_T_MULTICAST
+int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count,
+		struct net_device *dev,
+		int (*sync)(struct net_device *, const unsigned char *),
+		int (*unsync)(struct net_device *, const unsigned char *))
+{
+	struct dev_addr_list *da, **next = list;
+	int err;
+
+	/* first go through and flush out any stale entries */
+	while ((da = *next) != NULL) {
+		if (da->da_synced && da->da_users == 1) {
+			if (!unsync || !unsync(dev, da->da_addr)) {
+				*next = da->next;
+				kfree(da);
+				(*count)--;
+				continue;
+			}
+		}
+		next = &da->next;
+	}
+
+	/* go through and sync new entries to the list */
+	for (da = *list; da != NULL; da = da->next) {
+		if (da->da_synced)
+			continue;
+
+		err = sync(dev, da->da_addr);
+		if (err)
+			return err;
+
+		da->da_synced++;
+		da->da_users++;
+	}
+
+	return 0;
+}
+
+void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count,
+		struct net_device *dev,
+		int (*unsync)(struct net_device *, const unsigned char *))
+{
+	struct dev_addr_list *da;
+
+	while ((da = *list) != NULL) {
+		if (da->da_synced) {
+			if (!unsync || !unsync(dev, da->da_addr)) {
+				da->da_synced--;
+				if (--da->da_users == 0) {
+					*list = da->next;
+					kfree(da);
+					(*count)--;
+					continue;
+				}
+			}
+		}
+		list = &da->next;
+	}
+}
+#endif /* NETDEV_HW_ADDR_T_MULTICAST */
+#endif /* HAVE_SET_RX_MODE */
+void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len,
+			gfp_t gfp)
+{
+	void *p;
+
+	p = devm_kzalloc(dev, len, gfp);
+	if (p)
+		memcpy(p, src, len);
+
+	return p;
+}
+#endif /* 3.16.0 */
+
+/******************************************************************************/
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)) && \
+     (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)))
+#endif /* <3.17.0 && RHEL_RELEASE_CODE < RHEL7.5 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) )
+#ifndef NO_PTP_SUPPORT
+static void __kc_sock_efree(struct sk_buff *skb)
+{
+	sock_put(skb->sk);
+}
+
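+/* A clone must keep its owning socket alive until the clone is consumed.
+ * __kc_skb_clone_sk() therefore takes a reference on skb->sk (refusing
+ * sockets already on their way down, via atomic_inc_not_zero()) and sets
+ * __kc_sock_efree() as the destructor so the reference is dropped when the
+ * clone is freed.
+ */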
+struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb)
+{
+	struct sock *sk = skb->sk;
+	struct sk_buff *clone;
+
+	if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt))
+		return NULL;
+
+	clone = skb_clone(skb, GFP_ATOMIC);
+	if (!clone) {
+		sock_put(sk);
+		return NULL;
+	}
+
+	clone->sk = sk;
+	clone->destructor = __kc_sock_efree;
+
+	return clone;
+}
+
+void __kc_skb_complete_tx_timestamp(struct sk_buff *skb,
+				    struct skb_shared_hwtstamps *hwtstamps)
+{
+	struct sock_exterr_skb *serr;
+	struct sock *sk = skb->sk;
+	int err;
+
+	sock_hold(sk);
+
+	*skb_hwtstamps(skb) = *hwtstamps;
+
+	serr = SKB_EXT_ERR(skb);
+	memset(serr, 0, sizeof(*serr));
+	serr->ee.ee_errno = ENOMSG;
+	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
+
+	err = sock_queue_err_skb(sk, skb);
+	if (err)
+		kfree_skb(skb);
+
+	sock_put(sk);
+}
+#endif
+
+/* include headers needed for get_headlen function */
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#include <scsi/fc/fc_fcoe.h>
+#endif
+#ifdef HAVE_SCTP
+#include <linux/sctp.h>
+#endif
+
+u32 __kc_eth_get_headlen(const struct net_device __always_unused *dev,
+			 unsigned char *data, unsigned int max_len)
+{
+	union {
+		unsigned char *network;
+		/* l2 headers */
+		struct ethhdr *eth;
+		struct vlan_hdr *vlan;
+		/* l3 headers */
+		struct iphdr *ipv4;
+		struct ipv6hdr *ipv6;
+	} hdr;
+	__be16 proto;
+	u8 nexthdr = 0;	/* default to not TCP */
+	u8 hlen;
+
+	/* this should never happen, but better safe than sorry */
+	if (max_len < ETH_HLEN)
+		return max_len;
+
+	/* initialize network frame pointer */
+	hdr.network = data;
+
+	/* set first protocol and move network header forward */
+	proto = hdr.eth->h_proto;
+	hdr.network += ETH_HLEN;
+
+again:
+	switch (proto) {
+	/* handle any vlan tag if present */
+	case __constant_htons(ETH_P_8021AD):
+	case __constant_htons(ETH_P_8021Q):
+		if ((hdr.network - data) > (max_len - VLAN_HLEN))
+			return max_len;
+
+		proto = hdr.vlan->h_vlan_encapsulated_proto;
+		hdr.network += VLAN_HLEN;
+		goto again;
+	/* handle L3 protocols */
+	case __constant_htons(ETH_P_IP):
+		if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
+			return max_len;
+
+		/* access ihl as a u8 to avoid unaligned access on ia64 */
+		hlen = (hdr.network[0] & 0x0F) << 2;
+
+		/* verify hlen meets minimum size requirements */
+		if (hlen < sizeof(struct iphdr))
+			return hdr.network - data;
+
+		/* record next protocol if header is present */
+		if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
+			nexthdr = hdr.ipv4->protocol;
+
+		hdr.network += hlen;
+		break;
+#ifdef NETIF_F_TSO6
+	case __constant_htons(ETH_P_IPV6):
+		if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
+			return max_len;
+
+		/* record next protocol */
+		nexthdr = hdr.ipv6->nexthdr;
+		hdr.network += sizeof(struct ipv6hdr);
+		break;
+#endif /* NETIF_F_TSO6 */
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+	case __constant_htons(ETH_P_FCOE):
+		hdr.network += FCOE_HEADER_LEN;
+		break;
+#endif
+	default:
+		return hdr.network - data;
+	}
+
+	/* finally sort out L4 */
+	switch (nexthdr) {
+	case IPPROTO_TCP:
+		if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
+			return max_len;
+
+		/* access doff as a u8 to avoid unaligned access on ia64 */
+		hdr.network += max_t(u8, sizeof(struct tcphdr),
+				     (hdr.network[12] & 0xF0) >> 2);
+
+		break;
+	case IPPROTO_UDP:
+	case IPPROTO_UDPLITE:
+		hdr.network += sizeof(struct udphdr);
+		break;
+#ifdef HAVE_SCTP
+	case IPPROTO_SCTP:
+		hdr.network += sizeof(struct sctphdr);
+		break;
+#endif
+	}
+
+	/*
+	 * If everything has gone correctly hdr.network should be the
+	 * data section of the packet and will be the end of the header.
+	 * If not then it probably represents the end of the last recognized
+	 * header.
+	 */
+	return min_t(unsigned int, hdr.network - data, max_len);
+}
+
+#endif /* < 3.18.0 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) )
+#ifdef HAVE_NET_GET_RANDOM_ONCE
+static u8 __kc_netdev_rss_key[NETDEV_RSS_KEY_LEN];
+
+void __kc_netdev_rss_key_fill(void *buffer, size_t len)
+{
+	BUG_ON(len > sizeof(__kc_netdev_rss_key));
+	net_get_random_once(__kc_netdev_rss_key, sizeof(__kc_netdev_rss_key));
+	memcpy(buffer, __kc_netdev_rss_key, len);
+}
+#endif
+
+int _kc_bitmap_print_to_pagebuf(bool list, char *buf,
+				const unsigned long *maskp,
+				int nmaskbits)
+{
+	/* Space remaining in the page containing buf, minus two bytes
+	 * reserved for the trailing newline and NUL terminator.
+	 */
+	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf - 2;
+	int n = 0;
+
+	if (len > 1) {
+		n = list ? bitmap_scnlistprintf(buf, len, maskp, nmaskbits) :
+			   bitmap_scnprintf(buf, len, maskp, nmaskbits);
+		buf[n++] = '\n';
+		buf[n] = '\0';
+	}
+	return n;
+}
+#endif
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) )
+/* Skip this fallback where cpumask_local_spread was already backported:
+ * RHEL 6.9+ (6.x series), RHEL 7.3+, and SLE releases after 12.1.
+ */
+#if !((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,8) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) || \
+      (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) || \
+      (SLE_VERSION_CODE > SLE_VERSION(12,1,0)))
+unsigned int _kc_cpumask_local_spread(unsigned int i, int node)
+{
+	int cpu;
+
+	/* Wrap: we always want a cpu. */
+	i %= num_online_cpus();
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
+	/* Kernels prior to 2.6.28 do not have for_each_cpu or
+	 * cpumask_of_node, so just use for_each_online_cpu()
+	 */
+	for_each_online_cpu(cpu)
+		if (i-- == 0)
+			return cpu;
+
+	return 0;
+#else
+	if (node == -1) {
+		for_each_cpu(cpu, cpu_online_mask)
+			if (i-- == 0)
+				return cpu;
+	} else {
+		/* NUMA first. */
+		for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
+			if (i-- == 0)
+				return cpu;
+
+		for_each_cpu(cpu, cpu_online_mask) {
+			/* Skip NUMA nodes, done above. */
+			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
+				continue;
+
+			if (i-- == 0)
+				return cpu;
+		}
+	}
+#endif /* KERNEL_VERSION >= 2.6.28 */
+	BUG();
+}
+#endif
+#endif
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) )
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)))
+#ifdef CONFIG_SPARC
+#include <asm/idprom.h>
+#include <asm/prom.h>
+#endif
+int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused,
+				     u8 *mac_addr __maybe_unused)
+{
+#if (((LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0)) && defined(CONFIG_OF) && \
+      !defined(HAVE_STRUCT_DEVICE_OF_NODE) || !defined(CONFIG_OF)) && \
+     !defined(CONFIG_SPARC))
+	return -ENODEV;
+#else
+	const unsigned char *addr;
+	struct device_node *dp;
+
+	if (dev_is_pci(dev))
+		dp = pci_device_to_OF_node(to_pci_dev(dev));
+	else
+#if defined(HAVE_STRUCT_DEVICE_OF_NODE) && defined(CONFIG_OF)
+		dp = dev->of_node;
+#else
+		dp = NULL;
+#endif
+
+	addr = NULL;
+	if (dp)
+		addr = of_get_mac_address(dp);
+#ifdef CONFIG_SPARC
+	/* Kernel hasn't implemented arch_get_platform_mac_address, but we
+	 * should handle the SPARC case here since it was supported
+	 * originally. This is replaced by arch_get_platform_mac_address()
+	 * upstream.
+	 */
+	if (!addr)
+		addr = idprom->id_ethaddr;
+#endif
+	if (!addr)
+		return -ENODEV;
+
+	ether_addr_copy(mac_addr, addr);
+	return 0;
+#endif
+}
+#endif /* !(RHEL_RELEASE >= 7.3) */
+#endif /* < 4.5.0 */
+
+/*****************************************************************************/
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) || \
+     (SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0))) || \
+     (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5))))
+const char *_kc_phy_speed_to_str(int speed)
+{
+	switch (speed) {
+	case SPEED_10:
+		return "10Mbps";
+	case SPEED_100:
+		return "100Mbps";
+	case SPEED_1000:
+		return "1Gbps";
+	case SPEED_2500:
+		return "2.5Gbps";
+	case SPEED_5000:
+		return "5Gbps";
+	case SPEED_10000:
+		return "10Gbps";
+	case SPEED_14000:
+		return "14Gbps";
+	case SPEED_20000:
+		return "20Gbps";
+	case SPEED_25000:
+		return "25Gbps";
+	case SPEED_40000:
+		return "40Gbps";
+	case SPEED_50000:
+		return "50Gbps";
+	case SPEED_56000:
+		return "56Gbps";
+#ifdef SPEED_100000
+	case SPEED_100000:
+		return "100Gbps";
+#endif
+	case SPEED_UNKNOWN:
+		return "Unknown";
+	default:
+		return "Unsupported (update phy-core.c)";
+	}
+}
+#endif /* (LINUX < 4.14.0) || (SLES <= 12.3.0) || (RHEL <= 7.5) */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0) )
+void _kc_ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst,
+				      struct ethtool_link_ksettings *src)
+{
+	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
+	unsigned int idx = 0;
+
+	for (; idx < size; idx++) {
+		dst->link_modes.supported[idx] &=
+			src->link_modes.supported[idx];
+		dst->link_modes.advertising[idx] &=
+			src->link_modes.advertising[idx];
+	}
+}
+#endif /* 4.15.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,17,0))
+/* PCIe link information */
+#define PCIE_SPEED2STR(speed) \
+	((speed) == PCIE_SPEED_16_0GT ? "16 GT/s" : \
+	 (speed) == PCIE_SPEED_8_0GT ? "8 GT/s" : \
+	 (speed) == PCIE_SPEED_5_0GT ? "5 GT/s" : \
+	 (speed) == PCIE_SPEED_2_5GT ? "2.5 GT/s" : \
+	 "Unknown speed")
+
+/* PCIe speed to Mb/s reduced by encoding overhead */
+#define PCIE_SPEED2MBS_ENC(speed) \
+	((speed) == PCIE_SPEED_16_0GT ? 16000*128/130 : \
+	 (speed) == PCIE_SPEED_8_0GT  ?  8000*128/130 : \
+	 (speed) == PCIE_SPEED_5_0GT  ?  5000*8/10 : \
+	 (speed) == PCIE_SPEED_2_5GT  ?  2500*8/10 : \
+	 0)
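+
+/* Worked example of the encoding overhead above: an 8 GT/s (gen3) lane uses
+ * 128b/130b encoding, so it carries 8000 * 128 / 130 = 7876 Mb/s of usable
+ * data, while a 5 GT/s (gen2) lane uses 8b/10b and carries 5000 * 8 / 10 =
+ * 4000 Mb/s. A link's total bandwidth is width * PCIE_SPEED2MBS_ENC(speed),
+ * which is what _kc_pcie_bandwidth_available() accumulates below.
+ */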
+
+static u32
+_kc_pcie_bandwidth_available(struct pci_dev *dev,
+			     struct pci_dev **limiting_dev,
+			     enum pci_bus_speed *speed,
+			     enum pcie_link_width *width)
+{
+	u16 lnksta;
+	enum pci_bus_speed next_speed;
+	enum pcie_link_width next_width;
+	u32 bw, next_bw;
+
+	if (speed)
+		*speed = PCI_SPEED_UNKNOWN;
+	if (width)
+		*width = PCIE_LNK_WIDTH_UNKNOWN;
+
+	bw = 0;
+
+	while (dev) {
+		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
+
+		next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
+		next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
+			PCI_EXP_LNKSTA_NLW_SHIFT;
+
+		next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
+
+		/* Check if current device limits the total bandwidth */
+		if (!bw || next_bw <= bw) {
+			bw = next_bw;
+
+			if (limiting_dev)
+				*limiting_dev = dev;
+			if (speed)
+				*speed = next_speed;
+			if (width)
+				*width = next_width;
+		}
+
+		dev = pci_upstream_bridge(dev);
+	}
+
+	return bw;
+}
+
+static enum pci_bus_speed _kc_pcie_get_speed_cap(struct pci_dev *dev)
+{
+	u32 lnkcap2, lnkcap;
+
+	/*
+	 * PCIe r4.0 sec 7.5.3.18 recommends using the Supported Link
+	 * Speeds Vector in Link Capabilities 2 when supported, falling
+	 * back to Max Link Speed in Link Capabilities otherwise.
+	 */
+	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
+	if (lnkcap2) { /* PCIe r3.0-compliant */
+		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB)
+			return PCIE_SPEED_16_0GT;
+		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
+			return PCIE_SPEED_8_0GT;
+		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
+			return PCIE_SPEED_5_0GT;
+		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
+			return PCIE_SPEED_2_5GT;
+		return PCI_SPEED_UNKNOWN;
+	}
+
+	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
+	if (lnkcap) {
+		if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB)
+			return PCIE_SPEED_16_0GT;
+		else if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB)
+			return PCIE_SPEED_8_0GT;
+		else if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
+			return PCIE_SPEED_5_0GT;
+		else if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
+			return PCIE_SPEED_2_5GT;
+	}
+
+	return PCI_SPEED_UNKNOWN;
+}
+
+static enum pcie_link_width _kc_pcie_get_width_cap(struct pci_dev *dev)
+{
+	u32 lnkcap;
+
+	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
+	if (lnkcap)
+		return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
+
+	return PCIE_LNK_WIDTH_UNKNOWN;
+}
+
+static u32
+_kc_pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
+			   enum pcie_link_width *width)
+{
+	*speed = _kc_pcie_get_speed_cap(dev);
+	*width = _kc_pcie_get_width_cap(dev);
+
+	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
+		return 0;
+
+	return *width * PCIE_SPEED2MBS_ENC(*speed);
+}
+
+void _kc_pcie_print_link_status(struct pci_dev *dev)
+{
+	enum pcie_link_width width, width_cap;
+	enum pci_bus_speed speed, speed_cap;
+	struct pci_dev *limiting_dev = NULL;
+	u32 bw_avail, bw_cap;
+
+	bw_cap = _kc_pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
+	bw_avail = _kc_pcie_bandwidth_available(dev, &limiting_dev, &speed,
+						&width);
+
+	if (bw_avail >= bw_cap)
+		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
+			 bw_cap / 1000, bw_cap % 1000,
+			 PCIE_SPEED2STR(speed_cap), width_cap);
+	else
+		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
+			 bw_avail / 1000, bw_avail % 1000,
+			 PCIE_SPEED2STR(speed), width,
+			 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
+			 bw_cap / 1000, bw_cap % 1000,
+			 PCIE_SPEED2STR(speed_cap), width_cap);
+}
+#endif /* 4.17.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0))
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1)))
+#define HAVE_NDO_FDB_ADD_EXTACK
+#else /* !RHEL || RHEL < 8.1 */
+#ifdef HAVE_TC_SETUP_CLSFLOWER
+#define FLOW_DISSECTOR_MATCH(__rule, __type, __out)				\
+	const struct flow_match *__m = &(__rule)->match;			\
+	struct flow_dissector *__d = (__m)->dissector;				\
+										\
+	(__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key);	\
+	(__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask);	\
+
+void flow_rule_match_basic(const struct flow_rule *rule,
+			   struct flow_match_basic *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
+}
+
+void flow_rule_match_control(const struct flow_rule *rule,
+			     struct flow_match_control *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
+}
+
+void flow_rule_match_eth_addrs(const struct flow_rule *rule,
+			       struct flow_match_eth_addrs *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
+}
+
+#ifdef HAVE_TC_FLOWER_ENC
+void flow_rule_match_enc_keyid(const struct flow_rule *rule,
+			       struct flow_match_enc_keyid *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
+}
+
+void flow_rule_match_enc_ports(const struct flow_rule *rule,
+			       struct flow_match_ports *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
+}
+
+void flow_rule_match_enc_control(const struct flow_rule *rule,
+				 struct flow_match_control *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
+}
+
+void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
+				    struct flow_match_ipv4_addrs *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
+}
+
+void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
+				    struct flow_match_ipv6_addrs *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
+}
+#endif
+
+#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS
+void flow_rule_match_vlan(const struct flow_rule *rule,
+			  struct flow_match_vlan *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
+}
+#endif
+
+void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
+				struct flow_match_ipv4_addrs *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
+}
+
+void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
+				struct flow_match_ipv6_addrs *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
+}
+
+void flow_rule_match_ports(const struct flow_rule *rule,
+			   struct flow_match_ports *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
+}
+#endif /* HAVE_TC_SETUP_CLSFLOWER */
+#endif /* !RHEL || RHEL < 8.1 */
+#endif /* 5.1.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0))
+#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2))))
+#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
+int _kc_flow_block_cb_setup_simple(struct flow_block_offload *f,
+				   struct list_head __always_unused *driver_list,
+				   tc_setup_cb_t *cb,
+				   void *cb_ident, void *cb_priv,
+				   bool ingress_only)
+{
+	if (ingress_only &&
+	    f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+		return -EOPNOTSUPP;
+
+	/* Note: Upstream has driver_block_list, but older kernels do not */
+	switch (f->command) {
+	case TC_BLOCK_BIND:
+#ifdef HAVE_TCF_BLOCK_CB_REGISTER_EXTACK
+		return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv,
+					     f->extack);
+#else
+		return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv);
+#endif
+	case TC_BLOCK_UNBIND:
+		tcf_block_cb_unregister(f->block, cb, cb_ident);
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */
+#endif /* !RHEL >= 8.2 */
+#endif /* 5.3.0 */
diff --git a/drivers/net/ethernet/intel/i40e/kcompat.h b/drivers/net/ethernet/intel/i40e/kcompat.h
new file mode 100644
index 000000000..a5cee4859
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/kcompat.h
@@ -0,0 +1,6838 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
+
+#ifndef _KCOMPAT_H_
+#define _KCOMPAT_H_
+
+#ifndef LINUX_VERSION_CODE
+#include <linux/version.h>
+#else
+#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
+#endif
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <linux/if_link.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/list.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/tcp.h>
+#include <linux/types.h>
+#include <linux/udp.h>
+#include <linux/vmalloc.h>
+
+#ifndef GCC_VERSION
+#define GCC_VERSION (__GNUC__ * 10000		\
+		     + __GNUC_MINOR__ * 100	\
+		     + __GNUC_PATCHLEVEL__)
+#endif /* GCC_VERSION */
+
+/* Backport macros for controlling GCC diagnostics */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0) )
+
+/* Compilers before gcc-4.6 do not understand "#pragma GCC diagnostic push" */
+#if GCC_VERSION >= 40600
+#define __diag_str1(s)		#s
+#define __diag_str(s)		__diag_str1(s)
+#define __diag(s)		_Pragma(__diag_str(GCC diagnostic s))
+#else
+#define __diag(s)
+#endif /* GCC_VERSION >= 4.6 */
+#define __diag_push()	__diag(push)
+#define __diag_pop()	__diag(pop)
+#endif /* LINUX_VERSION < 4.18.0 */
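+
+/* Typical use of the backported diagnostics macros, as in the
+ * __trace_bprintk() wrapper further down in this file:
+ *
+ *	__diag_push();
+ *	__diag(ignored "-Wformat-nonliteral");
+ *	...code that would otherwise trip the warning...
+ *	__diag_pop();
+ */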
+
+#ifndef NSEC_PER_MSEC
+#define NSEC_PER_MSEC 1000000L
+#endif
+#include <net/ipv6.h>
+/* UTS_RELEASE is in a different header starting in kernel 2.6.18 */
+#ifndef UTS_RELEASE
+/* utsrelease.h changed locations in 2.6.33 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) )
+#include <linux/utsrelease.h>
+#else
+#include <generated/utsrelease.h>
+#endif
+#endif
+
+/* NAPI enable/disable flags here */
+#define NAPI
+
+#define adapter_struct i40e_pf
+#define adapter_q_vector i40e_q_vector
+
+/* and finally set defines so that the code sees the changes */
+#ifdef NAPI
+#ifndef CONFIG_I40E_NAPI
+#define CONFIG_I40E_NAPI
+#endif
+#else
+#undef CONFIG_I40E_NAPI
+#endif /* NAPI */
+
+/* Dynamic LTR and deeper C-State support disable/enable */
+
+/* packet split disable/enable */
+#ifdef DISABLE_PACKET_SPLIT
+#ifndef CONFIG_I40E_DISABLE_PACKET_SPLIT
+#define CONFIG_I40E_DISABLE_PACKET_SPLIT
+#endif
+#endif /* DISABLE_PACKET_SPLIT */
+
+/* MSI compatibility code for all kernels and drivers */
+#ifdef DISABLE_PCI_MSI
+#undef CONFIG_PCI_MSI
+#endif
+#ifndef CONFIG_PCI_MSI
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
+struct msix_entry {
+	u16 vector; /* kernel uses to write allocated vector */
+	u16 entry;  /* driver uses to specify entry, OS writes */
+};
+#endif
+#undef pci_enable_msi
+#define pci_enable_msi(a) -ENOTSUPP
+#undef pci_disable_msi
+#define pci_disable_msi(a) do {} while (0)
+#undef pci_enable_msix
+#define pci_enable_msix(a, b, c) -ENOTSUPP
+#undef pci_disable_msix
+#define pci_disable_msix(a) do {} while (0)
+#define msi_remove_pci_irq_vectors(a) do {} while (0)
+#endif /* CONFIG_PCI_MSI */
+#ifdef DISABLE_PM
+#undef CONFIG_PM
+#endif
+
+#ifdef DISABLE_NET_POLL_CONTROLLER
+#undef CONFIG_NET_POLL_CONTROLLER
+#endif
+
+#ifndef PMSG_SUSPEND
+#define PMSG_SUSPEND 3
+#endif
+
+/* generic boolean compatibility */
+#undef TRUE
+#undef FALSE
+#define TRUE true
+#define FALSE false
+#ifdef GCC_VERSION
+#if ( GCC_VERSION < 3000 )
+#define _Bool char
+#endif
+#else
+#define _Bool char
+#endif
+
+#ifndef BIT
+#define BIT(nr)         (1UL << (nr))
+#endif
+
+#undef __always_unused
+#define __always_unused __attribute__((__unused__))
+
+#undef __maybe_unused
+#define __maybe_unused __attribute__((__unused__))
+
+/* kernels less than 2.4.14 don't have this */
+#ifndef ETH_P_8021Q
+#define ETH_P_8021Q 0x8100
+#endif
+
+#ifndef module_param
+#define module_param(v,t,p) MODULE_PARM(v, "i");
+#endif
+
+#ifndef DMA_64BIT_MASK
+#define DMA_64BIT_MASK  0xffffffffffffffffULL
+#endif
+
+#ifndef DMA_32BIT_MASK
+#define DMA_32BIT_MASK  0x00000000ffffffffULL
+#endif
+
+#ifndef PCI_CAP_ID_EXP
+#define PCI_CAP_ID_EXP 0x10
+#endif
+
+#ifndef uninitialized_var
+#define uninitialized_var(x) x = x
+#endif
+
+#ifndef PCIE_LINK_STATE_L0S
+#define PCIE_LINK_STATE_L0S 1
+#endif
+#ifndef PCIE_LINK_STATE_L1
+#define PCIE_LINK_STATE_L1 2
+#endif
+
+#ifndef SET_NETDEV_DEV
+#define SET_NETDEV_DEV(net, pdev)
+#endif
+
+#if !defined(HAVE_FREE_NETDEV) && ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) )
+#define free_netdev(x)	kfree(x)
+#endif
+
+#ifdef HAVE_POLL_CONTROLLER
+#define CONFIG_NET_POLL_CONTROLLER
+#endif
+
+#ifndef SKB_DATAREF_SHIFT
+/* If we do not have the infrastructure to detect if skb_header is cloned,
+ * just return false in all cases.
+ */
+#define skb_header_cloned(x) 0
+#endif
+
+#ifndef NETIF_F_GSO
+#define gso_size tso_size
+#define gso_segs tso_segs
+#endif
+
+#ifndef NETIF_F_GRO
+#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \
+		vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan)
+#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb)
+#endif
+
+#ifndef NETIF_F_SCTP_CSUM
+#define NETIF_F_SCTP_CSUM 0
+#endif
+
+#ifndef NETIF_F_LRO
+#define NETIF_F_LRO BIT(15)
+#endif
+
+#ifndef NETIF_F_NTUPLE
+#define NETIF_F_NTUPLE BIT(27)
+#endif
+
+#ifndef NETIF_F_ALL_FCOE
+#define NETIF_F_ALL_FCOE	(NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
+				 NETIF_F_FSO)
+#endif
+
+#ifndef IPPROTO_SCTP
+#define IPPROTO_SCTP 132
+#endif
+
+#ifndef IPPROTO_UDPLITE
+#define IPPROTO_UDPLITE 136
+#endif
+
+#ifndef CHECKSUM_PARTIAL
+#define CHECKSUM_PARTIAL CHECKSUM_HW
+#define CHECKSUM_COMPLETE CHECKSUM_HW
+#endif
+
+#ifndef __read_mostly
+#define __read_mostly
+#endif
+
+#ifndef MII_RESV1
+#define MII_RESV1		0x17		/* Reserved...		*/
+#endif
+
+#ifndef unlikely
+#define unlikely(_x) _x
+#define likely(_x) _x
+#endif
+
+#ifndef WARN_ON
+#define WARN_ON(x)
+#endif
+
+#ifndef PCI_DEVICE
+#define PCI_DEVICE(vend,dev) \
+	.vendor = (vend), .device = (dev), \
+	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
+#endif
+
+#ifndef node_online
+#define node_online(node) ((node) == 0)
+#endif
+
+#ifndef cpu_online
+#define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map)
+#endif
+
+#ifndef _LINUX_RANDOM_H
+#include <linux/random.h>
+#endif
+
+#ifndef BITS_PER_TYPE
+#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
+#endif
+
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
+#endif
+
+#ifndef DECLARE_BITMAP
+#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)]
+#endif
+
+#ifndef VLAN_HLEN
+#define VLAN_HLEN 4
+#endif
+
+#ifndef VLAN_ETH_HLEN
+#define VLAN_ETH_HLEN 18
+#endif
+
+#ifndef VLAN_ETH_FRAME_LEN
+#define VLAN_ETH_FRAME_LEN 1518
+#endif
+
+#ifndef DCA_GET_TAG_TWO_ARGS
+#define dca3_get_tag(a,b) dca_get_tag(b)
+#endif
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#if defined(__i386__) || defined(__x86_64__)
+#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#endif
+#endif
+
+/* taken from 2.6.24 definition in linux/kernel.h */
+#ifndef IS_ALIGNED
+#define IS_ALIGNED(x,a)         (((x) % ((typeof(x))(a))) == 0)
+#endif
+
+#ifdef IS_ENABLED
+#undef IS_ENABLED
+#undef __ARG_PLACEHOLDER_1
+#undef config_enabled
+#undef _config_enabled
+#undef __config_enabled
+#undef ___config_enabled
+#endif
+
+#define __ARG_PLACEHOLDER_1 0,
+#define config_enabled(cfg) _config_enabled(cfg)
+#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value)
+#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0)
+#define ___config_enabled(__ignored, val, ...) val
+
+#define IS_ENABLED(option) \
+	(config_enabled(option) || config_enabled(option##_MODULE))
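+
+/* How the placeholder trick above expands, step by step, for a built-in
+ * option (sketch): with CONFIG_PCI_MSI defined to 1,
+ *
+ *	IS_ENABLED(CONFIG_PCI_MSI)
+ *	-> config_enabled(1) || config_enabled(CONFIG_PCI_MSI_MODULE)
+ *	-> __config_enabled(__ARG_PLACEHOLDER_1) || ...
+ *	-> ___config_enabled(0, 1, 0) || ...	// placeholder pastes "0,"
+ *
+ * and ___config_enabled() picks its second argument, 1. For an undefined
+ * option no placeholder macro exists, nothing shifts, and the second
+ * argument stays 0.
+ */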
+
+#if !defined(NETIF_F_HW_VLAN_TX) && !defined(NETIF_F_HW_VLAN_CTAG_TX)
+struct _kc_vlan_ethhdr {
+	unsigned char	h_dest[ETH_ALEN];
+	unsigned char	h_source[ETH_ALEN];
+	__be16		h_vlan_proto;
+	__be16		h_vlan_TCI;
+	__be16		h_vlan_encapsulated_proto;
+};
+#define vlan_ethhdr _kc_vlan_ethhdr
+struct _kc_vlan_hdr {
+	__be16		h_vlan_TCI;
+	__be16		h_vlan_encapsulated_proto;
+};
+#define vlan_hdr _kc_vlan_hdr
+#define vlan_tx_tag_present(_skb) 0
+#define vlan_tx_tag_get(_skb) 0
+#endif /* NETIF_F_HW_VLAN_TX && NETIF_F_HW_VLAN_CTAG_TX */
+
+#ifndef VLAN_PRIO_SHIFT
+#define VLAN_PRIO_SHIFT 13
+#endif
+
+#ifndef PCI_EXP_LNKSTA_CLS_2_5GB
+#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001
+#endif
+
+#ifndef PCI_EXP_LNKSTA_CLS_5_0GB
+#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002
+#endif
+
+#ifndef PCI_EXP_LNKSTA_CLS_8_0GB
+#define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003
+#endif
+
+#ifndef PCI_EXP_LNKSTA_NLW_X1
+#define PCI_EXP_LNKSTA_NLW_X1 0x0010
+#endif
+
+#ifndef PCI_EXP_LNKSTA_NLW_X2
+#define PCI_EXP_LNKSTA_NLW_X2 0x0020
+#endif
+
+#ifndef PCI_EXP_LNKSTA_NLW_X4
+#define PCI_EXP_LNKSTA_NLW_X4 0x0040
+#endif
+
+#ifndef PCI_EXP_LNKSTA_NLW_X8
+#define PCI_EXP_LNKSTA_NLW_X8 0x0080
+#endif
+
+#ifndef __GFP_COLD
+#define __GFP_COLD 0
+#endif
+
+#ifndef __GFP_COMP
+#define __GFP_COMP 0
+#endif
+
+#ifndef IP_OFFSET
+#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */
+#endif
+
+/*****************************************************************************/
+/* Installations with ethtool version without eeprom, adapter id, or statistics
+ * support */
+
+#ifndef ETH_GSTRING_LEN
+#define ETH_GSTRING_LEN 32
+#endif
+
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS 0x1d
+#undef ethtool_drvinfo
+#define ethtool_drvinfo k_ethtool_drvinfo
+struct k_ethtool_drvinfo {
+	u32 cmd;
+	char driver[32];
+	char version[32];
+	char fw_version[32];
+	char bus_info[32];
+	char reserved1[32];
+	char reserved2[16];
+	u32 n_stats;
+	u32 testinfo_len;
+	u32 eedump_len;
+	u32 regdump_len;
+};
+
+struct ethtool_stats {
+	u32 cmd;
+	u32 n_stats;
+	u64 data[0];
+};
+#endif /* ETHTOOL_GSTATS */
+
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID 0x1c
+#endif /* ETHTOOL_PHYS_ID */
+
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS 0x1b
+enum ethtool_stringset {
+	ETH_SS_TEST             = 0,
+	ETH_SS_STATS,
+};
+struct ethtool_gstrings {
+	u32 cmd;            /* ETHTOOL_GSTRINGS */
+	u32 string_set;     /* string set id, e.g. ETH_SS_TEST, etc. */
+	u32 len;            /* number of strings in the string set */
+	u8 data[0];
+};
+#endif /* ETHTOOL_GSTRINGS */
+
+#ifndef ETHTOOL_TEST
+#define ETHTOOL_TEST 0x1a
+enum ethtool_test_flags {
+	ETH_TEST_FL_OFFLINE	= BIT(0),
+	ETH_TEST_FL_FAILED	= BIT(1),
+};
+struct ethtool_test {
+	u32 cmd;
+	u32 flags;
+	u32 reserved;
+	u32 len;
+	u64 data[0];
+};
+#endif /* ETHTOOL_TEST */
+
+#ifndef ETHTOOL_GEEPROM
+#define ETHTOOL_GEEPROM 0xb
+#undef ETHTOOL_GREGS
+struct ethtool_eeprom {
+	u32 cmd;
+	u32 magic;
+	u32 offset;
+	u32 len;
+	u8 data[0];
+};
+
+struct ethtool_value {
+	u32 cmd;
+	u32 data;
+};
+#endif /* ETHTOOL_GEEPROM */
+
+#ifndef ETHTOOL_GLINK
+#define ETHTOOL_GLINK 0xa
+#endif /* ETHTOOL_GLINK */
+
+#ifndef ETHTOOL_GWOL
+#define ETHTOOL_GWOL 0x5
+#define ETHTOOL_SWOL 0x6
+#define SOPASS_MAX      6
+struct ethtool_wolinfo {
+	u32 cmd;
+	u32 supported;
+	u32 wolopts;
+	u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */
+};
+#endif /* ETHTOOL_GWOL */
+
+#ifndef ETHTOOL_GREGS
+#define ETHTOOL_GREGS		0x00000004 /* Get NIC registers */
+#define ethtool_regs _kc_ethtool_regs
+/* for passing big chunks of data */
+struct _kc_ethtool_regs {
+	u32 cmd;
+	u32 version; /* driver-specific, indicates different chips/revs */
+	u32 len; /* bytes */
+	u8 data[0];
+};
+#endif /* ETHTOOL_GREGS */
+
+#ifndef ETHTOOL_GMSGLVL
+#define ETHTOOL_GMSGLVL		0x00000007 /* Get driver message level */
+#endif
+#ifndef ETHTOOL_SMSGLVL
+#define ETHTOOL_SMSGLVL		0x00000008 /* Set driver msg level, priv. */
+#endif
+#ifndef ETHTOOL_NWAY_RST
+#define ETHTOOL_NWAY_RST	0x00000009 /* Restart autonegotiation, priv */
+#endif
+#ifndef ETHTOOL_GLINK
+#define ETHTOOL_GLINK		0x0000000a /* Get link status */
+#endif
+#ifndef ETHTOOL_GEEPROM
+#define ETHTOOL_GEEPROM		0x0000000b /* Get EEPROM data */
+#endif
+#ifndef ETHTOOL_SEEPROM
+#define ETHTOOL_SEEPROM		0x0000000c /* Set EEPROM data */
+#endif
+#ifndef ETHTOOL_GCOALESCE
+#define ETHTOOL_GCOALESCE	0x0000000e /* Get coalesce config */
+/* for configuring coalescing parameters of chip */
+#define ethtool_coalesce _kc_ethtool_coalesce
+struct _kc_ethtool_coalesce {
+	u32	cmd;	/* ETHTOOL_{G,S}COALESCE */
+
+	/* How many usecs to delay an RX interrupt after
+	 * a packet arrives.  If 0, only rx_max_coalesced_frames
+	 * is used.
+	 */
+	u32	rx_coalesce_usecs;
+
+	/* How many packets to delay an RX interrupt after
+	 * a packet arrives.  If 0, only rx_coalesce_usecs is
+	 * used.  It is illegal to set both usecs and max frames
+	 * to zero as this would cause RX interrupts to never be
+	 * generated.
+	 */
+	u32	rx_max_coalesced_frames;
+
+	/* Same as above two parameters, except that these values
+	 * apply while an IRQ is being serviced by the host.  Not
+	 * all cards support this feature and the values are ignored
+	 * in that case.
+	 */
+	u32	rx_coalesce_usecs_irq;
+	u32	rx_max_coalesced_frames_irq;
+
+	/* How many usecs to delay a TX interrupt after
+	 * a packet is sent.  If 0, only tx_max_coalesced_frames
+	 * is used.
+	 */
+	u32	tx_coalesce_usecs;
+
+	/* How many packets to delay a TX interrupt after
+	 * a packet is sent.  If 0, only tx_coalesce_usecs is
+	 * used.  It is illegal to set both usecs and max frames
+	 * to zero as this would cause TX interrupts to never be
+	 * generated.
+	 */
+	u32	tx_max_coalesced_frames;
+
+	/* Same as above two parameters, except that these values
+	 * apply while an IRQ is being serviced by the host.  Not
+	 * all cards support this feature and the values are ignored
+	 * in that case.
+	 */
+	u32	tx_coalesce_usecs_irq;
+	u32	tx_max_coalesced_frames_irq;
+
+	/* How many usecs to delay in-memory statistics
+	 * block updates.  Some drivers do not have an in-memory
+	 * statistic block, and in such cases this value is ignored.
+	 * This value must not be zero.
+	 */
+	u32	stats_block_coalesce_usecs;
+
+	/* Adaptive RX/TX coalescing is an algorithm implemented by
+	 * some drivers to improve latency under low packet rates and
+	 * improve throughput under high packet rates.  Some drivers
+	 * only implement one of RX or TX adaptive coalescing.  Anything
+	 * not implemented by the driver causes these values to be
+	 * silently ignored.
+	 */
+	u32	use_adaptive_rx_coalesce;
+	u32	use_adaptive_tx_coalesce;
+
+	/* When the packet rate (measured in packets per second)
+	 * is below pkt_rate_low, the {rx,tx}_*_low parameters are
+	 * used.
+	 */
+	u32	pkt_rate_low;
+	u32	rx_coalesce_usecs_low;
+	u32	rx_max_coalesced_frames_low;
+	u32	tx_coalesce_usecs_low;
+	u32	tx_max_coalesced_frames_low;
+
+	/* When the packet rate is below pkt_rate_high but above
+	 * pkt_rate_low (both measured in packets per second) the
+	 * normal {rx,tx}_* coalescing parameters are used.
+	 */
+
+	/* When the packet rate (measured in packets per second)
+	 * is above pkt_rate_high, the {rx,tx}_*_high parameters are
+	 * used.
+	 */
+	u32	pkt_rate_high;
+	u32	rx_coalesce_usecs_high;
+	u32	rx_max_coalesced_frames_high;
+	u32	tx_coalesce_usecs_high;
+	u32	tx_max_coalesced_frames_high;
+
+	/* How often to do adaptive coalescing packet rate sampling,
+	 * measured in seconds.  Must not be zero.
+	 */
+	u32	rate_sample_interval;
+};
+#endif /* ETHTOOL_GCOALESCE */
+
+#ifndef ETHTOOL_SCOALESCE
+#define ETHTOOL_SCOALESCE	0x0000000f /* Set coalesce config. */
+#endif
+#ifndef ETHTOOL_GRINGPARAM
+#define ETHTOOL_GRINGPARAM	0x00000010 /* Get ring parameters */
+/* for configuring RX/TX ring parameters */
+#define ethtool_ringparam _kc_ethtool_ringparam
+struct _kc_ethtool_ringparam {
+	u32	cmd;	/* ETHTOOL_{G,S}RINGPARAM */
+
+	/* Read only attributes.  These indicate the maximum number
+	 * of pending RX/TX ring entries the driver will allow the
+	 * user to set.
+	 */
+	u32	rx_max_pending;
+	u32	rx_mini_max_pending;
+	u32	rx_jumbo_max_pending;
+	u32	tx_max_pending;
+
+	/* Values changeable by the user.  The valid values are
+	 * in the range 1 to the "*_max_pending" counterpart above.
+	 */
+	u32	rx_pending;
+	u32	rx_mini_pending;
+	u32	rx_jumbo_pending;
+	u32	tx_pending;
+};
+#endif /* ETHTOOL_GRINGPARAM */
+
+#ifndef ETHTOOL_SRINGPARAM
+#define ETHTOOL_SRINGPARAM	0x00000011 /* Set ring parameters, priv. */
+#endif
+#ifndef ETHTOOL_GPAUSEPARAM
+#define ETHTOOL_GPAUSEPARAM	0x00000012 /* Get pause parameters */
+/* for configuring link flow control parameters */
+#define ethtool_pauseparam _kc_ethtool_pauseparam
+struct _kc_ethtool_pauseparam {
+	u32	cmd;	/* ETHTOOL_{G,S}PAUSEPARAM */
+
+	/* If the link is being auto-negotiated (via ethtool_cmd.autoneg
+	 * being true) the user may set 'autoneg' here non-zero to have the
+	 * pause parameters be auto-negotiated too.  In such a case, the
+	 * {rx,tx}_pause values below determine what capabilities are
+	 * advertised.
+	 *
+	 * If 'autoneg' is zero or the link is not being auto-negotiated,
+	 * then {rx,tx}_pause force the driver to use/not-use pause
+	 * flow control.
+	 */
+	u32	autoneg;
+	u32	rx_pause;
+	u32	tx_pause;
+};
+#endif /* ETHTOOL_GPAUSEPARAM */
+
+#ifndef ETHTOOL_SPAUSEPARAM
+#define ETHTOOL_SPAUSEPARAM	0x00000013 /* Set pause parameters. */
+#endif
+#ifndef ETHTOOL_GRXCSUM
+#define ETHTOOL_GRXCSUM		0x00000014 /* Get RX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_SRXCSUM
+#define ETHTOOL_SRXCSUM		0x00000015 /* Set RX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_GTXCSUM
+#define ETHTOOL_GTXCSUM		0x00000016 /* Get TX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_STXCSUM
+#define ETHTOOL_STXCSUM		0x00000017 /* Set TX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_GSG
+#define ETHTOOL_GSG		0x00000018 /* Get scatter-gather enable
+					    * (ethtool_value) */
+#endif
+#ifndef ETHTOOL_SSG
+#define ETHTOOL_SSG		0x00000019 /* Set scatter-gather enable
+					    * (ethtool_value). */
+#endif
+#ifndef ETHTOOL_TEST
+#define ETHTOOL_TEST		0x0000001a /* execute NIC self-test, priv. */
+#endif
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS	0x0000001b /* get specified string set */
+#endif
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID		0x0000001c /* identify the NIC */
+#endif
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS		0x0000001d /* get NIC-specific statistics */
+#endif
+#ifndef ETHTOOL_GTSO
+#define ETHTOOL_GTSO		0x0000001e /* Get TSO enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_STSO
+#define ETHTOOL_STSO		0x0000001f /* Set TSO enable (ethtool_value) */
+#endif
+
+#ifndef ETHTOOL_BUSINFO_LEN
+#define ETHTOOL_BUSINFO_LEN	32
+#endif
+
+#ifndef WAKE_FILTER
+#define WAKE_FILTER	BIT(7)
+#endif
+
+#ifndef SPEED_2500
+#define SPEED_2500 2500
+#endif
+#ifndef SPEED_5000
+#define SPEED_5000 5000
+#endif
+#ifndef SPEED_14000
+#define SPEED_14000 14000
+#endif
+#ifndef SPEED_25000
+#define SPEED_25000 25000
+#endif
+#ifndef SPEED_50000
+#define SPEED_50000 50000
+#endif
+#ifndef SPEED_56000
+#define SPEED_56000 56000
+#endif
+#ifndef SPEED_100000
+#define SPEED_100000 100000
+#endif
+
+#ifndef RHEL_RELEASE_VERSION
+#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b))
+#endif
+#ifndef AX_RELEASE_VERSION
+#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b))
+#endif
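+
+/* These codes pack major/minor into one comparable integer, e.g.
+ * RHEL_RELEASE_VERSION(7,5) = (7 << 8) + 5 = 0x0705, so
+ * RHEL_RELEASE_VERSION(7,5) >= RHEL_RELEASE_VERSION(7,3) holds as expected.
+ */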
+
+/* #ifndef AX_RELEASE_CODE */
+#define AX_RELEASE_CODE 0
+/* #endif */
+
+#if (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,0))
+#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,0)
+#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,1))
+#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,1)
+#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,2))
+#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,3)
+#endif
+
+/* #ifndef RHEL_RELEASE_CODE  */
+/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */
+#define RHEL_RELEASE_CODE 0
+/* #endif */
+
+/* RHEL 7 didn't backport the parameter change in
+ * create_singlethread_workqueue.
+ * If/when RH corrects this we will want to tighten up the version check.
+ */
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0))
+#undef create_singlethread_workqueue
+#define create_singlethread_workqueue(name)	\
+	alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name)
+#endif
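+
+/* With the redefinition above, a driver call such as (illustrative)
+ *
+ *	wq = create_singlethread_workqueue("i40e");
+ *
+ * expands to alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "i40e"),
+ * preserving the strictly ordered, reclaim-safe semantics on RHEL 7.
+ */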
+
+/* Ubuntu Release ABI is the 4th digit of their kernel version. You can find
+ * it in /usr/src/linux/$(uname -r)/include/generated/utsrelease.h for new
+ * enough versions of Ubuntu. Otherwise you can simply see it in the output of
+ * uname as the 4th digit of the kernel. The UTS_UBUNTU_RELEASE_ABI is not in
+ * the linux-source package, but in the linux-headers package. It begins to
+ * appear in later releases of 14.04 and 14.10.
+ *
+ * Ex:
+ * <Ubuntu 14.04.1>
+ *  $uname -r
+ *  3.13.0-45-generic
+ * ABI is 45
+ *
+ * <Ubuntu 14.10>
+ *  $uname -r
+ *  3.16.0-23-generic
+ * ABI is 23
+ */
+#ifndef UTS_UBUNTU_RELEASE_ABI
+#define UTS_UBUNTU_RELEASE_ABI 0
+#define UBUNTU_VERSION_CODE 0
+#else
+/* Ubuntu does not provide an actual release version macro, so we use the
+ * kernel version plus the ABI to generate a unique version code specific to
+ * Ubuntu.
+ * In addition, we mask the lower 8 bits of LINUX_VERSION_CODE in order to
+ * ignore differences in sublevel which are not important since we have the
+ * ABI value. Otherwise, it becomes impossible to correlate ABI to version for
+ * ordering checks.
+ */
+#define UBUNTU_VERSION_CODE (((~0xFF & LINUX_VERSION_CODE) << 8) + \
+			     UTS_UBUNTU_RELEASE_ABI)
+
+#if UTS_UBUNTU_RELEASE_ABI > 255
+#error UTS_UBUNTU_RELEASE_ABI is too large...
+#endif /* UTS_UBUNTU_RELEASE_ABI > 255 */
+
+#if ( LINUX_VERSION_CODE <= KERNEL_VERSION(3,0,0) )
+/* Our version code scheme does not make sense for 3.0 and older kernels,
+ * and kcompat has no support for that scenario. Thus, treat this as a
+ * non-Ubuntu kernel. It might be better to error here.
+ */
+#define UTS_UBUNTU_RELEASE_ABI 0
+#define UBUNTU_VERSION_CODE 0
+#endif
+
+#endif
+
+/* Note that the 3rd digit is always zero, and will be ignored. This is
+ * because Ubuntu kernels are based on x.y.0-ABI values, and while their linux
+ * version codes are 3 digit, this 3rd digit is superseded by the ABI value.
+ */
+#define UBUNTU_VERSION(a,b,c,d) ((KERNEL_VERSION(a,b,0) << 8) + (d))
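+
+/* Worked example for 3.13.0-45-generic (Ubuntu 14.04.1 above):
+ *
+ *	KERNEL_VERSION(3,13,0)    = 0x30d00
+ *	UBUNTU_VERSION_CODE       = (0x30d00 << 8) + 45 = 0x30d002d
+ *	UBUNTU_VERSION(3,13,0,45) = (0x30d00 << 8) + 45 = 0x30d002d
+ *
+ * so a check like "#if UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,45)"
+ * compares release and ABI as a single integer.
+ */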
+
+/* SuSE version macros are the same as Linux kernel version macro */
+#ifndef SLE_VERSION
+#define SLE_VERSION(a,b,c)	KERNEL_VERSION(a,b,c)
+#endif
+#define SLE_LOCALVERSION(a,b,c)	KERNEL_VERSION(a,b,c)
+#ifdef CONFIG_SUSE_KERNEL
+#if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) )
+/* SLES11 GA is 2.6.27 based */
+#define SLE_VERSION_CODE SLE_VERSION(11,0,0)
+#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) )
+/* SLES11 SP1 is 2.6.32 based */
+#define SLE_VERSION_CODE SLE_VERSION(11,1,0)
+#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(3,0,13) )
+/* SLES11 SP2 GA is 3.0.13-0.27 */
+#define SLE_VERSION_CODE SLE_VERSION(11,2,0)
+#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,0,76)))
+/* SLES11 SP3 GA is 3.0.76-0.11 */
+#define SLE_VERSION_CODE SLE_VERSION(11,3,0)
+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,0,101))
+  #if (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(0,8,0))
+  /* some SLES11sp2 update kernels up to 3.0.101-0.7.x */
+  #define SLE_VERSION_CODE SLE_VERSION(11,2,0)
+  #elif (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(63,0,0))
+  /* most SLES11sp3 update kernels */
+  #define SLE_VERSION_CODE SLE_VERSION(11,3,0)
+  #else
+  /* SLES11 SP4 GA (3.0.101-63) and update kernels 3.0.101-63+ */
+  #define SLE_VERSION_CODE SLE_VERSION(11,4,0)
+  #endif
+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,28))
+/* SLES12 GA is 3.12.28-4
+ * kernel updates 3.12.xx-<33 through 52>[.yy] */
+#define SLE_VERSION_CODE SLE_VERSION(12,0,0)
+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,49))
+/* SLES12 SP1 GA is 3.12.49-11
+ * updates 3.12.xx-60.yy where xx={51..} */
+#define SLE_VERSION_CODE SLE_VERSION(12,1,0)
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,21) && \
+       (LINUX_VERSION_CODE <= KERNEL_VERSION(4,4,59))) || \
+       (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,74) && \
+        LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) && \
+        SLE_LOCALVERSION_CODE >= KERNEL_VERSION(92,0,0) && \
+        SLE_LOCALVERSION_CODE <  KERNEL_VERSION(93,0,0)))
+/* SLES12 SP2 GA is 4.4.21-69.
+ * SLES12 SP2 updates before SLES12 SP3 are: 4.4.{21,38,49,59}
+ * SLES12 SP2 updates after SLES12 SP3 are: 4.4.{74,90,103,114,120}
+ * but they all use a SLE_LOCALVERSION_CODE matching 92.nn.y */
+#define SLE_VERSION_CODE SLE_VERSION(12,2,0)
+#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(4,4,73) || \
+        LINUX_VERSION_CODE == KERNEL_VERSION(4,4,82) || \
+        LINUX_VERSION_CODE == KERNEL_VERSION(4,4,92)) || \
+       (LINUX_VERSION_CODE == KERNEL_VERSION(4,4,103) && \
+       (SLE_LOCALVERSION_CODE == KERNEL_VERSION(6,33,0) || \
+        SLE_LOCALVERSION_CODE == KERNEL_VERSION(6,38,0))) || \
+       (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,114) && \
+        LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) && \
+        SLE_LOCALVERSION_CODE >= KERNEL_VERSION(94,0,0) && \
+        SLE_LOCALVERSION_CODE <  KERNEL_VERSION(95,0,0)) )
+/* SLES12 SP3 GM is 4.4.73-5 and update kernels are 4.4.82-6.3.
+ * SLES12 SP3 updates not conflicting with SP2 are: 4.4.{82,92}
+ * SLES12 SP3 updates conflicting with SP2 are:
+ *   - 4.4.103-6.33.1, 4.4.103-6.38.1
+ *   - 4.4.{114,120}-94.nn.y */
+#define SLE_VERSION_CODE SLE_VERSION(12,3,0)
+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,12,14) && \
+       (SLE_LOCALVERSION_CODE == KERNEL_VERSION(94,41,0) || \
+       (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(95,0,0) && \
+        SLE_LOCALVERSION_CODE < KERNEL_VERSION(96,0,0))))
+/* SLES12 SP4 GM is 4.12.14-94.41 and update kernel is 4.12.14-95.x. */
+#define SLE_VERSION_CODE SLE_VERSION(12,4,0)
+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,12,14) && \
+       (SLE_LOCALVERSION_CODE == KERNEL_VERSION(23,0,0) || \
+        SLE_LOCALVERSION_CODE == KERNEL_VERSION(2,0,0) || \
+        SLE_LOCALVERSION_CODE == KERNEL_VERSION(136,0,0) || \
+        (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(25,0,0) && \
+	 SLE_LOCALVERSION_CODE < KERNEL_VERSION(26,0,0)) || \
+	(SLE_LOCALVERSION_CODE >= KERNEL_VERSION(150,0,0) && \
+	 SLE_LOCALVERSION_CODE < KERNEL_VERSION(151,0,0))))
+/* SLES15 Beta1 is 4.12.14-2
+ * SLES15 GM is 4.12.14-23 and update kernel is 4.12.14-{25,136},
+ * and 4.12.14-150.14.
+ */
+#define SLE_VERSION_CODE SLE_VERSION(15,0,0)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,14) && \
+       SLE_LOCALVERSION_CODE >= KERNEL_VERSION(25,23,0))
+/* SLES15 SP1 Beta1 is 4.12.14-25.23 */
+#define SLE_VERSION_CODE SLE_VERSION(15,1,0)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5,3,13))
+/* SLES15 SP2 Beta1 is 5.3.13 */
+#define SLE_VERSION_CODE SLE_VERSION(15,2,0)
+
+/* New SLES kernels must be added here with >= checks based on the kernel
+ * version; the idea is to order from newest to oldest and catch all of
+ * them using the >=.
+ */
+#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */
+#endif /* CONFIG_SUSE_KERNEL */
+#ifndef SLE_VERSION_CODE
+#define SLE_VERSION_CODE 0
+#endif /* SLE_VERSION_CODE */
+#ifndef SLE_LOCALVERSION_CODE
+#define SLE_LOCALVERSION_CODE 0
+#endif /* SLE_LOCALVERSION_CODE */
+
+#ifdef __KLOCWORK__
+/* The following are not compiled into the binary driver; they are here
+ * only to tune Klocwork scans and to work around false-positive issues.
+ */
+#ifdef ARRAY_SIZE
+#undef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+#define memcpy(dest, src, len)	memcpy_s(dest, len, src, len)
+#define memset(dest, ch, len)	memset_s(dest, len, ch, len)
+
+static inline int _kc_test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old;
+	unsigned long flags = 0;
+
+	_atomic_spin_lock_irqsave(p, flags);
+	old = *p;
+	*p = old & ~mask;
+	_atomic_spin_unlock_irqrestore(p, flags);
+
+	return (old & mask) != 0;
+}
+#define test_and_clear_bit(nr, addr) _kc_test_and_clear_bit(nr, addr)
+
+static inline int _kc_test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old;
+	unsigned long flags = 0;
+
+	_atomic_spin_lock_irqsave(p, flags);
+	old = *p;
+	*p = old | mask;
+	_atomic_spin_unlock_irqrestore(p, flags);
+
+	return (old & mask) != 0;
+}
+#define test_and_set_bit(nr, addr) _kc_test_and_set_bit(nr, addr)
+
+#ifdef CONFIG_DYNAMIC_DEBUG
+#undef dev_dbg
+#define dev_dbg(dev, format, arg...) dev_printk(KERN_DEBUG, dev, format, ##arg)
+#undef pr_debug
+#define pr_debug(format, arg...) printk(KERN_DEBUG format, ##arg)
+#endif /* CONFIG_DYNAMIC_DEBUG */
+
+#undef hlist_for_each_entry_safe
+#define hlist_for_each_entry_safe(pos, n, head, member)			     \
+	for (n = NULL, pos = hlist_entry_safe((head)->first, typeof(*(pos)), \
+					      member);			     \
+	     pos;							     \
+	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
+
+#ifdef uninitialized_var
+#undef uninitialized_var
+#define uninitialized_var(x) x = *(&(x))
+#endif
+#endif /* __KLOCWORK__ */
+
+#include "kcompat_vfd.h"
+struct vfd_objects *create_vfd_sysfs(struct pci_dev *pdev, int num_alloc_vfs);
+void destroy_vfd_sysfs(struct pci_dev *pdev, struct vfd_objects *vfd_obj);
+
+/* Older versions of GCC will trigger -Wformat-nonliteral warnings for const
+ * char * strings. Unfortunately, the implementation of do_trace_printk does
+ * this, in order to add a storage attribute to the memory. This was fixed in
+ * GCC 5.1, but we still use older distributions built with GCC 4.x.
+ *
+ * The string pointer is only passed as a const char * to the __trace_bprintk
+ * function. Since that function has the __printf attribute, it will trigger
+ * the warnings. We can't remove the attribute, so instead we'll use the
+ * __diag macro to disable -Wformat-nonliteral around the call to
+ * __trace_bprintk.
+ */
+#if GCC_VERSION < 50100
+#define __trace_bprintk(ip, fmt, args...) ({		\
+	int err;					\
+	__diag_push();					\
+	__diag(ignored "-Wformat-nonliteral");		\
+	err = __trace_bprintk(ip, fmt, ##args);		\
+	__diag_pop();					\
+	err;						\
+})
+#endif /* GCC_VERSION < 5.1.0 */
+
+/* Newer kernels removed <linux/pci-aspm.h> */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0) )
+#define HAVE_PCI_ASPM_H
+#endif
+
+/*****************************************************************************/
+/* 2.4.3 => 2.4.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
+
+/**************************************/
+/* PCI DRIVER API */
+
+#ifndef pci_set_dma_mask
+#define pci_set_dma_mask _kc_pci_set_dma_mask
+int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask);
+#endif
+
+#ifndef pci_request_regions
+#define pci_request_regions _kc_pci_request_regions
+int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name);
+#endif
+
+#ifndef pci_release_regions
+#define pci_release_regions _kc_pci_release_regions
+void _kc_pci_release_regions(struct pci_dev *pdev);
+#endif
+
+/**************************************/
+/* NETWORK DRIVER API */
+
+#ifndef alloc_etherdev
+#define alloc_etherdev _kc_alloc_etherdev
+struct net_device * _kc_alloc_etherdev(int sizeof_priv);
+#endif
+
+#ifndef is_valid_ether_addr
+#define is_valid_ether_addr _kc_is_valid_ether_addr
+int _kc_is_valid_ether_addr(u8 *addr);
+#endif
+
+/**************************************/
+/* MISCELLANEOUS */
+
+#ifndef INIT_TQUEUE
+#define INIT_TQUEUE(_tq, _routine, _data)		\
+	do {						\
+		INIT_LIST_HEAD(&(_tq)->list);		\
+		(_tq)->sync = 0;			\
+		(_tq)->routine = _routine;		\
+		(_tq)->data = _data;			\
+	} while (0)
+#endif
+
+#endif /* 2.4.3 => 2.4.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) )
+/* Generic MII registers. */
+#define MII_BMCR            0x00        /* Basic mode control register */
+#define MII_BMSR            0x01        /* Basic mode status register  */
+#define MII_PHYSID1         0x02        /* PHYS ID 1                   */
+#define MII_PHYSID2         0x03        /* PHYS ID 2                   */
+#define MII_ADVERTISE       0x04        /* Advertisement control reg   */
+#define MII_LPA             0x05        /* Link partner ability reg    */
+#define MII_EXPANSION       0x06        /* Expansion register          */
+/* Basic mode control register. */
+#define BMCR_FULLDPLX           0x0100  /* Full duplex                 */
+#define BMCR_ANENABLE           0x1000  /* Enable auto negotiation     */
+/* Basic mode status register. */
+#define BMSR_ERCAP              0x0001  /* Ext-reg capability          */
+#define BMSR_ANEGCAPABLE        0x0008  /* Able to do auto-negotiation */
+#define BMSR_10HALF             0x0800  /* Can do 10mbps, half-duplex  */
+#define BMSR_10FULL             0x1000  /* Can do 10mbps, full-duplex  */
+#define BMSR_100HALF            0x2000  /* Can do 100mbps, half-duplex */
+#define BMSR_100FULL            0x4000  /* Can do 100mbps, full-duplex */
+/* Advertisement control register. */
+#define ADVERTISE_CSMA          0x0001  /* Only selector supported     */
+#define ADVERTISE_10HALF        0x0020  /* Try for 10mbps half-duplex  */
+#define ADVERTISE_10FULL        0x0040  /* Try for 10mbps full-duplex  */
+#define ADVERTISE_100HALF       0x0080  /* Try for 100mbps half-duplex */
+#define ADVERTISE_100FULL       0x0100  /* Try for 100mbps full-duplex */
+#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
+                       ADVERTISE_100HALF | ADVERTISE_100FULL)
+/* Expansion register for auto-negotiation. */
+#define EXPANSION_ENABLENPAGE   0x0004  /* This enables npage words    */
+#endif
+
+/*****************************************************************************/
+/* 2.4.6 => 2.4.3 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
+
+#ifndef pci_set_power_state
+#define pci_set_power_state _kc_pci_set_power_state
+int _kc_pci_set_power_state(struct pci_dev *dev, int state);
+#endif
+
+#ifndef pci_enable_wake
+#define pci_enable_wake _kc_pci_enable_wake
+int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable);
+#endif
+
+#ifndef pci_disable_device
+#define pci_disable_device _kc_pci_disable_device
+void _kc_pci_disable_device(struct pci_dev *pdev);
+#endif
+
+/* PCI PM entry point syntax changed, so don't support suspend/resume */
+#undef CONFIG_PM
+
+#endif /* 2.4.6 => 2.4.3 */
+
+#ifndef HAVE_PCI_SET_MWI
+#define pci_set_mwi(X) pci_write_config_word(X, \
+			       PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \
+			       PCI_COMMAND_INVALIDATE);
+#define pci_clear_mwi(X) pci_write_config_word(X, \
+			       PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \
+			       ~PCI_COMMAND_INVALIDATE);
+#endif
+
+/*****************************************************************************/
+/* 2.4.10 => 2.4.6 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) )
+
+/**************************************/
+/* MODULE API */
+
+#ifndef MODULE_LICENSE
+	#define MODULE_LICENSE(X)
+#endif
+
+/**************************************/
+/* OTHER */
+
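+/* The backported min/max below keep upstream's compile-time type check:
+ * the dead comparison (void)(&_x == &_y) makes the compiler warn when x
+ * and y have different types, without generating any code.
+ */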
+#undef min
+#define min(x,y) ({ \
+	const typeof(x) _x = (x);	\
+	const typeof(y) _y = (y);	\
+	(void) (&_x == &_y);		\
+	_x < _y ? _x : _y; })
+
+#undef max
+#define max(x,y) ({ \
+	const typeof(x) _x = (x);	\
+	const typeof(y) _y = (y);	\
+	(void) (&_x == &_y);		\
+	_x > _y ? _x : _y; })
+
+#define min_t(type,x,y) ({ \
+	type _x = (x); \
+	type _y = (y); \
+	_x < _y ? _x : _y; })
+
+#define max_t(type,x,y) ({ \
+	type _x = (x); \
+	type _y = (y); \
+	_x > _y ? _x : _y; })
+
+#ifndef list_for_each_safe
+#define list_for_each_safe(pos, n, head) \
+	for (pos = (head)->next, n = pos->next; pos != (head); \
+		pos = n, n = pos->next)
+#endif
+
+#ifndef ____cacheline_aligned_in_smp
+#ifdef CONFIG_SMP
+#define ____cacheline_aligned_in_smp ____cacheline_aligned
+#else
+#define ____cacheline_aligned_in_smp
+#endif /* CONFIG_SMP */
+#endif
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) )
+int _kc_snprintf(char * buf, size_t size, const char *fmt, ...);
+#define snprintf(buf, size, fmt, args...) _kc_snprintf(buf, size, fmt, ##args)
+int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
+#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args)
+#else /* 2.4.8 => 2.4.9 */
+int snprintf(char * buf, size_t size, const char *fmt, ...);
+int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
+#endif
+#endif /* 2.4.10 => 2.4.9 */
+
+
+/*****************************************************************************/
+/* 2.4.12 => 2.4.10 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,12) )
+#ifndef HAVE_NETIF_MSG
+#define HAVE_NETIF_MSG 1
+enum {
+	NETIF_MSG_DRV		= 0x0001,
+	NETIF_MSG_PROBE		= 0x0002,
+	NETIF_MSG_LINK		= 0x0004,
+	NETIF_MSG_TIMER		= 0x0008,
+	NETIF_MSG_IFDOWN	= 0x0010,
+	NETIF_MSG_IFUP		= 0x0020,
+	NETIF_MSG_RX_ERR	= 0x0040,
+	NETIF_MSG_TX_ERR	= 0x0080,
+	NETIF_MSG_TX_QUEUED	= 0x0100,
+	NETIF_MSG_INTR		= 0x0200,
+	NETIF_MSG_TX_DONE	= 0x0400,
+	NETIF_MSG_RX_STATUS	= 0x0800,
+	NETIF_MSG_PKTDATA	= 0x1000,
+	NETIF_MSG_HW		= 0x2000,
+	NETIF_MSG_WOL		= 0x4000,
+};
+
+#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
+#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
+#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
+#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
+#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
+#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
+#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
+#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
+#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
+#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
+#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
+#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
+#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
+#endif /* !HAVE_NETIF_MSG */
+#endif /* 2.4.12 => 2.4.10 */
+
+/*****************************************************************************/
+/* 2.4.13 => 2.4.12 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
+
+/**************************************/
+/* PCI DMA MAPPING */
+
+#ifndef virt_to_page
+	#define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT))
+#endif
+
+#ifndef pci_map_page
+#define pci_map_page _kc_pci_map_page
+u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction);
+#endif
+
+#ifndef pci_unmap_page
+#define pci_unmap_page _kc_pci_unmap_page
+void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction);
+#endif
+
+/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */
+
+#undef DMA_32BIT_MASK
+#define DMA_32BIT_MASK	0xffffffff
+#undef DMA_64BIT_MASK
+#define DMA_64BIT_MASK	0xffffffff
+
+/**************************************/
+/* OTHER */
+
+#ifndef cpu_relax
+#define cpu_relax()	rep_nop()
+#endif
+
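+/* 802.1Q-tagged Ethernet header: the standard Ethernet header followed by
+ * the VLAN protocol ID, the tag control info, and the encapsulated
+ * ethertype.
+ */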
+struct vlan_ethhdr {
+	unsigned char h_dest[ETH_ALEN];
+	unsigned char h_source[ETH_ALEN];
+	unsigned short h_vlan_proto;
+	unsigned short h_vlan_TCI;
+	unsigned short h_vlan_encapsulated_proto;
+};
+#endif /* 2.4.13 => 2.4.12 */
+
+/*****************************************************************************/
+/* 2.4.17 => 2.4.12 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) )
+
+#ifndef __devexit_p
+	#define __devexit_p(x) &(x)
+#endif
+
+#endif /* 2.4.17 => 2.4.12 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18) )
+#define NETIF_MSG_HW	0x2000
+#define NETIF_MSG_WOL	0x4000
+
+#ifndef netif_msg_hw
+#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
+#endif
+#ifndef netif_msg_wol
+#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)
+#endif
+#endif /* 2.4.18 */
+
+/*****************************************************************************/
+
+/*****************************************************************************/
+/* 2.4.20 => 2.4.19 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) )
+
+/* we won't support NAPI on less than 2.4.20 */
+#ifdef NAPI
+#undef NAPI
+#endif
+
+#endif /* 2.4.20 => 2.4.19 */
+
+/*****************************************************************************/
+/* 2.4.22 => 2.4.17 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
+#define pci_name(x)	((x)->slot_name)
+
+#ifndef SUPPORTED_10000baseT_Full
+#define SUPPORTED_10000baseT_Full	BIT(12)
+#endif
+#ifndef ADVERTISED_10000baseT_Full
+#define ADVERTISED_10000baseT_Full	BIT(12)
+#endif
+#endif
+
+/*****************************************************************************/
+/*****************************************************************************/
+/* 2.4.23 => 2.4.22 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) )
+/*****************************************************************************/
+#ifdef NAPI
+#ifndef netif_poll_disable
+#define netif_poll_disable(x) _kc_netif_poll_disable(x)
+static inline void _kc_netif_poll_disable(struct net_device *netdev)
+{
+	while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) {
+		/* No hurry */
+		current->state = TASK_INTERRUPTIBLE;
+		schedule_timeout(1);
+	}
+}
+#endif
+#ifndef netif_poll_enable
+#define netif_poll_enable(x) _kc_netif_poll_enable(x)
+static inline void _kc_netif_poll_enable(struct net_device *netdev)
+{
+	clear_bit(__LINK_STATE_RX_SCHED, &netdev->state);
+}
+#endif
+#endif /* NAPI */
+#ifndef netif_tx_disable
+#define netif_tx_disable(x) _kc_netif_tx_disable(x)
+static inline void _kc_netif_tx_disable(struct net_device *dev)
+{
+	spin_lock_bh(&dev->xmit_lock);
+	netif_stop_queue(dev);
+	spin_unlock_bh(&dev->xmit_lock);
+}
+#endif
+#else /* 2.4.23 => 2.4.22 */
+#define HAVE_SCTP
+#endif /* 2.4.23 => 2.4.22 */
+
+/*****************************************************************************/
+/* 2.6.4 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \
+    ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
+      LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) )
+#define ETHTOOL_OPS_COMPAT
+#endif /* 2.6.4 => 2.6.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) )
+#define __user
+#endif /* < 2.4.27 */
+
+/*****************************************************************************/
+/* 2.5.71 => 2.4.x */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) )
+#define sk_protocol protocol
+#define pci_get_device pci_find_device
+#endif /* 2.5.71 => 2.4.x */
+
+/*****************************************************************************/
+/* < 2.4.27 or 2.6.0 <= 2.6.5 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \
+    ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
+      LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) )
+
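+/* Backported netif_msg_init(): a negative or out-of-range debug value
+ * selects the driver's defaults, zero silences all messages, and a value
+ * of N enables the lowest N message bits.
+ */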
+#ifndef netif_msg_init
+#define netif_msg_init _kc_netif_msg_init
+static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits)
+{
+	/* use default */
+	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
+		return default_msg_enable_bits;
+	if (debug_value == 0) /* no output */
+		return 0;
+	/* set low N bits */
+	return (1 << debug_value) - 1;
+}
+#endif
+
+#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */
+/*****************************************************************************/
+#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \
+     (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \
+      ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )))
+#define netdev_priv(x) x->priv
+#endif
+
+/*****************************************************************************/
+/* <= 2.5.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) )
+#include <linux/rtnetlink.h>
+#undef pci_register_driver
+#define pci_register_driver pci_module_init
+
+/*
+ * Most of the dma compat code is copied/modified from the 2.4.37
+ * /include/linux/libata-compat.h header file
+ */
+/* These definitions mirror those in pci.h, so they can be used
+ * interchangeably with their PCI_ counterparts */
+enum dma_data_direction {
+	DMA_BIDIRECTIONAL = 0,
+	DMA_TO_DEVICE = 1,
+	DMA_FROM_DEVICE = 2,
+	DMA_NONE = 3,
+};
+
+struct device {
+	struct pci_dev pdev;
+};
+
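+/* These casts are safe only because the compat struct device above embeds
+ * a pci_dev as its first (and only) member, so both start at the same
+ * address.
+ */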
+static inline struct pci_dev *to_pci_dev (struct device *dev)
+{
+	return (struct pci_dev *) dev;
+}
+static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
+{
+	return (struct device *) pdev;
+}
+#define pdev_printk(lvl, pdev, fmt, args...) 	\
+	printk("%s %s: " fmt, lvl, pci_name(pdev), ## args)
+#define dev_err(dev, fmt, args...)            \
+	pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args)
+#define dev_info(dev, fmt, args...)            \
+	pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args)
+#define dev_warn(dev, fmt, args...)            \
+	pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args)
+#define dev_notice(dev, fmt, args...)            \
+	pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args)
+#define dev_dbg(dev, fmt, args...) \
+	pdev_printk(KERN_DEBUG, to_pci_dev(dev), fmt, ## args)
+
+/* NOTE: dangerous! we ignore the 'gfp' argument */
+#define dma_alloc_coherent(dev,sz,dma,gfp) \
+	pci_alloc_consistent(to_pci_dev(dev),(sz),(dma))
+#define dma_free_coherent(dev,sz,addr,dma_addr) \
+	pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr))
+
+#define dma_map_page(dev,a,b,c,d) \
+	pci_map_page(to_pci_dev(dev),(a),(b),(c),(d))
+#define dma_unmap_page(dev,a,b,c) \
+	pci_unmap_page(to_pci_dev(dev),(a),(b),(c))
+
+#define dma_map_single(dev,a,b,c) \
+	pci_map_single(to_pci_dev(dev),(a),(b),(c))
+#define dma_unmap_single(dev,a,b,c) \
+	pci_unmap_single(to_pci_dev(dev),(a),(b),(c))
+
+#define dma_map_sg(dev, sg, nents, dir) \
+	pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir))
+#define dma_unmap_sg(dev, sg, nents, dir) \
+	pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir))
+
+#define dma_sync_single(dev,a,b,c) \
+	pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c))
+
+/* for a range, just sync everything; that's all the PCI API can do */
+#define dma_sync_single_range(dev,addr,off,sz,dir) \
+	pci_dma_sync_single(to_pci_dev(dev),(addr),(off)+(sz),(dir))
+
+#define dma_set_mask(dev,mask) \
+	pci_set_dma_mask(to_pci_dev(dev),(mask))
+
+/* hlist_* code - double linked lists */
+struct hlist_head {
+	struct hlist_node *first;
+};
+
+struct hlist_node {
+	struct hlist_node *next, **pprev;
+};
+
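+/* pprev points at the previous node's 'next' field (or at head->first),
+ * so a node can be unlinked in O(1) without a back pointer to the whole
+ * previous node.
+ */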
+static inline void __hlist_del(struct hlist_node *n)
+{
+	struct hlist_node *next = n->next;
+	struct hlist_node **pprev = n->pprev;
+	*pprev = next;
+	if (next)
+		next->pprev = pprev;
+}
+
+static inline void hlist_del(struct hlist_node *n)
+{
+	__hlist_del(n);
+	n->next = NULL;
+	n->pprev = NULL;
+}
+
+static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+{
+	struct hlist_node *first = h->first;
+	n->next = first;
+	if (first)
+		first->pprev = &n->next;
+	h->first = n;
+	n->pprev = &h->first;
+}
+
+static inline int hlist_empty(const struct hlist_head *h)
+{
+	return !h->first;
+}
+#define HLIST_HEAD_INIT { .first = NULL }
+#define HLIST_HEAD(name) struct hlist_head name = {  .first = NULL }
+#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
+static inline void INIT_HLIST_NODE(struct hlist_node *h)
+{
+	h->next = NULL;
+	h->pprev = NULL;
+}
+
+#ifndef might_sleep
+#define might_sleep()
+#endif
+#else
+static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
+{
+	return &pdev->dev;
+}
+#endif /* <= 2.5.0 */
+
+/*****************************************************************************/
+/* 2.5.28 => 2.4.23 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
+
+#include <linux/tqueue.h>
+#define work_struct tq_struct
+#undef INIT_WORK
+#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a)
+#undef container_of
+#define container_of list_entry
+#define schedule_work schedule_task
+#define flush_scheduled_work flush_scheduled_tasks
+#define cancel_work_sync(x) flush_scheduled_work()
+
+#endif /* 2.5.28 => 2.4.17 */
+
+/*****************************************************************************/
+/* 2.6.0 => 2.5.28 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+#ifndef read_barrier_depends
+#define read_barrier_depends() rmb()
+#endif
+
+#ifndef rcu_head
+struct __kc_callback_head {
+	struct __kc_callback_head *next;
+	void (*func)(struct callback_head *head);
+};
+#define rcu_head __kc_callback_head
+#endif
+
+#undef get_cpu
+#define get_cpu() smp_processor_id()
+#undef put_cpu
+#define put_cpu() do { } while(0)
+#define MODULE_INFO(version, _version)
+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
+#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1
+#endif
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1
+#endif
+#ifndef CONFIG_IGC_DISABLE_PACKET_SPLIT
+#define CONFIG_IGC_DISABLE_PACKET_SPLIT 1
+#endif
+
+#define dma_set_coherent_mask(dev,mask) 1
+
+#undef dev_put
+#define dev_put(dev) __dev_put(dev)
+
+#ifndef skb_fill_page_desc
+#define skb_fill_page_desc _kc_skb_fill_page_desc
+void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size);
+#endif
+
+#undef ALIGN
+#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
+
+#ifndef page_count
+#define page_count(p) atomic_read(&(p)->count)
+#endif
+
+#ifdef MAX_NUMNODES
+#undef MAX_NUMNODES
+#endif
+#define MAX_NUMNODES 1
+
+/* find_first_bit and find_next_bit are not defined for most
+ * 2.4 kernels (except for the Red Hat 2.4.21 kernels).
+ */
+#include <linux/bitops.h>
+#define BITOP_WORD(nr)          ((nr) / BITS_PER_LONG)
+#undef find_next_bit
+#define find_next_bit _kc_find_next_bit
+unsigned long _kc_find_next_bit(const unsigned long *addr, unsigned long size,
+				unsigned long offset);
+#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
+
+#ifndef netdev_name
+static inline const char *_kc_netdev_name(const struct net_device *dev)
+{
+	if (strchr(dev->name, '%'))
+		return "(unregistered net_device)";
+	return dev->name;
+}
+#define netdev_name(netdev)	_kc_netdev_name(netdev)
+#endif /* netdev_name */
+
+#ifndef strlcpy
+#define strlcpy _kc_strlcpy
+size_t _kc_strlcpy(char *dest, const char *src, size_t size);
+#endif /* strlcpy */
+
+#ifndef do_div
+#if BITS_PER_LONG == 64
+# define do_div(n,base) ({					\
+	uint32_t __base = (base);				\
+	uint32_t __rem;						\
+	__rem = ((uint64_t)(n)) % __base;			\
+	(n) = ((uint64_t)(n)) / __base;				\
+	__rem;							\
+ })
+#elif BITS_PER_LONG == 32
+uint32_t _kc__div64_32(uint64_t *dividend, uint32_t divisor);
+# define do_div(n,base) ({				\
+	uint32_t __base = (base);			\
+	uint32_t __rem;					\
+	if (likely(((n) >> 32) == 0)) {			\
+		__rem = (uint32_t)(n) % __base;		\
+		(n) = (uint32_t)(n) / __base;		\
+	} else 						\
+		__rem = _kc__div64_32(&(n), __base);	\
+	__rem;						\
+ })
+#else /* BITS_PER_LONG == ?? */
+# error do_div() does not yet support the C64
+#endif /* BITS_PER_LONG */
+#endif /* do_div */
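+/* Usage reminder: do_div() divides a 64-bit value in place and returns
+ * the 32-bit remainder, e.g.:
+ *	u64 ns = stamp;
+ *	u32 rem = do_div(ns, NSEC_PER_SEC);	(ns now holds the quotient)
+ */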
+
+#ifndef NSEC_PER_SEC
+#define NSEC_PER_SEC	1000000000L
+#endif
+
+#undef HAVE_I2C_SUPPORT
+#else /* 2.6.0 */
+
+#endif /* 2.6.0 => 2.5.28 */
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )
+#define dma_pool pci_pool
+#define dma_pool_destroy pci_pool_destroy
+#define dma_pool_alloc pci_pool_alloc
+#define dma_pool_free pci_pool_free
+
+#define dma_pool_create(name,dev,size,align,allocation) \
+       pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation))
+#endif /* < 2.6.3 */
+
+/*****************************************************************************/
+/* 2.6.4 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
+#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
+#endif /* 2.6.4 => 2.6.0 */
+
+/*****************************************************************************/
+/* 2.6.5 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) )
+#define dma_sync_single_for_cpu		dma_sync_single
+#define dma_sync_single_for_device	dma_sync_single
+#define dma_sync_single_range_for_cpu		dma_sync_single_range
+#define dma_sync_single_range_for_device	dma_sync_single_range
+#ifndef pci_dma_mapping_error
+#define pci_dma_mapping_error _kc_pci_dma_mapping_error
+static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr)
+{
+	return dma_addr == 0;
+}
+#endif
+#endif /* 2.6.5 => 2.6.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
+int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...);
+#define scnprintf(buf, size, fmt, args...) _kc_scnprintf(buf, size, fmt, ##args)
+#endif /* < 2.6.4 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) )
+/* taken from 2.6 include/linux/bitmap.h */
+#undef bitmap_zero
+#define bitmap_zero _kc_bitmap_zero
+static inline void _kc_bitmap_zero(unsigned long *dst, int nbits)
+{
+        if (nbits <= BITS_PER_LONG)
+                *dst = 0UL;
+        else {
+                int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+                memset(dst, 0, len);
+        }
+}
+#define page_to_nid(x) 0
+
+#endif /* < 2.6.6 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) )
+#undef if_mii
+#define if_mii _kc_if_mii
+static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq)
+{
+	return (struct mii_ioctl_data *) &rq->ifr_ifru;
+}
+
+#ifndef __force
+#define __force
+#endif
+#endif /* < 2.6.7 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
+#ifndef PCI_EXP_DEVCTL
+#define PCI_EXP_DEVCTL 8
+#endif
+#ifndef PCI_EXP_DEVCTL_CERE
+#define PCI_EXP_DEVCTL_CERE 0x0001
+#endif
+#define PCI_EXP_FLAGS		2	/* Capabilities register */
+#define PCI_EXP_FLAGS_VERS	0x000f	/* Capability version */
+#define PCI_EXP_FLAGS_TYPE	0x00f0	/* Device/Port type */
+#define  PCI_EXP_TYPE_ENDPOINT	0x0	/* Express Endpoint */
+#define  PCI_EXP_TYPE_LEG_END	0x1	/* Legacy Endpoint */
+#define  PCI_EXP_TYPE_ROOT_PORT 0x4	/* Root Port */
+#define  PCI_EXP_TYPE_DOWNSTREAM 0x6	/* Downstream Port */
+#define PCI_EXP_FLAGS_SLOT	0x0100	/* Slot implemented */
+#define PCI_EXP_DEVCAP		4	/* Device capabilities */
+#define PCI_EXP_DEVSTA		10	/* Device Status */
+#define msleep(x)	do { set_current_state(TASK_UNINTERRUPTIBLE); \
+				schedule_timeout((x * HZ)/1000 + 2); \
+			} while (0)
+
+#endif /* < 2.6.8 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9))
+#include <net/dsfield.h>
+#define __iomem
+
+#ifndef kcalloc
+#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags)
+void *_kc_kzalloc(size_t size, int flags);
+#endif
+#define MSEC_PER_SEC    1000L
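+/* Backported jiffies-to-msecs conversion: scale exactly when HZ divides
+ * MSEC_PER_SEC (or vice versa), otherwise fall back to a generic
+ * multiply-then-divide.
+ */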
+static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j)
+{
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+	return (MSEC_PER_SEC / HZ) * j;
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+	return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
+#else
+	return (j * MSEC_PER_SEC) / HZ;
+#endif
+}
+static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m)
+{
+	if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET))
+		return MAX_JIFFY_OFFSET;
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+	return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+	return m * (HZ / MSEC_PER_SEC);
+#else
+	return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
+#endif
+}
+
+#define msleep_interruptible _kc_msleep_interruptible
+static inline unsigned long _kc_msleep_interruptible(unsigned int msecs)
+{
+	unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1;
+
+	while (timeout && !signal_pending(current)) {
+		__set_current_state(TASK_INTERRUPTIBLE);
+		timeout = schedule_timeout(timeout);
+	}
+	return _kc_jiffies_to_msecs(timeout);
+}
+
+/* Basic mode control register. */
+#define BMCR_SPEED1000		0x0040  /* MSB of Speed (1000)         */
+
+#ifndef __le16
+#define __le16 u16
+#endif
+#ifndef __le32
+#define __le32 u32
+#endif
+#ifndef __le64
+#define __le64 u64
+#endif
+#ifndef __be16
+#define __be16 u16
+#endif
+#ifndef __be32
+#define __be32 u32
+#endif
+#ifndef __be64
+#define __be64 u64
+#endif
+
+static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
+{
+	return (struct vlan_ethhdr *)skb->mac.raw;
+}
+
+/* Wake-On-Lan options. */
+#define WAKE_PHY		BIT(0)
+#define WAKE_UCAST		BIT(1)
+#define WAKE_MCAST		BIT(2)
+#define WAKE_BCAST		BIT(3)
+#define WAKE_ARP		BIT(4)
+#define WAKE_MAGIC		BIT(5)
+#define WAKE_MAGICSECURE	BIT(6) /* only meaningful if WAKE_MAGIC */
+
+#define skb_header_pointer _kc_skb_header_pointer
+static inline void *_kc_skb_header_pointer(const struct sk_buff *skb,
+					    int offset, int len, void *buffer)
+{
+	int hlen = skb_headlen(skb);
+
+	if (hlen - offset >= len)
+		return skb->data + offset;
+
+#ifdef MAX_SKB_FRAGS
+	if (skb_copy_bits(skb, offset, buffer, len) < 0)
+		return NULL;
+
+	return buffer;
+#else
+	return NULL;
+#endif
+
+}
+
+#ifndef NETDEV_TX_OK
+#define NETDEV_TX_OK 0
+#endif
+#ifndef NETDEV_TX_BUSY
+#define NETDEV_TX_BUSY 1
+#endif
+#ifndef NETDEV_TX_LOCKED
+#define NETDEV_TX_LOCKED -1
+#endif
+
+#ifndef __bitwise
+#define __bitwise
+#endif
+#endif /* < 2.6.9 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
+#ifdef module_param_array_named
+#undef module_param_array_named
+#define module_param_array_named(name, array, type, nump, perm)          \
+	static struct kparam_array __param_arr_##name                    \
+	= { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \
+	    sizeof(array[0]), array };                                   \
+	module_param_call(name, param_array_set, param_array_get,        \
+			  &__param_arr_##name, perm)
+#endif /* module_param_array_named */
+/*
+ * num_online_nodes() is broken for all < 2.6.10 kernels.  This is needed
+ * to support the Node module parameter of ixgbe.
+ */
+#undef num_online_nodes
+#define num_online_nodes(n) 1
+extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES);
+#undef node_online_map
+#define node_online_map _kcompat_node_online_map
+#define pci_get_class pci_find_class
+#endif /* < 2.6.10 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) )
+#define PCI_D0      0
+#define PCI_D1      1
+#define PCI_D2      2
+#define PCI_D3hot   3
+#define PCI_D3cold  4
+typedef int pci_power_t;
+#define pci_choose_state(pdev,state) state
+#define PMSG_SUSPEND 3
+#define PCI_EXP_LNKCTL	16
+
+#undef NETIF_F_LLTX
+
+#ifndef ARCH_HAS_PREFETCH
+#define prefetch(X)
+#endif
+
+#ifndef NET_IP_ALIGN
+#define NET_IP_ALIGN 2
+#endif
+
+#define KC_USEC_PER_SEC	1000000L
+#define usecs_to_jiffies _kc_usecs_to_jiffies
+static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j)
+{
+#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
+	return (KC_USEC_PER_SEC / HZ) * j;
+#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
+	return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC);
+#else
+	return (j * KC_USEC_PER_SEC) / HZ;
+#endif
+}
+static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m)
+{
+	if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET))
+		return MAX_JIFFY_OFFSET;
+#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
+	return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ);
+#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
+	return m * (HZ / KC_USEC_PER_SEC);
+#else
+	return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC;
+#endif
+}
+
+#define PCI_EXP_LNKCAP		12	/* Link Capabilities */
+#define PCI_EXP_LNKSTA		18	/* Link Status */
+#define PCI_EXP_SLTCAP		20	/* Slot Capabilities */
+#define PCI_EXP_SLTCTL		24	/* Slot Control */
+#define PCI_EXP_SLTSTA		26	/* Slot Status */
+#define PCI_EXP_RTCTL		28	/* Root Control */
+#define PCI_EXP_RTCAP		30	/* Root Capabilities */
+#define PCI_EXP_RTSTA		32	/* Root Status */
+#endif /* < 2.6.11 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) )
+#include <linux/reboot.h>
+#define USE_REBOOT_NOTIFIER
+
+/* Generic MII registers. */
+#define MII_CTRL1000        0x09        /* 1000BASE-T control          */
+#define MII_STAT1000        0x0a        /* 1000BASE-T status           */
+/* Advertisement control register. */
+#define ADVERTISE_PAUSE_CAP     0x0400  /* Try for pause               */
+#define ADVERTISE_PAUSE_ASYM    0x0800  /* Try for asymmetric pause     */
+/* Link partner ability register. */
+#define LPA_PAUSE_CAP		0x0400	/* Can pause                   */
+#define LPA_PAUSE_ASYM		0x0800	/* Can pause asymmetrically    */
+/* 1000BASE-T Control register */
+#define ADVERTISE_1000FULL      0x0200  /* Advertise 1000BASE-T full duplex */
+#define ADVERTISE_1000HALF	0x0100  /* Advertise 1000BASE-T half duplex */
+/* 1000BASE-T Status register */
+#define LPA_1000LOCALRXOK	0x2000	/* Link partner local receiver status */
+#define LPA_1000REMRXOK		0x1000	/* Link partner remote receiver status */
+
+#ifndef is_zero_ether_addr
+#define is_zero_ether_addr _kc_is_zero_ether_addr
+static inline int _kc_is_zero_ether_addr(const u8 *addr)
+{
+	return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
+}
+#endif /* is_zero_ether_addr */
+#ifndef is_multicast_ether_addr
+#define is_multicast_ether_addr _kc_is_multicast_ether_addr
+static inline int _kc_is_multicast_ether_addr(const u8 *addr)
+{
+	return addr[0] & 0x01;
+}
+#endif /* is_multicast_ether_addr */
+#endif /* < 2.6.12 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) )
+#ifndef kstrdup
+#define kstrdup _kc_kstrdup
+char *_kc_kstrdup(const char *s, unsigned int gfp);
+#endif
+#endif /* < 2.6.13 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
+#define pm_message_t u32
+#ifndef kzalloc
+#define kzalloc _kc_kzalloc
+void *_kc_kzalloc(size_t size, int flags);
+#endif
+
+/* Generic MII registers. */
+#define MII_ESTATUS	    0x0f	/* Extended Status */
+/* Basic mode status register. */
+#define BMSR_ESTATEN		0x0100	/* Extended Status in R15 */
+/* Extended status register. */
+#define ESTATUS_1000_TFULL	0x2000	/* Can do 1000BT Full */
+#define ESTATUS_1000_THALF	0x1000	/* Can do 1000BT Half */
+
+#define SUPPORTED_Pause	        BIT(13)
+#define SUPPORTED_Asym_Pause	BIT(14)
+#define ADVERTISED_Pause	BIT(13)
+#define ADVERTISED_Asym_Pause	BIT(14)
+
+#if (!(RHEL_RELEASE_CODE && \
+       (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,3)) && \
+       (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))))
+#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)) && !defined(gfp_t))
+#define gfp_t unsigned
+#else
+typedef unsigned gfp_t;
+#endif
+#endif /* !RHEL4.3->RHEL5.0 */
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) )
+#ifdef CONFIG_X86_64
+#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir)       \
+	dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir))
+#define dma_sync_single_range_for_device(dev, addr, off, sz, dir)    \
+	dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir))
+#endif
+#endif
+#endif /* < 2.6.14 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) )
+#ifndef kfree_rcu
+/* this is placed here due to a lack of rcu_barrier in previous kernels */
+#define kfree_rcu(_ptr, _offset) kfree(_ptr)
+#endif /* kfree_rcu */
+#ifndef vmalloc_node
+#define vmalloc_node(a,b) vmalloc(a)
+#endif /* vmalloc_node */
+
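+/* Open-coded setup_timer() for kernels that lack it: assign the callback
+ * and its data, then initialize the timer, as later kernels do in one
+ * call.
+ */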
+#define setup_timer(_timer, _function, _data) \
+do { \
+	(_timer)->function = _function; \
+	(_timer)->data = _data; \
+	init_timer(_timer); \
+} while (0)
+#ifndef device_can_wakeup
+#define device_can_wakeup(dev)	(1)
+#endif
+#ifndef device_set_wakeup_enable
+#define device_set_wakeup_enable(dev, val)	do {} while (0)
+#endif
+#ifndef device_init_wakeup
+#define device_init_wakeup(dev,val) do {} while (0)
+#endif
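+/* Compares two Ethernet addresses as three 16-bit words; like the
+ * kernel's compare_ether_addr(), both addresses must be at least 16-bit
+ * aligned, and the result is nonzero when they differ.
+ */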
+static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2)
+{
+	const u16 *a = (const u16 *) addr1;
+	const u16 *b = (const u16 *) addr2;
+
+	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
+}
+#undef compare_ether_addr
+#define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2)
+#endif /* < 2.6.15 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) )
+#undef DEFINE_MUTEX
+#define DEFINE_MUTEX(x)	DECLARE_MUTEX(x)
+#define mutex_lock(x)	down_interruptible(x)
+#define mutex_unlock(x)	up(x)
+
+#ifndef ____cacheline_internodealigned_in_smp
+#ifdef CONFIG_SMP
+#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp
+#else
+#define ____cacheline_internodealigned_in_smp
+#endif /* CONFIG_SMP */
+#endif /* ____cacheline_internodealigned_in_smp */
+#undef HAVE_PCI_ERS
+#else /* 2.6.16 and above */
+#undef HAVE_PCI_ERS
+#define HAVE_PCI_ERS
+#if ( SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(10,4,0) )
+#ifdef device_can_wakeup
+#undef device_can_wakeup
+#endif /* device_can_wakeup */
+#define device_can_wakeup(dev) 1
+#endif /* SLE_VERSION(10,4,0) */
+#endif /* < 2.6.16 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) )
+#ifndef dev_notice
+#define dev_notice(dev, fmt, args...)            \
+	dev_printk(KERN_NOTICE, dev, fmt, ## args)
+#endif
+
+#ifndef first_online_node
+#define first_online_node 0
+#endif
+#ifndef NET_SKB_PAD
+#define NET_SKB_PAD 16
+#endif
+#endif /* < 2.6.17 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )
+
+#ifndef IRQ_HANDLED
+#define irqreturn_t void
+#define IRQ_HANDLED
+#define IRQ_NONE
+#endif
+
+#ifndef IRQF_PROBE_SHARED
+#ifdef SA_PROBEIRQ
+#define IRQF_PROBE_SHARED SA_PROBEIRQ
+#else
+#define IRQF_PROBE_SHARED 0
+#endif
+#endif
+
+#ifndef IRQF_SHARED
+#define IRQF_SHARED SA_SHIRQ
+#endif
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+#ifndef skb_is_gso
+#ifdef NETIF_F_TSO
+#define skb_is_gso _kc_skb_is_gso
+static inline int _kc_skb_is_gso(const struct sk_buff *skb)
+{
+	return skb_shinfo(skb)->gso_size;
+}
+#else
+#define skb_is_gso(a) 0
+#endif
+#endif
+
+#ifndef resource_size_t
+#define resource_size_t unsigned long
+#endif
+
+#ifdef skb_pad
+#undef skb_pad
+#endif
+#define skb_pad(x,y) _kc_skb_pad(x, y)
+int _kc_skb_pad(struct sk_buff *skb, int pad);
+#ifdef skb_padto
+#undef skb_padto
+#endif
+#define skb_padto(x,y) _kc_skb_padto(x, y)
+static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len)
+{
+	unsigned int size = skb->len;
+	if(likely(size >= len))
+		return 0;
+	return _kc_skb_pad(skb, len - size);
+}
+
+#ifndef DECLARE_PCI_UNMAP_ADDR
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
+	dma_addr_t ADDR_NAME
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
+	u32 LEN_NAME
+#define pci_unmap_addr(PTR, ADDR_NAME) \
+	((PTR)->ADDR_NAME)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
+	(((PTR)->ADDR_NAME) = (VAL))
+#define pci_unmap_len(PTR, LEN_NAME) \
+	((PTR)->LEN_NAME)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
+	(((PTR)->LEN_NAME) = (VAL))
+#endif /* DECLARE_PCI_UNMAP_ADDR */
+#endif /* < 2.6.18 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
+enum pcie_link_width {
+	PCIE_LNK_WIDTH_RESRV    = 0x00,
+	PCIE_LNK_X1             = 0x01,
+	PCIE_LNK_X2             = 0x02,
+	PCIE_LNK_X4             = 0x04,
+	PCIE_LNK_X8             = 0x08,
+	PCIE_LNK_X12            = 0x0C,
+	PCIE_LNK_X16            = 0x10,
+	PCIE_LNK_X32            = 0x20,
+	PCIE_LNK_WIDTH_UNKNOWN  = 0xFF,
+};
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,0)))
+#define i_private u.generic_ip
+#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.0) */
+
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#endif
+#ifndef __ALIGN_MASK
+#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
+#endif
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) )
+#if (!((RHEL_RELEASE_CODE && \
+        ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \
+          RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \
+         (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0))))))
+typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *);
+#endif
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
+#undef CONFIG_INET_LRO
+#undef CONFIG_INET_LRO_MODULE
+#endif
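+/* request_irq() wrapper: older kernels pass a trailing struct pt_regs *
+ * to the handler, so a modern two-argument handler is cast to the old
+ * type here; this relies on the calling convention tolerating the ignored
+ * extra argument.
+ */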
+typedef irqreturn_t (*new_handler_t)(int, void*);
+static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
+#else /* 2.4.x */
+typedef void (*irq_handler_t)(int, void*, struct pt_regs *);
+typedef void (*new_handler_t)(int, void*);
+static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
+#endif /* >= 2.5.x */
+{
+	irq_handler_t new_handler = (irq_handler_t) handler;
+	return request_irq(irq, new_handler, flags, devname, dev_id);
+}
+
+#undef request_irq
+#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id))
+
+#define irq_handler_t new_handler_t
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) )
+#ifndef skb_checksum_help
+static inline int __kc_skb_checksum_help(struct sk_buff *skb)
+{
+	return skb_checksum_help(skb, 0);
+}
+#define skb_checksum_help(skb) __kc_skb_checksum_help((skb))
+#endif
+#endif /* < 2.6.19 && >= 2.6.11 */
+
+/* pci_restore_state and pci_save_state handle MSI/PCIe from 2.6.19 */
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
+#define PCIE_CONFIG_SPACE_LEN 256
+#define PCI_CONFIG_SPACE_LEN 64
+#define PCIE_LINK_STATUS 0x12
+#define pci_config_space_ich8lan() do {} while(0)
+#undef pci_save_state
+int _kc_pci_save_state(struct pci_dev *);
+#define pci_save_state(pdev) _kc_pci_save_state(pdev)
+#undef pci_restore_state
+void _kc_pci_restore_state(struct pci_dev *);
+#define pci_restore_state(pdev) _kc_pci_restore_state(pdev)
+#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
+
+#ifdef HAVE_PCI_ERS
+#undef free_netdev
+void _kc_free_netdev(struct net_device *);
+#define free_netdev(netdev) _kc_free_netdev(netdev)
+#endif
+static inline int pci_enable_pcie_error_reporting(struct pci_dev __always_unused *dev)
+{
+	return 0;
+}
+#define pci_disable_pcie_error_reporting(dev) do {} while (0)
+#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0)
+
+void *_kc_kmemdup(const void *src, size_t len, unsigned gfp);
+#define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp)
+#ifndef bool
+#define bool _Bool
+#define true 1
+#define false 0
+#endif
+#else /* 2.6.19 */
+#include <linux/aer.h>
+#include <linux/pci_hotplug.h>
+
+#define NEW_SKB_CSUM_HELP
+#endif /* < 2.6.19 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) )
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) )
+#undef INIT_WORK
+#define INIT_WORK(_work, _func) \
+do { \
+	INIT_LIST_HEAD(&(_work)->entry); \
+	(_work)->pending = 0; \
+	(_work)->func = (void (*)(void *))_func; \
+	(_work)->data = _work; \
+	init_timer(&(_work)->timer); \
+} while (0)
+#endif
+
+#ifndef PCI_VDEVICE
+#define PCI_VDEVICE(ven, dev)        \
+	PCI_VENDOR_ID_##ven, (dev),  \
+	PCI_ANY_ID, PCI_ANY_ID, 0, 0
+#endif
+
+#ifndef PCI_VENDOR_ID_INTEL
+#define PCI_VENDOR_ID_INTEL 0x8086
+#endif
+
+#ifndef round_jiffies
+#define round_jiffies(x) x
+#endif
+
+#define csum_offset csum
+
+#define HAVE_EARLY_VMALLOC_NODE
+#define dev_to_node(dev) -1
+#undef set_dev_node
+/* silence the unused-variable compiler warning by self-assigning b */
+#define set_dev_node(a, b) do { (b) = (b); } while(0)
+
+#if (!(RHEL_RELEASE_CODE && \
+       (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \
+         (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \
+        (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,6)))) && \
+     !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0)))
+typedef __u16 __bitwise __sum16;
+typedef __u32 __bitwise __wsum;
+#endif
+
+#if (!(RHEL_RELEASE_CODE && \
+       (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \
+         (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \
+        (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))) && \
+     !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0)))
+static inline __wsum csum_unfold(__sum16 n)
+{
+	return (__force __wsum)n;
+}
+#endif
+
+#else /* < 2.6.20 */
+#define HAVE_DEVICE_NUMA_NODE
+#endif /* < 2.6.20 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
+#define to_net_dev(class) container_of(class, struct net_device, class_dev)
+#define NETDEV_CLASS_DEV
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)))
+#define vlan_group_get_device(vg, id) (vg->vlan_devices[id])
+#define vlan_group_set_device(vg, id, dev)		\
+	do {						\
+		if (vg) vg->vlan_devices[id] = dev;	\
+	} while (0)
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */
+#define pci_channel_offline(pdev) (pdev->error_state && \
+	pdev->error_state != pci_channel_io_normal)
+#define pci_request_selected_regions(pdev, bars, name) \
+        pci_request_regions(pdev, name)
+#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev);
+
+#ifndef __aligned
+#define __aligned(x)			__attribute__((aligned(x)))
+#endif
+
+struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev);
+#define netdev_to_dev(netdev)	\
+	pci_dev_to_dev(_kc_netdev_to_pdev(netdev))
+#define devm_kzalloc(dev, size, flags) kzalloc(size, flags)
+#define devm_kfree(dev, p) kfree(p)
+#else /* 2.6.21 */
+static inline struct device *netdev_to_dev(struct net_device *netdev)
+{
+	return &netdev->dev;
+}
+
+#endif /* < 2.6.21 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
+#define tcp_hdr(skb) (skb->h.th)
+#define tcp_hdrlen(skb) (skb->h.th->doff << 2)
+#define skb_transport_offset(skb) (skb->h.raw - skb->data)
+#define skb_transport_header(skb) (skb->h.raw)
+#define ipv6_hdr(skb) (skb->nh.ipv6h)
+#define ip_hdr(skb) (skb->nh.iph)
+#define skb_network_offset(skb) (skb->nh.raw - skb->data)
+#define skb_network_header(skb) (skb->nh.raw)
+#define skb_tail_pointer(skb) skb->tail
+#define skb_reset_tail_pointer(skb) \
+	do { \
+		skb->tail = skb->data; \
+	} while (0)
+#define skb_set_tail_pointer(skb, offset) \
+	do { \
+		skb->tail = skb->data + offset; \
+	} while (0)
+#define skb_copy_to_linear_data(skb, from, len) \
+				memcpy(skb->data, from, len)
+#define skb_copy_to_linear_data_offset(skb, offset, from, len) \
+				memcpy(skb->data + offset, from, len)
+#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw)
+#define pci_register_driver pci_module_init
+#define skb_mac_header(skb) skb->mac.raw
+
+#ifdef NETIF_F_MULTI_QUEUE
+#ifndef alloc_etherdev_mq
+#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a)
+#endif
+#endif /* NETIF_F_MULTI_QUEUE */
+
+#ifndef ETH_FCS_LEN
+#define ETH_FCS_LEN 4
+#endif
+#define cancel_work_sync(x) flush_scheduled_work()
+#ifndef udp_hdr
+#define udp_hdr _udp_hdr
+static inline struct udphdr *_udp_hdr(const struct sk_buff *skb)
+{
+	return (struct udphdr *)skb_transport_header(skb);
+}
+#endif
+
+#ifdef cpu_to_be16
+#undef cpu_to_be16
+#endif
+#define cpu_to_be16(x) __constant_htons(x)
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)))
+enum {
+	DUMP_PREFIX_NONE,
+	DUMP_PREFIX_ADDRESS,
+	DUMP_PREFIX_OFFSET
+};
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */
+#ifndef hex_asc
+#define hex_asc(x)	"0123456789abcdef"[x]
+#endif
+#include <linux/ctype.h>
+void _kc_print_hex_dump(const char *level, const char *prefix_str,
+			int prefix_type, int rowsize, int groupsize,
+			const void *buf, size_t len, bool ascii);
+#define print_hex_dump(lvl, s, t, r, g, b, l, a) \
+		_kc_print_hex_dump(lvl, s, t, r, g, b, l, a)
+#ifndef ADVERTISED_2500baseX_Full
+#define ADVERTISED_2500baseX_Full BIT(15)
+#endif
+#ifndef SUPPORTED_2500baseX_Full
+#define SUPPORTED_2500baseX_Full BIT(15)
+#endif
+
+#ifndef ETH_P_PAUSE
+#define ETH_P_PAUSE 0x8808
+#endif
+
+static inline int compound_order(struct page *page)
+{
+	return 0;
+}
+
+#define __must_be_array(a) 0
+
+#ifndef SKB_WITH_OVERHEAD
+#define SKB_WITH_OVERHEAD(X) \
+	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#endif
+#else /* 2.6.22 */
+#define ETH_TYPE_TRANS_SETS_DEV
+#define HAVE_NETDEV_STATS_IN_NETDEV
+#endif /* < 2.6.22 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) )
+#endif /* > 2.6.22 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) )
+#define netif_subqueue_stopped(_a, _b) 0
+#ifndef PTR_ALIGN
+#define PTR_ALIGN(p, a)         ((typeof(p))ALIGN((unsigned long)(p), (a)))
+#endif
+
+#ifndef CONFIG_PM_SLEEP
+#define CONFIG_PM_SLEEP	CONFIG_PM
+#endif
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) )
+#define HAVE_ETHTOOL_GET_PERM_ADDR
+#endif /* 2.6.14 through 2.6.22 */
+
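+/* Backported skb_cow_head(): reallocate the skb header when it is cloned
+ * or has less than 'headroom' bytes free in front of skb->data, so the
+ * caller may safely modify the headers.
+ */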
+static inline int __kc_skb_cow_head(struct sk_buff *skb, unsigned int headroom)
+{
+	int delta = 0;
+
+	if (headroom > (skb->data - skb->head))
+		delta = headroom - (skb->data - skb->head);
+
+	if (delta || skb_header_cloned(skb))
+		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
+					GFP_ATOMIC);
+	return 0;
+}
+#define skb_cow_head(s, h) __kc_skb_cow_head((s), (h))
+#endif /* < 2.6.23 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
+#ifndef ETH_FLAG_LRO
+#define ETH_FLAG_LRO NETIF_F_LRO
+#endif
+
+#ifndef ACCESS_ONCE
+#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+#endif
+
+/* if GRO is supported then the napi struct must already exist */
+#ifndef NETIF_F_GRO
+/* NAPI API changes in 2.6.24 break everything */
+struct napi_struct {
+	/* used to look up the real NAPI polling routine */
+	int (*poll)(struct napi_struct *, int);
+	struct net_device *dev;
+	int weight;
+};
+#endif
+
+#ifdef NAPI
+int __kc_adapter_clean(struct net_device *, int *);
+/* The following definitions are multi-queue aware: a driver define list
+ * determines which drivers support multiple queues and therefore need
+ * these stronger defines. A driver that does not support multi-queue
+ * functionality does not need to be added to the list.
+ */
+struct net_device *napi_to_poll_dev(const struct napi_struct *napi);
+
+static inline void __kc_mq_netif_napi_add(struct net_device *dev, struct napi_struct *napi,
+					  int (*poll)(struct napi_struct *, int), int weight)
+{
+	struct net_device *poll_dev = napi_to_poll_dev(napi);
+	poll_dev->poll = __kc_adapter_clean;
+	poll_dev->priv = napi;
+	poll_dev->weight = weight;
+	set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state);
+	set_bit(__LINK_STATE_START, &poll_dev->state);
+	dev_hold(poll_dev);
+	napi->poll = poll;
+	napi->weight = weight;
+	napi->dev = dev;
+}
+#define netif_napi_add __kc_mq_netif_napi_add
+
+static inline void __kc_mq_netif_napi_del(struct napi_struct *napi)
+{
+	struct net_device *poll_dev = napi_to_poll_dev(napi);
+	WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state));
+	dev_put(poll_dev);
+	memset(poll_dev, 0, sizeof(struct net_device));
+}
+
+#define netif_napi_del __kc_mq_netif_napi_del
+
+static inline bool __kc_mq_napi_schedule_prep(struct napi_struct *napi)
+{
+	return netif_running(napi->dev) &&
+		netif_rx_schedule_prep(napi_to_poll_dev(napi));
+}
+#define napi_schedule_prep __kc_mq_napi_schedule_prep
+
+static inline void __kc_mq_napi_schedule(struct napi_struct *napi)
+{
+	if (napi_schedule_prep(napi))
+		__netif_rx_schedule(napi_to_poll_dev(napi));
+}
+#define napi_schedule __kc_mq_napi_schedule
+
+#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi))
+#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi))
+#ifdef CONFIG_SMP
+static inline void napi_synchronize(const struct napi_struct *n)
+{
+	struct net_device *dev = napi_to_poll_dev(n);
+
+	while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
+		/* No hurry. */
+		msleep(1);
+	}
+}
+#else
+#define napi_synchronize(n)	barrier()
+#endif /* CONFIG_SMP */
+#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi))
+static inline void _kc_napi_complete(struct napi_struct *napi)
+{
+#ifdef NETIF_F_GRO
+	napi_gro_flush(napi);
+#endif
+	netif_rx_complete(napi_to_poll_dev(napi));
+}
+#define napi_complete _kc_napi_complete
+#else /* NAPI */
+
+/* The following definitions are only used if we don't support NAPI at all. */
+
+static inline void __kc_netif_napi_add(struct net_device *dev, struct napi_struct *napi,
+				       int (*poll)(struct napi_struct *, int), int weight)
+{
+	dev->poll = poll;
+	dev->weight = weight;
+	napi->poll = poll;
+	napi->weight = weight;
+	napi->dev = dev;
+}
+#define netif_napi_del(_a) do {} while (0)
+#endif /* NAPI */
+
+#undef dev_get_by_name
+#define dev_get_by_name(_a, _b) dev_get_by_name(_b)
+#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b)
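+/* (1ULL << 64) is undefined in C, so the full 64-bit mask must be
+ * special-cased rather than computed by shifting.
+ */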
+#ifndef DMA_BIT_MASK
+#define DMA_BIT_MASK(n)	(((n) == 64) ? DMA_64BIT_MASK : ((1ULL<<(n))-1))
+#endif
+
+#ifdef NETIF_F_TSO6
+#define skb_is_gso_v6 _kc_skb_is_gso_v6
+static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb)
+{
+	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
+}
+#endif /* NETIF_F_TSO6 */
+
+#ifndef KERN_CONT
+#define KERN_CONT	""
+#endif
+#ifndef pr_err
+#define pr_err(fmt, arg...) \
+	printk(KERN_ERR fmt, ##arg)
+#endif
+
+#ifndef rounddown_pow_of_two
+#define rounddown_pow_of_two(n) \
+	(__builtin_constant_p(n) ? ( \
+		(n == 1) ? 0 : \
+		(1UL << ilog2(n))) : \
+		(1UL << (fls_long(n) - 1)))
+#endif
+
+#else /* < 2.6.24 */
+#define HAVE_ETHTOOL_GET_SSET_COUNT
+#define HAVE_NETDEV_NAPI_LIST
+#endif /* < 2.6.24 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) )
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) )
+#define INCLUDE_PM_QOS_PARAMS_H
+#include <linux/pm_qos_params.h>
+#else /* >= 3.2.0 */
+#include <linux/pm_qos.h>
+#endif /* else >= 3.2.0 */
+#endif /* > 2.6.24 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) )
+#define PM_QOS_CPU_DMA_LATENCY	1
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) )
+#include <linux/latency.h>
+#define PM_QOS_DEFAULT_VALUE	INFINITE_LATENCY
+#define pm_qos_add_requirement(pm_qos_class, name, value) \
+		set_acceptable_latency(name, value)
+#define pm_qos_remove_requirement(pm_qos_class, name) \
+		remove_acceptable_latency(name)
+#define pm_qos_update_requirement(pm_qos_class, name, value) \
+		modify_acceptable_latency(name, value)
+#else
+#define PM_QOS_DEFAULT_VALUE	-1
+#define pm_qos_add_requirement(pm_qos_class, name, value)
+#define pm_qos_remove_requirement(pm_qos_class, name)
+#define pm_qos_update_requirement(pm_qos_class, name, value) { \
+	if (value != PM_QOS_DEFAULT_VALUE) { \
+		printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \
+			pci_name(adapter->pdev)); \
+	} \
+}
+
+#endif /* > 2.6.18 */
+
+#define pci_enable_device_mem(pdev) pci_enable_device(pdev)
+
+#ifndef DEFINE_PCI_DEVICE_TABLE
+#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[]
+#endif /* DEFINE_PCI_DEVICE_TABLE */
+
+#ifndef strict_strtol
+#define strict_strtol(s, b, r) _kc_strict_strtol(s, b, r)
+static inline int _kc_strict_strtol(const char *buf, unsigned int base, long *res)
+{
+	/* adapted from strict_strtoul() in 2.6.25 */
+	char *tail;
+	long val;
+	size_t len;
+
+	*res = 0;
+	len = strlen(buf);
+	if (!len)
+		return -EINVAL;
+	val = simple_strtol(buf, &tail, base);
+	if (tail == buf)
+		return -EINVAL;
+	if ((*tail == '\0') ||
+	    ((len == (size_t)(tail - buf) + 1) && (*tail == '\n'))) {
+		*res = val;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+#endif
+
+#else /* < 2.6.25 */
+
+#endif /* < 2.6.25 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
+#ifndef clamp_t
+#define clamp_t(type, val, min, max) ({		\
+	type __val = (val);			\
+	type __min = (min);			\
+	type __max = (max);			\
+	__val = __val < __min ? __min : __val;	\
+	__val > __max ? __max : __val; })
+#endif /* clamp_t */
+#undef kzalloc_node
+#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags)
+
+void _kc_pci_disable_link_state(struct pci_dev *dev, int state);
+#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s)
+#else /* < 2.6.26 */
+#define NETDEV_CAN_SET_GSO_MAX_SIZE
+#ifdef HAVE_PCI_ASPM_H
+#include <linux/pci-aspm.h>
+#endif
+#define HAVE_NETDEV_VLAN_FEATURES
+#ifndef PCI_EXP_LNKCAP_ASPMS
+#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */
+#endif /* PCI_EXP_LNKCAP_ASPMS */
+#endif /* < 2.6.26 */
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
+static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep,
+					     __u32 speed)
+{
+	ep->speed = (__u16)speed;
+	/* ep->speed_hi = (__u16)(speed >> 16); */
+}
+#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set
+
+static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep)
+{
+	/* no speed_hi before 2.6.27, and probably no need for it yet */
+	return (__u32)ep->speed;
+}
+#define ethtool_cmd_speed _kc_ethtool_cmd_speed
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) )
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM))
+#define ANCIENT_PM 1
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) && \
+       (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && \
+       defined(CONFIG_PM_SLEEP))
+#define NEWER_PM 1
+#endif
+#if defined(ANCIENT_PM) || defined(NEWER_PM)
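+/* Wakeup-enable fallback: read the PM capability's PMC register and use
+ * the PME_Support bits (15:11) to decide whether the device can wake the
+ * system; like the MWI fallback above, this expands inline and expects a
+ * local 'adapter' at the call site.
+ */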
+#undef device_set_wakeup_enable
+#define device_set_wakeup_enable(dev, val) \
+	do { \
+		u16 pmc = 0; \
+		int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \
+		if (pm) { \
+			pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \
+				&pmc); \
+		} \
+		(dev)->power.can_wakeup = !!(pmc >> 11); \
+		(dev)->power.should_wakeup = (val && (pmc >> 11)); \
+	} while (0)
+#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */
+#endif /* 2.6.15 through 2.6.27 */
+#ifndef netif_napi_del
+#define netif_napi_del(_a) do {} while (0)
+#ifdef NAPI
+#ifdef CONFIG_NETPOLL
+#undef netif_napi_del
+#define netif_napi_del(_a) list_del(&(_a)->dev_list);
+#endif
+#endif
+#endif /* netif_napi_del */
+#ifdef dma_mapping_error
+#undef dma_mapping_error
+#endif
+#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr)
+
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+#define HAVE_TX_MQ
+#endif
+
+#ifndef DMA_ATTR_WEAK_ORDERING
+#define DMA_ATTR_WEAK_ORDERING 0
+#endif
+
+#ifdef HAVE_TX_MQ
+void _kc_netif_tx_stop_all_queues(struct net_device *);
+void _kc_netif_tx_wake_all_queues(struct net_device *);
+void _kc_netif_tx_start_all_queues(struct net_device *);
+#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a)
+#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a)
+#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a)
+#undef netif_stop_subqueue
+#define netif_stop_subqueue(_ndev,_qi) do { \
+	if (netif_is_multiqueue((_ndev))) \
+		netif_stop_subqueue((_ndev), (_qi)); \
+	else \
+		netif_stop_queue((_ndev)); \
+	} while (0)
+#undef netif_start_subqueue
+#define netif_start_subqueue(_ndev,_qi) do { \
+	if (netif_is_multiqueue((_ndev))) \
+		netif_start_subqueue((_ndev), (_qi)); \
+	else \
+		netif_start_queue((_ndev)); \
+	} while (0)
+#else /* HAVE_TX_MQ */
+#define netif_tx_stop_all_queues(a) netif_stop_queue(a)
+#define netif_tx_wake_all_queues(a) netif_wake_queue(a)
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) )
+#define netif_tx_start_all_queues(a) netif_start_queue(a)
+#else
+#define netif_tx_start_all_queues(a) do {} while (0)
+#endif
+#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev))
+#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev))
+#endif /* HAVE_TX_MQ */
+#ifndef NETIF_F_MULTI_QUEUE
+#define NETIF_F_MULTI_QUEUE 0
+#define netif_is_multiqueue(a) 0
+#define netif_wake_subqueue(a, b)
+#endif /* NETIF_F_MULTI_QUEUE */
+
+#ifndef __WARN_printf
+void __kc_warn_slowpath(const char *file, const int line,
+		const char *fmt, ...) __attribute__((format(printf, 3, 4)));
+#define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg)
+#endif /* __WARN_printf */
+
+#ifndef WARN
+#define WARN(condition, format...) ({						\
+	int __ret_warn_on = !!(condition);				\
+	if (unlikely(__ret_warn_on))					\
+		__WARN_printf(format);					\
+	unlikely(__ret_warn_on);					\
+})
+#endif /* WARN */
+#undef HAVE_IXGBE_DEBUG_FS
+#undef HAVE_IGB_DEBUG_FS
+#define qdisc_reset_all_tx(a)
+#else /* < 2.6.27 */
+#include <net/sch_generic.h>
+#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set
+static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep,
+					     __u32 speed)
+{
+	ep->speed = (__u16)(speed & 0xFFFF);
+	ep->speed_hi = (__u16)(speed >> 16);
+}
+#define HAVE_TX_MQ
+#define HAVE_NETDEV_SELECT_QUEUE
+#ifdef CONFIG_DEBUG_FS
+#define HAVE_IXGBE_DEBUG_FS
+#define HAVE_IGB_DEBUG_FS
+#endif /* CONFIG_DEBUG_FS */
+#endif /* < 2.6.27 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
+#define pci_ioremap_bar(pdev, bar)	ioremap(pci_resource_start(pdev, bar), \
+					        pci_resource_len(pdev, bar))
+#define pci_wake_from_d3 _kc_pci_wake_from_d3
+#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep
+int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable);
+int _kc_pci_prepare_to_sleep(struct pci_dev *dev);
+#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC)
+#ifndef __skb_queue_head_init
+static inline void __kc_skb_queue_head_init(struct sk_buff_head *list)
+{
+	list->prev = list->next = (struct sk_buff *)list;
+	list->qlen = 0;
+}
+#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q)
+#endif
+
+#define PCI_EXP_DEVCAP2		36	/* Device Capabilities 2 */
+#define PCI_EXP_DEVCTL2		40	/* Device Control 2 */
+
+#define PCI_EXP_DEVCAP_FLR	0x10000000 /* Function Level Reset */
+#define PCI_EXP_DEVCTL_BCR_FLR	0x8000 /* Bridge Configuration Retry / FLR */
+
+#endif /* < 2.6.28 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
+#ifndef swap
+#define swap(a, b) \
+	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
+#endif
+#define pci_request_selected_regions_exclusive(pdev, bars, name) \
+		pci_request_selected_regions(pdev, bars, name)
+#ifndef CONFIG_NR_CPUS
+#define CONFIG_NR_CPUS 1
+#endif /* CONFIG_NR_CPUS */
+#ifndef pcie_aspm_enabled
+#define pcie_aspm_enabled()   (1)
+#endif /* pcie_aspm_enabled */
+
+#define  PCI_EXP_SLTSTA_PDS	0x0040	/* Presence Detect State */
+
+#ifndef PCI_EXP_LNKSTA_CLS
+#define  PCI_EXP_LNKSTA_CLS    0x000f  /* Current Link Speed */
+#endif
+#ifndef PCI_EXP_LNKSTA_NLW
+#define  PCI_EXP_LNKSTA_NLW    0x03f0  /* Negotiated Link Width */
+#endif
+
+#ifndef pci_clear_master
+void _kc_pci_clear_master(struct pci_dev *dev);
+#define pci_clear_master(dev)	_kc_pci_clear_master(dev)
+#endif
+
+#ifndef PCI_EXP_LNKCTL_ASPMC
+#define  PCI_EXP_LNKCTL_ASPMC	0x0003	/* ASPM Control */
+#endif
+
+#ifndef PCI_EXP_LNKCAP_MLW
+#define PCI_EXP_LNKCAP_MLW	0x000003f0 /* Maximum Link Width */
+#endif
+
+#else /* < 2.6.29 */
+#ifndef HAVE_NET_DEVICE_OPS
+#define HAVE_NET_DEVICE_OPS
+#endif
+#ifdef CONFIG_DCB
+#define HAVE_PFC_MODE_ENABLE
+#endif /* CONFIG_DCB */
+#endif /* < 2.6.29 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) )
+#define NO_PTP_SUPPORT
+#define skb_rx_queue_recorded(a) false
+#define skb_get_rx_queue(a) 0
+#define skb_record_rx_queue(a, b) do {} while (0)
+#define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues)
+#ifndef CONFIG_PCI_IOV
+#undef pci_enable_sriov
+#define pci_enable_sriov(a, b) -ENOTSUPP
+#undef pci_disable_sriov
+#define pci_disable_sriov(a) do {} while (0)
+#endif /* CONFIG_PCI_IOV */
+#ifndef pr_cont
+#define pr_cont(fmt, ...) \
+	printk(KERN_CONT fmt, ##__VA_ARGS__)
+#endif /* pr_cont */
+static inline void _kc_synchronize_irq(unsigned int a)
+{
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
+	synchronize_irq();
+#else /* < 2.5.28 */
+	synchronize_irq(a);
+#endif /* < 2.5.28 */
+}
+#undef synchronize_irq
+#define synchronize_irq(a) _kc_synchronize_irq(a)
+
+#define PCI_EXP_LNKCTL2		48	/* Link Control 2 */
+
+#ifdef nr_cpus_node
+#undef nr_cpus_node
+#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
+#endif
+
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,5))
+#define HAVE_PCI_DEV_IS_VIRTFN_BIT
+#endif /* RHEL >= 5.5 */
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,5)))
+static inline bool pci_is_root_bus(struct pci_bus *pbus)
+{
+	return !(pbus->parent);
+}
+#endif
+
+#else /* < 2.6.30 */
+#define HAVE_ASPM_QUIRKS
+#define HAVE_PCI_DEV_IS_VIRTFN_BIT
+#endif /* < 2.6.30 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) )
+#define ETH_P_1588 0x88F7
+#define ETH_P_FIP  0x8914
+#ifndef netdev_uc_count
+#define netdev_uc_count(dev) ((dev)->uc_count)
+#endif
+#ifndef netdev_for_each_uc_addr
+#define netdev_for_each_uc_addr(uclist, dev) \
+	for (uclist = dev->uc_list; uclist; uclist = uclist->next)
+#endif
+#ifndef PORT_OTHER
+#define PORT_OTHER 0xff
+#endif
+#ifndef MDIO_PHY_ID_PRTAD
+#define MDIO_PHY_ID_PRTAD 0x03e0
+#endif
+#ifndef MDIO_PHY_ID_DEVAD
+#define MDIO_PHY_ID_DEVAD 0x001f
+#endif
+#ifndef skb_dst
+#define skb_dst(s) ((s)->dst)
+#endif
+
+#ifndef SUPPORTED_1000baseKX_Full
+#define SUPPORTED_1000baseKX_Full	BIT(17)
+#endif
+#ifndef SUPPORTED_10000baseKX4_Full
+#define SUPPORTED_10000baseKX4_Full	BIT(18)
+#endif
+#ifndef SUPPORTED_10000baseKR_Full
+#define SUPPORTED_10000baseKR_Full	BIT(19)
+#endif
+
+#ifndef ADVERTISED_1000baseKX_Full
+#define ADVERTISED_1000baseKX_Full	BIT(17)
+#endif
+#ifndef ADVERTISED_10000baseKX4_Full
+#define ADVERTISED_10000baseKX4_Full	BIT(18)
+#endif
+#ifndef ADVERTISED_10000baseKR_Full
+#define ADVERTISED_10000baseKR_Full	BIT(19)
+#endif
+
+static inline unsigned long dev_trans_start(struct net_device *dev)
+{
+	return dev->trans_start;
+}
+#else /* < 2.6.31 */
+#ifndef HAVE_NETDEV_STORAGE_ADDRESS
+#define HAVE_NETDEV_STORAGE_ADDRESS
+#endif
+#ifndef HAVE_NETDEV_HW_ADDR
+#define HAVE_NETDEV_HW_ADDR
+#endif
+#ifndef HAVE_TRANS_START_IN_QUEUE
+#define HAVE_TRANS_START_IN_QUEUE
+#endif
+#ifndef HAVE_INCLUDE_LINUX_MDIO_H
+#define HAVE_INCLUDE_LINUX_MDIO_H
+#endif
+#include <linux/mdio.h>
+#endif /* < 2.6.31 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) )
+#undef netdev_tx_t
+#define netdev_tx_t int
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef NETIF_F_FCOE_MTU
+#define NETIF_F_FCOE_MTU       BIT(26)
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+static inline int _kc_pm_runtime_get_sync(void)
+{
+	return 1;
+}
+#define pm_runtime_get_sync(dev)	_kc_pm_runtime_get_sync()
+#else /* 2.6.0 => 2.6.32 */
+static inline int _kc_pm_runtime_get_sync(struct device __always_unused *dev)
+{
+	return 1;
+}
+#ifndef pm_runtime_get_sync
+#define pm_runtime_get_sync(dev)	_kc_pm_runtime_get_sync(dev)
+#endif
+#endif /* 2.6.0 => 2.6.32 */
+#ifndef pm_runtime_put
+#define pm_runtime_put(dev)		do {} while (0)
+#endif
+#ifndef pm_runtime_put_sync
+#define pm_runtime_put_sync(dev)	do {} while (0)
+#endif
+#ifndef pm_runtime_resume
+#define pm_runtime_resume(dev)		do {} while (0)
+#endif
+#ifndef pm_schedule_suspend
+#define pm_schedule_suspend(dev, t)	do {} while (0)
+#endif
+#ifndef pm_runtime_set_suspended
+#define pm_runtime_set_suspended(dev)	do {} while (0)
+#endif
+#ifndef pm_runtime_disable
+#define pm_runtime_disable(dev)		do {} while (0)
+#endif
+#ifndef pm_runtime_put_noidle
+#define pm_runtime_put_noidle(dev)	do {} while (0)
+#endif
+#ifndef pm_runtime_set_active
+#define pm_runtime_set_active(dev)	do {} while (0)
+#endif
+#ifndef pm_runtime_enable
+#define pm_runtime_enable(dev)	do {} while (0)
+#endif
+#ifndef pm_runtime_get_noresume
+#define pm_runtime_get_noresume(dev)	do {} while (0)
+#endif
+#else /* < 2.6.32 */
+#if (RHEL_RELEASE_CODE && \
+     (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) && \
+     (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
+#define HAVE_RHEL6_NET_DEVICE_EXTENDED
+#endif /* RHEL >= 6.2 && RHEL < 7.0 */
+#if (RHEL_RELEASE_CODE && \
+     (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) && \
+     (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
+#define HAVE_RHEL6_NET_DEVICE_OPS_EXT
+#define HAVE_NDO_SET_FEATURES
+#endif /* RHEL >= 6.6 && RHEL < 7.0 */
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE
+#define HAVE_NETDEV_OPS_FCOE_ENABLE
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#ifdef CONFIG_DCB
+#ifndef HAVE_DCBNL_OPS_GETAPP
+#define HAVE_DCBNL_OPS_GETAPP
+#endif
+#endif /* CONFIG_DCB */
+#include <linux/pm_runtime.h>
+/* IOV bad-DMA-target workarounds require at least this kernel revision */
+#define HAVE_PCIE_TYPE
+#endif /* < 2.6.32 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) )
+#ifndef pci_pcie_cap
+#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP)
+#endif
+#ifndef IPV4_FLOW
+#define IPV4_FLOW 0x10
+#endif /* IPV4_FLOW */
+#ifndef IPV6_FLOW
+#define IPV6_FLOW 0x11
+#endif /* IPV6_FLOW */
+/* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */
+#if ( (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) || \
+      (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,1,0)) )
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
+#define HAVE_NETDEV_OPS_FCOE_GETWWN
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#endif /* RHEL6 or SLES11 SP1 */
+#ifndef __percpu
+#define __percpu
+#endif /* __percpu */
+
+#ifndef PORT_DA
+#define PORT_DA PORT_OTHER
+#endif /* PORT_DA */
+#ifndef PORT_NONE
+#define PORT_NONE PORT_OTHER
+#endif
+
+#if ((RHEL_RELEASE_CODE && \
+     (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) && \
+     (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))))
+#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE)
+#undef DEFINE_DMA_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
+#undef DEFINE_DMA_UNMAP_LEN
+#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
+#undef dma_unmap_addr
+#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
+#undef dma_unmap_addr_set
+#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
+#undef dma_unmap_len
+#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
+#undef dma_unmap_len_set
+#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
+#endif /* !CONFIG_X86_32 && !CONFIG_NEED_DMA_MAP_STATE */
+#endif /* RHEL_RELEASE_CODE */
+
+#if (!(RHEL_RELEASE_CODE && \
+       (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,8)) && \
+         (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))) || \
+        ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) && \
+         (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))))))
+static inline bool pci_is_pcie(struct pci_dev *dev)
+{
+	return !!pci_pcie_cap(dev);
+}
+#endif /* RHEL_RELEASE_CODE */
+
+#if (!(RHEL_RELEASE_CODE && \
+      (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2))))
+#define sk_tx_queue_get(_sk) (-1)
+#define sk_tx_queue_set(_sk, _tx_queue) do {} while(0)
+#endif /* !(RHEL >= 6.2) */
+
+#if (RHEL_RELEASE_CODE && \
+     (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \
+     (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
+#define HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+#define HAVE_ETHTOOL_GRXFHINDIR_SIZE
+#define HAVE_ETHTOOL_SET_PHYS_ID
+#define HAVE_ETHTOOL_GET_TS_INFO
+#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,5))
+#define HAVE_ETHTOOL_GSRSSH
+#define HAVE_RHEL6_SRIOV_CONFIGURE
+#define HAVE_RXFH_NONCONST
+#endif /* RHEL > 6.5 */
+#endif /* RHEL >= 6.4 && RHEL < 7.0 */
+
+#else /* < 2.6.33 */
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
+#define HAVE_NETDEV_OPS_FCOE_GETWWN
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#endif /* < 2.6.33 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
+#ifndef pci_num_vf
+#define pci_num_vf(pdev) _kc_pci_num_vf(pdev)
+int _kc_pci_num_vf(struct pci_dev *dev);
+#endif
+#endif /* RHEL_RELEASE_CODE */
+
+#ifndef dev_is_pci
+#define dev_is_pci(d) ((d)->bus == &pci_bus_type)
+#endif
+
+#ifndef ETH_FLAG_NTUPLE
+#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE
+#endif
+
+#ifndef netdev_mc_count
+#define netdev_mc_count(dev) ((dev)->mc_count)
+#endif
+#ifndef netdev_mc_empty
+#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0)
+#endif
+#ifndef netdev_for_each_mc_addr
+#define netdev_for_each_mc_addr(mclist, dev) \
+	for (mclist = dev->mc_list; mclist; mclist = mclist->next)
+#endif
+#ifndef netdev_uc_count
+#define netdev_uc_count(dev) ((dev)->uc.count)
+#endif
+#ifndef netdev_uc_empty
+#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0)
+#endif
+#ifndef netdev_for_each_uc_addr
+#define netdev_for_each_uc_addr(ha, dev) \
+	list_for_each_entry(ha, &dev->uc.list, list)
+#endif
+#ifndef dma_set_coherent_mask
+#define dma_set_coherent_mask(dev,mask) \
+	pci_set_consistent_dma_mask(to_pci_dev(dev),(mask))
+#endif
+#ifndef pci_dev_run_wake
+#define pci_dev_run_wake(pdev)	(0)
+#endif
+
+/* netdev logging taken from include/linux/netdevice.h */
+#ifndef netdev_name
+static inline const char *_kc_netdev_name(const struct net_device *dev)
+{
+	if (dev->reg_state != NETREG_REGISTERED)
+		return "(unregistered net_device)";
+	return dev->name;
+}
+#define netdev_name(netdev)	_kc_netdev_name(netdev)
+#endif /* netdev_name */
+
+#undef netdev_printk
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+#define netdev_printk(level, netdev, format, args...)		\
+do {								\
+	struct pci_dev *pdev = _kc_netdev_to_pdev(netdev);	\
+	printk(level "%s: " format, pci_name(pdev), ##args);	\
+} while(0)
+#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
+#define netdev_printk(level, netdev, format, args...)		\
+do {								\
+	struct pci_dev *pdev = _kc_netdev_to_pdev(netdev);	\
+	struct device *dev = pci_dev_to_dev(pdev);		\
+	dev_printk(level, dev, "%s: " format,			\
+		   netdev_name(netdev), ##args);		\
+} while(0)
+#else /* 2.6.21 => 2.6.34 */
+#define netdev_printk(level, netdev, format, args...)		\
+	dev_printk(level, (netdev)->dev.parent,			\
+		   "%s: " format,				\
+		   netdev_name(netdev), ##args)
+#endif /* <2.6.0 <2.6.21 <2.6.34 */
+#undef netdev_emerg
+#define netdev_emerg(dev, format, args...)			\
+	netdev_printk(KERN_EMERG, dev, format, ##args)
+#undef netdev_alert
+#define netdev_alert(dev, format, args...)			\
+	netdev_printk(KERN_ALERT, dev, format, ##args)
+#undef netdev_crit
+#define netdev_crit(dev, format, args...)			\
+	netdev_printk(KERN_CRIT, dev, format, ##args)
+#undef netdev_err
+#define netdev_err(dev, format, args...)			\
+	netdev_printk(KERN_ERR, dev, format, ##args)
+#undef netdev_warn
+#define netdev_warn(dev, format, args...)			\
+	netdev_printk(KERN_WARNING, dev, format, ##args)
+#undef netdev_notice
+#define netdev_notice(dev, format, args...)			\
+	netdev_printk(KERN_NOTICE, dev, format, ##args)
+#undef netdev_info
+#define netdev_info(dev, format, args...)			\
+	netdev_printk(KERN_INFO, dev, format, ##args)
+#undef netdev_dbg
+#if defined(DEBUG)
+#define netdev_dbg(__dev, format, args...)			\
+	netdev_printk(KERN_DEBUG, __dev, format, ##args)
+#elif defined(CONFIG_DYNAMIC_DEBUG)
+#define netdev_dbg(__dev, format, args...)			\
+do {								\
+	dynamic_dev_dbg((__dev)->dev.parent, "%s: " format,	\
+			netdev_name(__dev), ##args);		\
+} while (0)
+#else /* DEBUG */
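+/* Debug output disabled: the if (0) still lets the compiler type-check
+ * the format string without emitting any code.
+ */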
+#define netdev_dbg(__dev, format, args...)			\
+({								\
+	if (0)							\
+		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
+	0;							\
+})
+#endif /* DEBUG */
+
+#undef netif_printk
+#define netif_printk(priv, type, level, dev, fmt, args...)	\
+do {								\
+	if (netif_msg_##type(priv))				\
+		netdev_printk(level, (dev), fmt, ##args);	\
+} while (0)
+
+#undef netif_emerg
+#define netif_emerg(priv, type, dev, fmt, args...)		\
+	netif_level(emerg, priv, type, dev, fmt, ##args)
+#undef netif_alert
+#define netif_alert(priv, type, dev, fmt, args...)		\
+	netif_level(alert, priv, type, dev, fmt, ##args)
+#undef netif_crit
+#define netif_crit(priv, type, dev, fmt, args...)		\
+	netif_level(crit, priv, type, dev, fmt, ##args)
+#undef netif_err
+#define netif_err(priv, type, dev, fmt, args...)		\
+	netif_level(err, priv, type, dev, fmt, ##args)
+#undef netif_warn
+#define netif_warn(priv, type, dev, fmt, args...)		\
+	netif_level(warn, priv, type, dev, fmt, ##args)
+#undef netif_notice
+#define netif_notice(priv, type, dev, fmt, args...)		\
+	netif_level(notice, priv, type, dev, fmt, ##args)
+#undef netif_info
+#define netif_info(priv, type, dev, fmt, args...)		\
+	netif_level(info, priv, type, dev, fmt, ##args)
+#undef netif_dbg
+#define netif_dbg(priv, type, dev, fmt, args...)		\
+	netif_level(dbg, priv, type, dev, fmt, ##args)
+
+#ifdef SET_SYSTEM_SLEEP_PM_OPS
+#define HAVE_SYSTEM_SLEEP_PM_OPS
+#endif
+
+#ifndef for_each_set_bit
+#define for_each_set_bit(bit, addr, size) \
+	for ((bit) = find_first_bit((addr), (size)); \
+		(bit) < (size); \
+		(bit) = find_next_bit((addr), (size), (bit) + 1))
+#endif /* for_each_set_bit */
+
+#ifndef DEFINE_DMA_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN
+#define dma_unmap_addr pci_unmap_addr
+#define dma_unmap_addr_set pci_unmap_addr_set
+#define dma_unmap_len pci_unmap_len
+#define dma_unmap_len_set pci_unmap_len_set
+#endif /* DEFINE_DMA_UNMAP_ADDR */
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,3))
+#ifdef IGB_HWMON
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define sysfs_attr_init(attr)				\
+	do {						\
+		static struct lock_class_key __key;	\
+		(attr)->key = &__key;			\
+	} while (0)
+#else
+#define sysfs_attr_init(attr) do {} while (0)
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
+#endif /* IGB_HWMON */
+#endif /* RHEL_RELEASE_CODE */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+static inline bool _kc_pm_runtime_suspended(void)
+{
+	return false;
+}
+#define pm_runtime_suspended(dev)	_kc_pm_runtime_suspended()
+#else /* 2.6.0 => 2.6.34 */
+static inline bool _kc_pm_runtime_suspended(struct device __always_unused *dev)
+{
+	return false;
+}
+#ifndef pm_runtime_suspended
+#define pm_runtime_suspended(dev)	_kc_pm_runtime_suspended(dev)
+#endif
+#endif /* 2.6.0 => 2.6.34 */
+
+#ifndef pci_bus_speed
+/* override pci_bus_speed introduced in 2.6.19 with an expanded enum type */
+enum _kc_pci_bus_speed {
+	_KC_PCIE_SPEED_2_5GT		= 0x14,
+	_KC_PCIE_SPEED_5_0GT		= 0x15,
+	_KC_PCIE_SPEED_8_0GT		= 0x16,
+	_KC_PCI_SPEED_UNKNOWN		= 0xff,
+};
+#define pci_bus_speed		_kc_pci_bus_speed
+#define PCIE_SPEED_2_5GT	_KC_PCIE_SPEED_2_5GT
+#define PCIE_SPEED_5_0GT	_KC_PCIE_SPEED_5_0GT
+#define PCIE_SPEED_8_0GT	_KC_PCIE_SPEED_8_0GT
+#define PCI_SPEED_UNKNOWN	_KC_PCI_SPEED_UNKNOWN
+#endif /* pci_bus_speed */
+
+#else /* < 2.6.34 */
+#define HAVE_SYSTEM_SLEEP_PM_OPS
+#ifndef HAVE_SET_RX_MODE
+#define HAVE_SET_RX_MODE
+#endif
+
+#endif /* < 2.6.34 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
+ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
+				   const void __user *from, size_t count);
+#define simple_write_to_buffer _kc_simple_write_to_buffer
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)))
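+/* Resolve a VF to its parent PF; without the is_virtfn bit this is an
+ * identity mapping.
+ */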
+static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
+{
+#ifdef HAVE_PCI_DEV_IS_VIRTFN_BIT
+#ifdef CONFIG_PCI_IOV
+	if (dev->is_virtfn)
+		dev = dev->physfn;
+#endif /* CONFIG_PCI_IOV */
+#endif /* HAVE_PCI_DEV_IS_VIRTFN_BIT */
+	return dev;
+}
+#endif /* ! RHEL >= 6.4 */
+
+#ifndef PCI_EXP_LNKSTA_NLW_SHIFT
+#define PCI_EXP_LNKSTA_NLW_SHIFT 4
+#endif
+
+#ifndef numa_node_id
+#define numa_node_id() 0
+#endif
+#ifndef numa_mem_id
+#define numa_mem_id numa_node_id
+#endif
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
+#ifdef HAVE_TX_MQ
+#include <net/sch_generic.h>
+#ifndef CONFIG_NETDEVICES_MULTIQUEUE
+int _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int);
+#else /* CONFIG_NETDEVICES_MULTIQUEUE */
+static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev,
+						   unsigned int txq)
+{
+	dev->egress_subqueue_count = txq;
+	return 0;
+}
+#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
+#else /* HAVE_TX_MQ */
+static inline int _kc_netif_set_real_num_tx_queues(struct net_device __always_unused *dev,
+						   unsigned int __always_unused txq)
+{
+	return 0;
+}
+#endif /* HAVE_TX_MQ */
+#define netif_set_real_num_tx_queues(dev, txq) \
+	_kc_netif_set_real_num_tx_queues(dev, txq)
+#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
+#ifndef ETH_FLAG_RXHASH
+#define ETH_FLAG_RXHASH (1<<28)
+#endif /* ETH_FLAG_RXHASH */
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))
+#define HAVE_IRQ_AFFINITY_HINT
+#endif
+struct device_node;
+#else /* < 2.6.35 */
+#define HAVE_STRUCT_DEVICE_OF_NODE
+#define HAVE_PM_QOS_REQUEST_LIST
+#define HAVE_IRQ_AFFINITY_HINT
+#include <linux/of.h>
+#endif /* < 2.6.35 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
+int _kc_ethtool_op_set_flags(struct net_device *, u32, u32);
+#define ethtool_op_set_flags _kc_ethtool_op_set_flags
+u32 _kc_ethtool_op_get_flags(struct net_device *);
+#define ethtool_op_get_flags _kc_ethtool_op_get_flags
+
+enum {
+	WQ_UNBOUND = 0,
+	WQ_RESCUER = 0,
+};
+
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#ifdef NET_IP_ALIGN
+#undef NET_IP_ALIGN
+#endif
+#define NET_IP_ALIGN 0
+#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
+
+#ifdef NET_SKB_PAD
+#undef NET_SKB_PAD
+#endif
+
+#if (L1_CACHE_BYTES > 32)
+#define NET_SKB_PAD L1_CACHE_BYTES
+#else
+#define NET_SKB_PAD 32
+#endif
+
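+/* Backport of netdev_alloc_skb_ip_align(): reserve NET_SKB_PAD plus
+ * NET_IP_ALIGN of headroom so the IP header ends up naturally aligned.
+ */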
+static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev,
+							    unsigned int length)
+{
+	struct sk_buff *skb;
+
+	skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC);
+	if (skb) {
+#if (NET_IP_ALIGN + NET_SKB_PAD)
+		skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
+#endif
+		skb->dev = dev;
+	}
+	return skb;
+}
+
+#ifdef netdev_alloc_skb_ip_align
+#undef netdev_alloc_skb_ip_align
+#endif
+#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l)
+
+#undef netif_level
+#define netif_level(level, priv, type, dev, fmt, args...)	\
+do {								\
+	if (netif_msg_##type(priv))				\
+		netdev_##level(dev, fmt, ##args);		\
+} while (0)
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)))
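+/* No usleep_range() here; approximate it with msleep() of the minimum,
+ * rounded up to whole milliseconds.
+ */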
+#undef usleep_range
+#define usleep_range(min, max)	msleep(DIV_ROUND_UP(min, 1000))
+#endif
+
+#define u64_stats_update_begin(a) do { } while(0)
+#define u64_stats_update_end(a) do { } while(0)
+#define u64_stats_fetch_begin(a) do { } while(0)
+#define u64_stats_fetch_retry_bh(a,b) (0)
+#define u64_stats_fetch_begin_bh(a) (0)
+
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1))
+#define HAVE_8021P_SUPPORT
+#endif
+
+/* RHEL6.4 and SLES11sp2 backported skb_tx_timestamp */
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \
+     !(SLE_VERSION_CODE >= SLE_VERSION(11,2,0)))
+static inline void skb_tx_timestamp(struct sk_buff __always_unused *skb)
+{
+	return;
+}
+#endif
+
+#else /* < 2.6.36 */
+
+#define HAVE_PM_QOS_REQUEST_ACTIVE
+#define HAVE_8021P_SUPPORT
+#define HAVE_NDO_GET_STATS64
+#endif /* < 2.6.36 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) )
+#define HAVE_NON_CONST_PCI_DRIVER_NAME
+#ifndef netif_set_real_num_tx_queues
+static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev,
+						   unsigned int txq)
+{
+	netif_set_real_num_tx_queues(dev, txq);
+	return 0;
+}
+#define netif_set_real_num_tx_queues(dev, txq) \
+	_kc_netif_set_real_num_tx_queues(dev, txq)
+#endif
+#ifndef netif_set_real_num_rx_queues
+static inline int __kc_netif_set_real_num_rx_queues(struct net_device __always_unused *dev,
+						    unsigned int __always_unused rxq)
+{
+	return 0;
+}
+#define netif_set_real_num_rx_queues(dev, rxq) \
+	__kc_netif_set_real_num_rx_queues((dev), (rxq))
+#endif
+#ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR
+#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2)
+#endif
+#ifndef VLAN_N_VID
+#define VLAN_N_VID	VLAN_GROUP_ARRAY_LEN
+#endif /* VLAN_N_VID */
+#ifndef ETH_FLAG_TXVLAN
+#define ETH_FLAG_TXVLAN BIT(7)
+#endif /* ETH_FLAG_TXVLAN */
+#ifndef ETH_FLAG_RXVLAN
+#define ETH_FLAG_RXVLAN BIT(8)
+#endif /* ETH_FLAG_RXVLAN */
+
+#define WQ_MEM_RECLAIM WQ_RESCUER
+
+static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb)
+{
+	WARN_ON(skb->ip_summed != CHECKSUM_NONE);
+}
+#define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb)
+
+static inline void *_kc_vzalloc_node(unsigned long size, int node)
+{
+	void *addr = vmalloc_node(size, node);
+	if (addr)
+		memset(addr, 0, size);
+	return addr;
+}
+#define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node)
+
+static inline void *_kc_vzalloc(unsigned long size)
+{
+	void *addr = vmalloc(size);
+	if (addr)
+		memset(addr, 0, size);
+	return addr;
+}
+#define vzalloc(_size) _kc_vzalloc(_size)
+
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,7)) || \
+     (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,0)))
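+/* Return the encapsulated protocol of an 802.1Q frame whose tag was not
+ * hardware-accelerated, or 0 if the VLAN header is truncated.
+ */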
+static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
+{
+	if (vlan_tx_tag_present(skb) ||
+	    skb->protocol != cpu_to_be16(ETH_P_8021Q))
+		return skb->protocol;
+
+	if (skb_headlen(skb) < sizeof(struct vlan_ethhdr))
+		return 0;
+
+	return ((struct vlan_ethhdr*)skb->data)->h_vlan_encapsulated_proto;
+}
+#endif /* !RHEL5.7+ || RHEL6.0 */
+
+#ifdef HAVE_HW_TIME_STAMP
+#define SKBTX_HW_TSTAMP BIT(0)
+#define SKBTX_IN_PROGRESS BIT(2)
+#define SKB_SHARED_TX_IS_UNION
+#endif
+
+#ifndef device_wakeup_enable
+#define device_wakeup_enable(dev)	device_set_wakeup_enable(dev, true)
+#endif
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,18) )
+#ifndef HAVE_VLAN_RX_REGISTER
+#define HAVE_VLAN_RX_REGISTER
+#endif
+#endif /* > 2.4.18 */
+#endif /* < 2.6.37 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) )
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
+#define skb_checksum_start_offset(skb) skb_transport_offset(skb)
+#else /* 2.6.22 -> 2.6.37 */
+static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb)
+{
+	return skb->csum_start - skb_headroom(skb);
+}
+#define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb)
+#endif /* 2.6.22 -> 2.6.37 */
+#if IS_ENABLED(CONFIG_DCB)
+#ifndef IEEE_8021QAZ_MAX_TCS
+#define IEEE_8021QAZ_MAX_TCS 8
+#endif
+#ifndef DCB_CAP_DCBX_HOST
+#define DCB_CAP_DCBX_HOST		0x01
+#endif
+#ifndef DCB_CAP_DCBX_LLD_MANAGED
+#define DCB_CAP_DCBX_LLD_MANAGED	0x02
+#endif
+#ifndef DCB_CAP_DCBX_VER_CEE
+#define DCB_CAP_DCBX_VER_CEE		0x04
+#endif
+#ifndef DCB_CAP_DCBX_VER_IEEE
+#define DCB_CAP_DCBX_VER_IEEE		0x08
+#endif
+#ifndef DCB_CAP_DCBX_STATIC
+#define DCB_CAP_DCBX_STATIC		0x10
+#endif
+#endif /* CONFIG_DCB */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2))
+#define CONFIG_XPS
+#endif /* RHEL_RELEASE_VERSION(6,2) */
+#endif /* < 2.6.38 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
+#ifndef TC_BITMASK
+#define TC_BITMASK 15
+#endif
+#ifndef NETIF_F_RXCSUM
+#define NETIF_F_RXCSUM		BIT(29)
+#endif
+#ifndef skb_queue_reverse_walk_safe
+#define skb_queue_reverse_walk_safe(queue, skb, tmp)				\
+		for (skb = (queue)->prev, tmp = skb->prev;			\
+		     skb != (struct sk_buff *)(queue);				\
+		     skb = tmp, tmp = skb->prev)
+#endif
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef FCOE_MTU
+#define FCOE_MTU	2158
+#endif
+#endif
+#if IS_ENABLED(CONFIG_DCB)
+#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE
+#define IEEE_8021QAZ_APP_SEL_ETHERTYPE	1
+#endif
+#endif
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)))
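+/* kstrto*() fallbacks built on simple_strtoul(); note they always
+ * return 0, so parse errors go unreported.
+ */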
+#define kstrtoul(a, b, c)  ((*(c)) = simple_strtoul((a), NULL, (b)), 0)
+#define kstrtouint(a, b, c)  ((*(c)) = simple_strtoul((a), NULL, (b)), 0)
+#define kstrtou32(a, b, c)  ((*(c)) = simple_strtoul((a), NULL, (b)), 0)
+#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) */
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)))
+u16 ___kc_skb_tx_hash(struct net_device *, const struct sk_buff *, u16);
+#define __skb_tx_hash(n, s, q) ___kc_skb_tx_hash((n), (s), (q))
+u8 _kc_netdev_get_num_tc(struct net_device *dev);
+#define netdev_get_num_tc(dev) _kc_netdev_get_num_tc(dev)
+int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc);
+#define netdev_set_num_tc(dev, tc) _kc_netdev_set_num_tc((dev), (tc))
+#define netdev_reset_tc(dev) _kc_netdev_set_num_tc((dev), 0)
+#define netdev_set_tc_queue(dev, tc, cnt, off) do {} while (0)
+u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up);
+#define netdev_get_prio_tc_map(dev, up) _kc_netdev_get_prio_tc_map(dev, up)
+#define netdev_set_prio_tc_map(dev, up, tc) do {} while (0)
+#else /* RHEL6.1 or greater */
+#ifndef HAVE_MQPRIO
+#define HAVE_MQPRIO
+#endif /* HAVE_MQPRIO */
+#if IS_ENABLED(CONFIG_DCB)
+#ifndef HAVE_DCBNL_IEEE
+#define HAVE_DCBNL_IEEE
+#ifndef IEEE_8021QAZ_TSA_STRICT
+#define IEEE_8021QAZ_TSA_STRICT		0
+#endif
+#ifndef IEEE_8021QAZ_TSA_ETS
+#define IEEE_8021QAZ_TSA_ETS		2
+#endif
+#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE
+#define IEEE_8021QAZ_APP_SEL_ETHERTYPE	1
+#endif
+#endif
+#endif /* CONFIG_DCB */
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */
+
+#ifndef udp_csum
+#define udp_csum __kc_udp_csum
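+/* Fold the checksums of any frag_list segments into the UDP header sum. */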
+static inline __wsum __kc_udp_csum(struct sk_buff *skb)
+{
+	__wsum csum = csum_partial(skb_transport_header(skb),
+				   sizeof(struct udphdr), skb->csum);
+
+	for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) {
+		csum = csum_add(csum, skb->csum);
+	}
+	return csum;
+}
+#endif /* udp_csum */
+#else /* < 2.6.39 */
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef HAVE_NETDEV_OPS_FCOE_DDP_TARGET
+#define HAVE_NETDEV_OPS_FCOE_DDP_TARGET
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#ifndef HAVE_MQPRIO
+#define HAVE_MQPRIO
+#endif
+#ifndef HAVE_SETUP_TC
+#define HAVE_SETUP_TC
+#endif
+#ifdef CONFIG_DCB
+#ifndef HAVE_DCBNL_IEEE
+#define HAVE_DCBNL_IEEE
+#endif
+#endif /* CONFIG_DCB */
+#ifndef HAVE_NDO_SET_FEATURES
+#define HAVE_NDO_SET_FEATURES
+#endif
+#define HAVE_IRQ_AFFINITY_NOTIFY
+#endif /* < 2.6.39 */
+
+/*****************************************************************************/
+/* use < 2.6.40 because a Fedora 15 kernel update bumped the version to
+ * 2.6.40.x and back-ported 3.0 features such as set_phys_id for ethtool.
+ */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) )
+#ifdef ETHTOOL_GRXRINGS
+#ifndef FLOW_EXT
+#define	FLOW_EXT	0x80000000
+union _kc_ethtool_flow_union {
+	struct ethtool_tcpip4_spec		tcp_ip4_spec;
+	struct ethtool_usrip4_spec		usr_ip4_spec;
+	__u8					hdata[60];
+};
+struct _kc_ethtool_flow_ext {
+	__be16	vlan_etype;
+	__be16	vlan_tci;
+	__be32	data[2];
+};
+struct _kc_ethtool_rx_flow_spec {
+	__u32		flow_type;
+	union _kc_ethtool_flow_union h_u;
+	struct _kc_ethtool_flow_ext h_ext;
+	union _kc_ethtool_flow_union m_u;
+	struct _kc_ethtool_flow_ext m_ext;
+	__u64		ring_cookie;
+	__u32		location;
+};
+#define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec
+#endif /* FLOW_EXT */
+#endif
+
+#define pci_disable_link_state_locked pci_disable_link_state
+
+#ifndef PCI_LTR_VALUE_MASK
+#define  PCI_LTR_VALUE_MASK	0x000003ff
+#endif
+#ifndef PCI_LTR_SCALE_MASK
+#define  PCI_LTR_SCALE_MASK	0x00001c00
+#endif
+#ifndef PCI_LTR_SCALE_SHIFT
+#define  PCI_LTR_SCALE_SHIFT	10
+#endif
+
+#else /* < 2.6.40 */
+#define HAVE_ETHTOOL_SET_PHYS_ID
+#endif /* < 2.6.40 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) )
+#define USE_LEGACY_PM_SUPPORT
+#ifndef kfree_rcu
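+/* Emulate kfree_rcu() with a GCC nested function as the RCU callback;
+ * container_of() recovers the enclosing object from its rcu_head member.
+ */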
+#define kfree_rcu(_ptr, _rcu_head) do { 				\
+	void __kc_kfree_rcu(struct rcu_head *rcu_head)			\
+	{								\
+		void *ptr = container_of(rcu_head,			\
+					 typeof(*_ptr),			\
+					 _rcu_head);			\
+		kfree(ptr);						\
+	}								\
+	call_rcu(&(_ptr)->_rcu_head, __kc_kfree_rcu);			\
+} while (0)
+#define HAVE_KFREE_RCU_BARRIER
+#endif /* kfree_rcu */
+#ifndef kstrtol_from_user
+#define kstrtol_from_user(s, c, b, r) _kc_kstrtol_from_user(s, c, b, r)
+static inline int _kc_kstrtol_from_user(const char __user *s, size_t count,
+					unsigned int base, long *res)
+{
+	/* sign, base 2 representation, newline, terminator */
+	char buf[1 + sizeof(long) * 8 + 1 + 1];
+
+	count = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, s, count))
+		return -EFAULT;
+	buf[count] = '\0';
+	return strict_strtol(buf, base, res);
+}
+#endif
+
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,0) || \
+     RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,7)))
+/* 20000base_blah_full Supported and Advertised Registers */
+#define SUPPORTED_20000baseMLD2_Full	BIT(21)
+#define SUPPORTED_20000baseKR2_Full	BIT(22)
+#define ADVERTISED_20000baseMLD2_Full	BIT(21)
+#define ADVERTISED_20000baseKR2_Full	BIT(22)
+#endif /* RHEL_RELEASE_CODE */
+#endif /* < 3.0.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) )
+#ifndef __netdev_alloc_skb_ip_align
+#define __netdev_alloc_skb_ip_align(d,l,_g) netdev_alloc_skb_ip_align(d,l)
+#endif /* __netdev_alloc_skb_ip_align */
+#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app)
+#define dcb_ieee_delapp(dev, app) 0
+#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority)
+
+/* 1000BASE-T Control register */
+#define CTL1000_AS_MASTER	0x0800
+#define CTL1000_ENABLE_MASTER	0x1000
+
+/* kernels less than 3.0.0 don't have this */
+#ifndef ETH_P_8021AD
+#define ETH_P_8021AD	0x88A8
+#endif
+
+/* A stub definition for the !CONFIG_OF case is provided by later kernels */
+#ifdef CONFIG_OF
+static inline struct device_node *
+pci_device_to_OF_node(struct pci_dev __maybe_unused *pdev)
+{
+#ifdef HAVE_STRUCT_DEVICE_OF_NODE
+	return pdev ? pdev->dev.of_node : NULL;
+#else
+	return NULL;
+#endif /* !HAVE_STRUCT_DEVICE_OF_NODE */
+}
+#endif /* CONFIG_OF */
+#else /* < 3.1.0 */
+#ifndef HAVE_DCBNL_IEEE_DELAPP
+#define HAVE_DCBNL_IEEE_DELAPP
+#endif
+#endif /* < 3.1.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) )
+#ifndef dma_zalloc_coherent
+#define dma_zalloc_coherent(d, s, h, f) _kc_dma_zalloc_coherent(d, s, h, f)
+static inline void *_kc_dma_zalloc_coherent(struct device *dev, size_t size,
+					    dma_addr_t *dma_handle, gfp_t flag)
+{
+	void *ret = dma_alloc_coherent(dev, size, dma_handle, flag);
+	if (ret)
+		memset(ret, 0, size);
+	return ret;
+}
+#endif
+#ifdef ETHTOOL_GRXRINGS
+#define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
+#endif /* ETHTOOL_GRXRINGS */
+
+#ifndef skb_frag_size
+#define skb_frag_size(frag)	_kc_skb_frag_size(frag)
+static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag)
+{
+	return frag->size;
+}
+#endif /* skb_frag_size */
+
+#ifndef skb_frag_size_sub
+#define skb_frag_size_sub(frag, delta)	_kc_skb_frag_size_sub(frag, delta)
+static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta)
+{
+	frag->size -= delta;
+}
+#endif /* skb_frag_size_sub */
+
+#ifndef skb_frag_page
+#define skb_frag_page(frag)	_kc_skb_frag_page(frag)
+static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag)
+{
+	return frag->page;
+}
+#endif /* skb_frag_page */
+
+#ifndef skb_frag_address
+#define skb_frag_address(frag)	_kc_skb_frag_address(frag)
+static inline void *_kc_skb_frag_address(const skb_frag_t *frag)
+{
+	return page_address(skb_frag_page(frag)) + frag->page_offset;
+}
+#endif /* skb_frag_address */
+
+#ifndef skb_frag_dma_map
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
+#include <linux/dma-mapping.h>
+#endif
+#define skb_frag_dma_map(dev,frag,offset,size,dir) \
+		_kc_skb_frag_dma_map(dev,frag,offset,size,dir)
+static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev,
+					      const skb_frag_t *frag,
+					      size_t offset, size_t size,
+					      enum dma_data_direction dir)
+{
+	return dma_map_page(dev, skb_frag_page(frag),
+			    frag->page_offset + offset, size, dir);
+}
+#endif /* skb_frag_dma_map */
+
+#ifndef __skb_frag_unref
+#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag)
+static inline void __kc_skb_frag_unref(skb_frag_t *frag)
+{
+	put_page(skb_frag_page(frag));
+}
+#endif /* __skb_frag_unref */
+
+#ifndef SPEED_UNKNOWN
+#define SPEED_UNKNOWN	-1
+#endif
+#ifndef DUPLEX_UNKNOWN
+#define DUPLEX_UNKNOWN	0xff
+#endif
+#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) ||\
+     (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)))
+#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED
+#define HAVE_PCI_DEV_FLAGS_ASSIGNED
+#endif
+#endif
+#else /* < 3.2.0 */
+#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED
+#define HAVE_PCI_DEV_FLAGS_ASSIGNED
+#define HAVE_VF_SPOOFCHK_CONFIGURE
+#endif
+#ifndef HAVE_SKB_L4_RXHASH
+#define HAVE_SKB_L4_RXHASH
+#endif
+#define HAVE_IOMMU_PRESENT
+#define HAVE_PM_QOS_REQUEST_LIST_NEW
+#endif /* < 3.2.0 */
+
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,2))
+#undef ixgbe_get_netdev_tc_txq
+#define ixgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc])
+#endif
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) )
+/* NOTE: the order of parameters to _kc_alloc_workqueue() is different from
+ * alloc_workqueue() to avoid a compiler warning from -Wvarargs
+ */
+static inline struct workqueue_struct * __attribute__ ((format(printf, 3, 4)))
+_kc_alloc_workqueue(__maybe_unused int flags, __maybe_unused int max_active,
+		    const char *fmt, ...)
+{
+	struct workqueue_struct *wq;
+	va_list args, temp;
+	unsigned int len;
+	char *p;
+
+	va_start(args, fmt);
+	va_copy(temp, args);
+	len = vsnprintf(NULL, 0, fmt, temp);
+	va_end(temp);
+
+	p = kmalloc(len + 1, GFP_KERNEL);
+	if (!p) {
+		va_end(args);
+		return NULL;
+	}
+
+	vsnprintf(p, len + 1, fmt, args);
+	va_end(args);
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
+	wq = create_workqueue(p);
+#else
+	wq = alloc_workqueue(p, flags, max_active);
+#endif
+	kfree(p);
+
+	return wq;
+}
+#ifdef alloc_workqueue
+#undef alloc_workqueue
+#endif
+#define alloc_workqueue(fmt, flags, max_active, args...) \
+	_kc_alloc_workqueue(flags, max_active, fmt, ##args)
+
+#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5))
+typedef u32 netdev_features_t;
+#endif
+#undef PCI_EXP_TYPE_RC_EC
+#define  PCI_EXP_TYPE_RC_EC	0xa	/* Root Complex Event Collector */
+#ifndef CONFIG_BQL
+#define netdev_tx_completed_queue(_q, _p, _b) do {} while (0)
+#define netdev_completed_queue(_n, _p, _b) do {} while (0)
+#define netdev_tx_sent_queue(_q, _b) do {} while (0)
+#define netdev_sent_queue(_n, _b) do {} while (0)
+#define netdev_tx_reset_queue(_q) do {} while (0)
+#define netdev_reset_queue(_n) do {} while (0)
+#endif
+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
+#define HAVE_ETHTOOL_GRXFHINDIR_SIZE
+#endif /* SLE_VERSION(11,3,0) */
+#define netif_xmit_stopped(_q) netif_tx_queue_stopped(_q)
+#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))
+static inline int __kc_ipv6_skip_exthdr(const struct sk_buff *skb, int start,
+					u8 *nexthdrp,
+					__be16 __always_unused *frag_offp)
+{
+	return ipv6_skip_exthdr(skb, start, nexthdrp);
+}
+#undef ipv6_skip_exthdr
+#define ipv6_skip_exthdr(a,b,c,d) __kc_ipv6_skip_exthdr((a), (b), (c), (d))
+#endif /* !SLES11sp4 or greater */
+
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \
+     !(SLE_VERSION_CODE >= SLE_VERSION(11,3,0)))
+static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
+{
+	return index % n_rx_rings;
+}
+#endif
+
+#else /* ! < 3.3.0 */
+#define HAVE_ETHTOOL_GRXFHINDIR_SIZE
+#define HAVE_INT_NDO_VLAN_RX_ADD_VID
+#ifdef ETHTOOL_SRXNTUPLE
+#undef ETHTOOL_SRXNTUPLE
+#endif
+#endif /* < 3.3.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
+#ifndef NETIF_F_RXFCS
+#define NETIF_F_RXFCS	0
+#endif /* NETIF_F_RXFCS */
+#ifndef NETIF_F_RXALL
+#define NETIF_F_RXALL	0
+#endif /* NETIF_F_RXALL */
+
+#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
+#define NUMTCS_RETURNS_U8
+
+int _kc_simple_open(struct inode *inode, struct file *file);
+#define simple_open _kc_simple_open
+#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */
+
+#ifndef skb_add_rx_frag
+#define skb_add_rx_frag _kc_skb_add_rx_frag
+void _kc_skb_add_rx_frag(struct sk_buff * skb, int i, struct page *page,
+			 int off, int size, unsigned int truesize);
+#endif
+#ifdef NET_ADDR_RANDOM
+#define eth_hw_addr_random(N) do { \
+	eth_random_addr(N->dev_addr); \
+	N->addr_assign_type |= NET_ADDR_RANDOM; \
+	} while (0)
+#else /* NET_ADDR_RANDOM */
+#define eth_hw_addr_random(N) eth_random_addr(N->dev_addr)
+#endif /* NET_ADDR_RANDOM */
+
+#ifndef for_each_set_bit_from
+#define for_each_set_bit_from(bit, addr, size) \
+	for ((bit) = find_next_bit((addr), (size), (bit)); \
+			(bit) < (size); \
+			(bit) = find_next_bit((addr), (size), (bit) + 1))
+#endif /* for_each_set_bit_from */
+
+#else /* < 3.4.0 */
+#include <linux/kconfig.h>
+#endif /* >= 3.4.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) || \
+    ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) )
+#if !defined(NO_PTP_SUPPORT) && IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+#define HAVE_PTP_1588_CLOCK
+#endif /* !NO_PTP_SUPPORT && IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+#endif /* >= 3.0.0 || RHEL >= 6.4 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) )
+
+#ifndef SIZE_MAX
+#define SIZE_MAX (~(size_t)0)
+#endif
+
+#ifndef BITS_PER_LONG_LONG
+#define BITS_PER_LONG_LONG 64
+#endif
+
+#ifndef ether_addr_equal
+static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2)
+{
+	return !compare_ether_addr(addr1, addr2);
+}
+#define ether_addr_equal(_addr1, _addr2) __kc_ether_addr_equal((_addr1),(_addr2))
+#endif
+
+/* Definitions for !CONFIG_OF_NET are introduced in 3.10 */
+#ifdef CONFIG_OF_NET
+static inline int of_get_phy_mode(struct device_node __always_unused *np)
+{
+	return -ENODEV;
+}
+
+static inline const void *
+of_get_mac_address(struct device_node __always_unused *np)
+{
+	return NULL;
+}
+#endif
+#else
+#include <linux/of_net.h>
+#define HAVE_FDB_OPS
+#define HAVE_ETHTOOL_GET_TS_INFO
+#endif /* < 3.5.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0) )
+#define PCI_EXP_LNKCAP2		44	/* Link Capability 2 */
+
+#ifndef MDIO_EEE_100TX
+#define MDIO_EEE_100TX		0x0002	/* 100TX EEE cap */
+#endif
+#ifndef MDIO_EEE_1000T
+#define MDIO_EEE_1000T		0x0004	/* 1000T EEE cap */
+#endif
+#ifndef MDIO_EEE_10GT
+#define MDIO_EEE_10GT		0x0008	/* 10GT EEE cap */
+#endif
+#ifndef MDIO_EEE_1000KX
+#define MDIO_EEE_1000KX		0x0010	/* 1000KX EEE cap */
+#endif
+#ifndef MDIO_EEE_10GKX4
+#define MDIO_EEE_10GKX4		0x0020	/* 10G KX4 EEE cap */
+#endif
+#ifndef MDIO_EEE_10GKR
+#define MDIO_EEE_10GKR		0x0040	/* 10G KR EEE cap */
+#endif
+
+#ifndef __GFP_MEMALLOC
+#define __GFP_MEMALLOC 0
+#endif
+
+#ifndef eth_broadcast_addr
+#define eth_broadcast_addr _kc_eth_broadcast_addr
+static inline void _kc_eth_broadcast_addr(u8 *addr)
+{
+	memset(addr, 0xff, ETH_ALEN);
+}
+#endif
+
+#ifndef eth_random_addr
+#define eth_random_addr _kc_eth_random_addr
+static inline void _kc_eth_random_addr(u8 *addr)
+{
+	get_random_bytes(addr, ETH_ALEN);
+	addr[0] &= 0xfe; /* clear multicast */
+	addr[0] |= 0x02; /* set local assignment */
+}
+#endif /* eth_random_addr */
+
+#ifndef DMA_ATTR_SKIP_CPU_SYNC
+#define DMA_ATTR_SKIP_CPU_SYNC 0
+#endif
+#else /* < 3.6.0 */
+#define HAVE_STRUCT_PAGE_PFMEMALLOC
+#endif /* < 3.6.0 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) )
+#include <linux/workqueue.h>
+#ifndef ADVERTISED_40000baseKR4_Full
+/* these defines were all added in one commit, so it should be safe
+ * to trigger activation on any one of them
+ */
+#define SUPPORTED_40000baseKR4_Full	BIT(23)
+#define SUPPORTED_40000baseCR4_Full	BIT(24)
+#define SUPPORTED_40000baseSR4_Full	BIT(25)
+#define SUPPORTED_40000baseLR4_Full	BIT(26)
+#define ADVERTISED_40000baseKR4_Full	BIT(23)
+#define ADVERTISED_40000baseCR4_Full	BIT(24)
+#define ADVERTISED_40000baseSR4_Full	BIT(25)
+#define ADVERTISED_40000baseLR4_Full	BIT(26)
+#endif
+
+#ifndef mmd_eee_cap_to_ethtool_sup_t
+/**
+ * mmd_eee_cap_to_ethtool_sup_t
+ * @eee_cap: value of the MMD EEE Capability register
+ *
+ * A small helper function that translates MMD EEE Capability (3.20) bits
+ * to ethtool supported settings.
+ */
+static inline u32 __kc_mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap)
+{
+	u32 supported = 0;
+
+	if (eee_cap & MDIO_EEE_100TX)
+		supported |= SUPPORTED_100baseT_Full;
+	if (eee_cap & MDIO_EEE_1000T)
+		supported |= SUPPORTED_1000baseT_Full;
+	if (eee_cap & MDIO_EEE_10GT)
+		supported |= SUPPORTED_10000baseT_Full;
+	if (eee_cap & MDIO_EEE_1000KX)
+		supported |= SUPPORTED_1000baseKX_Full;
+	if (eee_cap & MDIO_EEE_10GKX4)
+		supported |= SUPPORTED_10000baseKX4_Full;
+	if (eee_cap & MDIO_EEE_10GKR)
+		supported |= SUPPORTED_10000baseKR_Full;
+
+	return supported;
+}
+#define mmd_eee_cap_to_ethtool_sup_t(eee_cap) \
+	__kc_mmd_eee_cap_to_ethtool_sup_t(eee_cap)
+#endif /* mmd_eee_cap_to_ethtool_sup_t */
+
+#ifndef mmd_eee_adv_to_ethtool_adv_t
+/**
+ * mmd_eee_adv_to_ethtool_adv_t
+ * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers
+ *
+ * A small helper function that translates the MMD EEE Advertisement (7.60)
+ * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement
+ * settings.
+ */
+static inline u32 __kc_mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv)
+{
+	u32 adv = 0;
+
+	if (eee_adv & MDIO_EEE_100TX)
+		adv |= ADVERTISED_100baseT_Full;
+	if (eee_adv & MDIO_EEE_1000T)
+		adv |= ADVERTISED_1000baseT_Full;
+	if (eee_adv & MDIO_EEE_10GT)
+		adv |= ADVERTISED_10000baseT_Full;
+	if (eee_adv & MDIO_EEE_1000KX)
+		adv |= ADVERTISED_1000baseKX_Full;
+	if (eee_adv & MDIO_EEE_10GKX4)
+		adv |= ADVERTISED_10000baseKX4_Full;
+	if (eee_adv & MDIO_EEE_10GKR)
+		adv |= ADVERTISED_10000baseKR_Full;
+
+	return adv;
+}
+
+#define mmd_eee_adv_to_ethtool_adv_t(eee_adv) \
+	__kc_mmd_eee_adv_to_ethtool_adv_t(eee_adv)
+#endif /* mmd_eee_adv_to_ethtool_adv_t */
+
+#ifndef ethtool_adv_to_mmd_eee_adv_t
+/**
+ * ethtool_adv_to_mmd_eee_adv_t
+ * @adv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement settings
+ * to EEE advertisements for the MMD EEE Advertisement (7.60) and
+ * MMD EEE Link Partner Ability (7.61) registers.
+ */
+static inline u16 __kc_ethtool_adv_to_mmd_eee_adv_t(u32 adv)
+{
+	u16 reg = 0;
+
+	if (adv & ADVERTISED_100baseT_Full)
+		reg |= MDIO_EEE_100TX;
+	if (adv & ADVERTISED_1000baseT_Full)
+		reg |= MDIO_EEE_1000T;
+	if (adv & ADVERTISED_10000baseT_Full)
+		reg |= MDIO_EEE_10GT;
+	if (adv & ADVERTISED_1000baseKX_Full)
+		reg |= MDIO_EEE_1000KX;
+	if (adv & ADVERTISED_10000baseKX4_Full)
+		reg |= MDIO_EEE_10GKX4;
+	if (adv & ADVERTISED_10000baseKR_Full)
+		reg |= MDIO_EEE_10GKR;
+
+	return reg;
+}
+#define ethtool_adv_to_mmd_eee_adv_t(adv) __kc_ethtool_adv_to_mmd_eee_adv_t(adv)
+#endif /* ethtool_adv_to_mmd_eee_adv_t */
+
+#ifndef pci_pcie_type
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
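+/* Extract the device/port type from bits 7:4 of the PCIe Flags register. */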
+static inline u8 pci_pcie_type(struct pci_dev *pdev)
+{
+	int pos;
+	u16 reg16;
+
+	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+	BUG_ON(!pos);
+	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
+	return (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
+}
+#else /* < 2.6.24 */
+#define pci_pcie_type(x)	(x)->pcie_type
+#endif /* < 2.6.24 */
+#endif /* pci_pcie_type */
+
+#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) ) && \
+    ( ! ( SLE_VERSION_CODE >= SLE_VERSION(11,3,0) ) ) && \
+    ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) )
+#define ptp_clock_register(caps, args...) ptp_clock_register(caps)
+#endif
+
+#ifndef pcie_capability_read_word
+int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
+#define pcie_capability_read_word(d,p,v) __kc_pcie_capability_read_word(d,p,v)
+#endif /* pcie_capability_read_word */
+
+#ifndef pcie_capability_read_dword
+int __kc_pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
+#define pcie_capability_read_dword(d,p,v) __kc_pcie_capability_read_dword(d,p,v)
+#endif
+
+#ifndef pcie_capability_write_word
+int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
+#define pcie_capability_write_word(d,p,v) __kc_pcie_capability_write_word(d,p,v)
+#endif /* pcie_capability_write_word */
+
+#ifndef pcie_capability_clear_and_set_word
+int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
+					    u16 clear, u16 set);
+#define pcie_capability_clear_and_set_word(d,p,c,s) \
+	__kc_pcie_capability_clear_and_set_word(d,p,c,s)
+#endif /* pcie_capability_clear_and_set_word */
+
+#ifndef pcie_capability_clear_word
+int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos,
+					     u16 clear);
+#define pcie_capability_clear_word(d, p, c) \
+	__kc_pcie_capability_clear_word(d, p, c)
+#endif /* pcie_capability_clear_word */
+
+#ifndef PCI_EXP_LNKSTA2
+#define PCI_EXP_LNKSTA2		50	/* Link Status 2 */
+#endif
+
+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
+#define USE_CONST_DEV_UC_CHAR
+#define HAVE_NDO_FDB_ADD_NLATTR
+#endif
+
+#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8))
+#define napi_gro_flush(_napi, _flush_old) napi_gro_flush(_napi)
+#endif /* !RHEL6.8+ */
+
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6))
+#include <linux/hashtable.h>
+#else
+
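+/* Minimal backport of <linux/hashtable.h>: a fixed-size array of hlist
+ * heads indexed by hash_min().
+ */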
+#define DEFINE_HASHTABLE(name, bits)						\
+	struct hlist_head name[1 << (bits)] =					\
+			{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
+
+#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits)				\
+	struct hlist_head name[1 << (bits)] __read_mostly =			\
+			{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
+
+#define DECLARE_HASHTABLE(name, bits)                                   	\
+	struct hlist_head name[1 << (bits)]
+
+#define HASH_SIZE(name) (ARRAY_SIZE(name))
+#define HASH_BITS(name) ilog2(HASH_SIZE(name))
+
+/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */
+#define hash_min(val, bits)							\
+	(sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))
+
+static inline void __hash_init(struct hlist_head *ht, unsigned int sz)
+{
+	unsigned int i;
+
+	for (i = 0; i < sz; i++)
+		INIT_HLIST_HEAD(&ht[i]);
+}
+
+#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable))
+
+#define hash_add(hashtable, node, key)						\
+	hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
+
+static inline bool hash_hashed(struct hlist_node *node)
+{
+	return !hlist_unhashed(node);
+}
+
+static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz)
+{
+	unsigned int i;
+
+	for (i = 0; i < sz; i++)
+		if (!hlist_empty(&ht[i]))
+			return false;
+
+	return true;
+}
+
+#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable))
+
+static inline void hash_del(struct hlist_node *node)
+{
+	hlist_del_init(node);
+}
+#endif /* RHEL >= 6.6 */
+
+/* We don't have @flags support prior to 3.7, so we'll simply ignore the flags
+ * parameter on these older kernels.
+ */
+#define __setup_timer(_timer, _fn, _data, _flags)	\
+	setup_timer((_timer), (_fn), (_data))
+
+#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) ) ) && \
+	( ! ( SLE_VERSION_CODE >= SLE_VERSION(12,0,0) ) )
+
+#ifndef mod_delayed_work
+/**
+ * __mod_delayed_work - modify delay or queue delayed work
+ * @wq: workqueue to use
+ * @dwork: delayed work to queue
+ * @delay: number of jiffies to wait before queueing
+ *
+ * Return: %true if @dwork was pending and was rescheduled;
+ *         %false if it wasn't pending
+ *
+ * Note: the dwork parameter is declared as a void *
+ *       to avoid compatibility problems with early 2.6 kernels,
+ *       where struct delayed_work is not declared. Unlike the original
+ *       implementation, flags are not preserved and it shouldn't be
+ *       used in interrupt context.
+ */
+static inline bool __mod_delayed_work(struct workqueue_struct *wq,
+				    void *dwork,
+				    unsigned long delay)
+{
+	bool ret = cancel_delayed_work(dwork);
+	queue_delayed_work(wq, dwork, delay);
+	return ret;
+}
+#define mod_delayed_work(wq, dwork, delay) __mod_delayed_work(wq, dwork, delay)
+#endif /* mod_delayed_work */
+
+#endif /* !(RHEL >= 6.7) && !(SLE >= 12.0) */
+#else /* >= 3.7.0 */
+#include <linux/hashtable.h>
+#define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS
+#define USE_CONST_DEV_UC_CHAR
+#define HAVE_NDO_FDB_ADD_NLATTR
+#endif /* >= 3.7.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) )
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) && \
+     !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)))
+#ifndef pci_sriov_set_totalvfs
+static inline int __kc_pci_sriov_set_totalvfs(struct pci_dev __always_unused *dev, u16 __always_unused numvfs)
+{
+	return 0;
+}
+#define pci_sriov_set_totalvfs(a, b) __kc_pci_sriov_set_totalvfs((a), (b))
+#endif
+#endif /* !(RHEL_RELEASE_CODE >= 6.5 && SLE_VERSION_CODE >= 11.4) */
+#ifndef PCI_EXP_LNKCTL_ASPM_L0S
+#define  PCI_EXP_LNKCTL_ASPM_L0S  0x01	/* L0s Enable */
+#endif
+#ifndef PCI_EXP_LNKCTL_ASPM_L1
+#define  PCI_EXP_LNKCTL_ASPM_L1   0x02	/* L1 Enable */
+#endif
+#define HAVE_CONFIG_HOTPLUG
+/* Reserved Ethernet Addresses per IEEE 802.1Q */
+static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = {
+	0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
+
+#ifndef is_link_local_ether_addr
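+/* Match 01:80:C2:00:00:0X; the low nibble is masked off so all sixteen
+ * reserved group addresses compare equal.
+ */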
+static inline bool __kc_is_link_local_ether_addr(const u8 *addr)
+{
+	__be16 *a = (__be16 *)addr;
+	static const __be16 *b = (const __be16 *)eth_reserved_addr_base;
+	static const __be16 m = cpu_to_be16(0xfff0);
+
+	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
+}
+#define is_link_local_ether_addr(addr) __kc_is_link_local_ether_addr(addr)
+#endif /* is_link_local_ether_addr */
+
+#ifndef FLOW_MAC_EXT
+#define FLOW_MAC_EXT	0x40000000
+#endif /* FLOW_MAC_EXT */
+
+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))
+#define HAVE_SRIOV_CONFIGURE
+#endif
+
+#ifndef PCI_EXP_LNKCAP_SLS_2_5GB
+#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */
+#endif
+
+#ifndef PCI_EXP_LNKCAP_SLS_5_0GB
+#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */
+#endif
+
+#undef PCI_EXP_LNKCAP2_SLS_2_5GB
+#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */
+
+#undef PCI_EXP_LNKCAP2_SLS_5_0GB
+#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5GT/s */
+
+#undef PCI_EXP_LNKCAP2_SLS_8_0GB
+#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8GT/s */
+
+#else /* >= 3.8.0 */
+#ifndef __devinit
+#define __devinit
+#endif
+
+#ifndef __devinitdata
+#define __devinitdata
+#endif
+
+#ifndef __devinitconst
+#define __devinitconst
+#endif
+
+#ifndef __devexit
+#define __devexit
+#endif
+
+#ifndef __devexit_p
+#define __devexit_p
+#endif
+
+#ifndef HAVE_ENCAP_CSUM_OFFLOAD
+#define HAVE_ENCAP_CSUM_OFFLOAD
+#endif
+
+#ifndef HAVE_GRE_ENCAP_OFFLOAD
+#define HAVE_GRE_ENCAP_OFFLOAD
+#endif
+
+#ifndef HAVE_SRIOV_CONFIGURE
+#define HAVE_SRIOV_CONFIGURE
+#endif
+
+#define HAVE_BRIDGE_ATTRIBS
+#ifndef BRIDGE_MODE_VEB
+#define BRIDGE_MODE_VEB		0	/* Default loopback mode */
+#endif /* BRIDGE_MODE_VEB */
+#ifndef BRIDGE_MODE_VEPA
+#define BRIDGE_MODE_VEPA	1	/* 802.1Qbg defined VEPA mode */
+#endif /* BRIDGE_MODE_VEPA */
+#endif /* >= 3.8.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) )
+
+#undef BUILD_BUG_ON
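+/* Rebuild BUILD_BUG_ON()/BUILD_BUG_ON_MSG() from compile-time assertion
+ * primitives; the negative-size array fallback covers compilers without
+ * the error attribute.
+ */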
+#ifdef __CHECKER__
+#define BUILD_BUG_ON(condition) (0)
+#else /* __CHECKER__ */
+#ifndef __compiletime_warning
+#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400)
+#define __compiletime_warning(message) __attribute__((warning(message)))
+#else /* __GNUC__ */
+#define __compiletime_warning(message)
+#endif /* __GNUC__ */
+#endif /* __compiletime_warning */
+#ifndef __compiletime_error
+#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400)
+#define __compiletime_error(message) __attribute__((error(message)))
+#define __compiletime_error_fallback(condition) do { } while (0)
+#else /* __GNUC__ */
+#define __compiletime_error(message)
+#define __compiletime_error_fallback(condition) \
+	do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
+#endif /* __GNUC__ */
+#else /* __compiletime_error */
+#define __compiletime_error_fallback(condition) do { } while (0)
+#endif /* __compiletime_error */
+#define __compiletime_assert(condition, msg, prefix, suffix)		\
+	do {								\
+		bool __cond = !(condition);				\
+		extern void prefix ## suffix(void) __compiletime_error(msg); \
+		if (__cond)						\
+			prefix ## suffix();				\
+		__compiletime_error_fallback(__cond);			\
+	} while (0)
+
+#define _compiletime_assert(condition, msg, prefix, suffix) \
+	__compiletime_assert(condition, msg, prefix, suffix)
+#define compiletime_assert(condition, msg) \
+	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
+#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg)
+#ifndef __OPTIMIZE__
+#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
+#else /* __OPTIMIZE__ */
+#define BUILD_BUG_ON(condition) \
+	BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition)
+#endif /* __OPTIMIZE__ */
+#endif /* __CHECKER__ */
+
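+/* 3.9 dropped the separate node cursor from the hlist iterators;
+ * redefine them here in the new single-cursor style.
+ */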
+#undef hlist_entry
+#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
+
+#undef hlist_entry_safe
+#define hlist_entry_safe(ptr, type, member) \
+	({ typeof(ptr) ____ptr = (ptr); \
+	   ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
+	})
+
+#undef hlist_for_each_entry
+#define hlist_for_each_entry(pos, head, member)                             \
+	for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \
+	     pos;                                                           \
+	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
+
+#undef hlist_for_each_entry_safe
+#define hlist_for_each_entry_safe(pos, n, head, member) 		    \
+	for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);   \
+	     pos && ({ n = pos->member.next; 1; });			    \
+	     pos = hlist_entry_safe(n, typeof(*pos), member))
+
+#undef hlist_for_each_entry_continue
+#define hlist_for_each_entry_continue(pos, member)			\
+	for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\
+	     pos;							\
+	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
+
+#undef hlist_for_each_entry_from
+#define hlist_for_each_entry_from(pos, member)				\
+	for (; pos;							\
+	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
+
+#undef hash_for_each
+#define hash_for_each(name, bkt, obj, member)				\
+	for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
+			(bkt)++)\
+		hlist_for_each_entry(obj, &name[bkt], member)
+
+#undef hash_for_each_safe
+#define hash_for_each_safe(name, bkt, tmp, obj, member)			\
+	for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
+			(bkt)++)\
+		hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)
+
+#undef hash_for_each_possible
+#define hash_for_each_possible(name, obj, member, key)			\
+	hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member)
+
+#undef hash_for_each_possible_safe
+#define hash_for_each_possible_safe(name, obj, tmp, member, key)	\
+	hlist_for_each_entry_safe(obj, tmp,\
+		&name[hash_min(key, HASH_BITS(name))], member)
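+
+/* Usage sketch (illustrative; `tbl`, `obj_t` and its `hlist` member are
+ * made-up names) for a table declared with DEFINE_HASHTABLE(tbl, 8):
+ *
+ *	struct obj_t *obj;
+ *	int bkt;
+ *
+ *	hash_for_each(tbl, bkt, obj, hlist)
+ *		pr_info("bucket %d: obj %p\n", bkt, obj);
+ *
+ * The _safe variants take an extra struct hlist_node *tmp cursor so that
+ * the current entry may be deleted while iterating.
+ */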
+
+#ifdef CONFIG_XPS
+int __kc_netif_set_xps_queue(struct net_device *, const struct cpumask *, u16);
+#define netif_set_xps_queue(_dev, _mask, _idx) __kc_netif_set_xps_queue((_dev), (_mask), (_idx))
+#else /* CONFIG_XPS */
+#define netif_set_xps_queue(_dev, _mask, _idx) do {} while (0)
+#endif /* CONFIG_XPS */
+
+#ifdef HAVE_NETDEV_SELECT_QUEUE
+#define _kc_hashrnd 0xd631614b /* not so random hash salt */
+u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
+#define __netdev_pick_tx __kc_netdev_pick_tx
+#endif /* HAVE_NETDEV_SELECT_QUEUE */
+#else /* >= 3.9.0 */
+#define HAVE_BRIDGE_FILTER
+#define HAVE_FDB_DEL_NLATTR
+#endif /* < 3.9.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
+#ifndef NAPI_POLL_WEIGHT
+#define NAPI_POLL_WEIGHT 64
+#endif
+#ifdef CONFIG_PCI_IOV
+int __kc_pci_vfs_assigned(struct pci_dev *dev);
+#else
+static inline int __kc_pci_vfs_assigned(struct pci_dev __always_unused *dev)
+{
+	return 0;
+}
+#endif
+#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev)
+
+#ifndef list_first_entry_or_null
+#define list_first_entry_or_null(ptr, type, member) \
+	(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
+#endif
+
+#ifndef VLAN_TX_COOKIE_MAGIC
+static inline struct sk_buff *__kc__vlan_hwaccel_put_tag(struct sk_buff *skb,
+							 u16 vlan_tci)
+{
+#ifdef VLAN_TAG_PRESENT
+	vlan_tci |= VLAN_TAG_PRESENT;
+#endif
+	skb->vlan_tci = vlan_tci;
+	return skb;
+}
+#define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci) \
+	__kc__vlan_hwaccel_put_tag(skb, vlan_tci)
+#endif
+
+#ifdef HAVE_FDB_OPS
+#if defined(HAVE_NDO_FDB_ADD_NLATTR)
+int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+			  struct net_device *dev,
+			  const unsigned char *addr, u16 flags);
+#elif defined(USE_CONST_DEV_UC_CHAR)
+int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev,
+			  const unsigned char *addr, u16 flags);
+#else
+int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev,
+			  unsigned char *addr, u16 flags);
+#endif /* HAVE_NDO_FDB_ADD_NLATTR */
+#if defined(HAVE_FDB_DEL_NLATTR)
+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+			  struct net_device *dev,
+			  const unsigned char *addr);
+#elif defined(USE_CONST_DEV_UC_CHAR)
+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev,
+			  const unsigned char *addr);
+#else
+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev,
+			  unsigned char *addr);
+#endif /* HAVE_FDB_DEL_NLATTR */
+#define ndo_dflt_fdb_add __kc_ndo_dflt_fdb_add
+#define ndo_dflt_fdb_del __kc_ndo_dflt_fdb_del
+#endif /* HAVE_FDB_OPS */
+
+#ifndef PCI_DEVID
+#define PCI_DEVID(bus, devfn)  ((((u16)(bus)) << 8) | (devfn))
+#endif
+
+/* The definitions for these functions when CONFIG_OF_NET is defined are
+ * pulled in from <linux/of_net.h>. For kernels older than 3.5 we already have
+ * backports for when CONFIG_OF_NET is true. These are separated and
+ * duplicated in order to cover all cases so that all kernels get either the
+ * real definitions (when CONFIG_OF_NET is defined) or the stub definitions
+ * (when CONFIG_OF_NET is not defined, or the kernel is too old to have real
+ * definitions).
+ */
+#ifndef CONFIG_OF_NET
+static inline int of_get_phy_mode(struct device_node __always_unused *np)
+{
+	return -ENODEV;
+}
+
+static inline const void *
+of_get_mac_address(struct device_node __always_unused *np)
+{
+	return NULL;
+}
+#endif
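+
+/* Usage sketch (illustrative only): with these stubs a probe path can ask
+ * the device tree first and fall back unconditionally:
+ *
+ *	const void *mac = of_get_mac_address(np);
+ *
+ *	if (mac)
+ *		ether_addr_copy(netdev->dev_addr, mac);
+ *	else
+ *		eth_hw_addr_random(netdev);
+ *
+ * On kernels without CONFIG_OF_NET the stub always returns NULL, so the
+ * random-address fallback is taken.
+ */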
+
+#else /* >= 3.10.0 */
+#define HAVE_ENCAP_TSO_OFFLOAD
+#define USE_DEFAULT_FDB_DEL_DUMP
+#define HAVE_SKB_INNER_NETWORK_HEADER
+
+#if (RHEL_RELEASE_CODE && \
+     (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) && \
+     (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)))
+#define HAVE_RHEL7_PCI_DRIVER_RH
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))
+#define HAVE_RHEL7_PCI_RESET_NOTIFY
+#endif /* RHEL >= 7.2 */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))
+#define HAVE_GENEVE_RX_OFFLOAD
+#endif /* RHEL >=7.3 && RHEL < 7.5 */
+#define HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC
+#define HAVE_RHEL7_NET_DEVICE_OPS_EXT
+#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE)
+#define HAVE_UDP_ENC_TUNNEL
+#endif
+#endif /* RHEL >= 7.3 */
+
+/* new hooks added to net_device_ops_extended in RHEL7.4 */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN
+#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_UDP_TUNNEL
+#define HAVE_UDP_ENC_RX_OFFLOAD
+#endif /* RHEL >= 7.4 */
+#endif /* RHEL >= 7.0 && RHEL < 8.0 */
+
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0))
+#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK
+#define NO_NETDEV_BPF_PROG_ATTACHED
+#endif /* RHEL >= 8.0 */
+#endif /* >= 3.10.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0) )
+#define netdev_notifier_info_to_dev(ptr) ptr
+#ifndef time_in_range64
+#define time_in_range64(a, b, c) \
+	(time_after_eq64(a, b) && \
+	 time_before_eq64(a, c))
+#endif /* time_in_range64 */
+#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) ||\
+     (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)))
+#define HAVE_NDO_SET_VF_LINK_STATE
+#endif
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))
+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
+#endif
+#else /* >= 3.11.0 */
+#define HAVE_NDO_SET_VF_LINK_STATE
+#define HAVE_SKB_INNER_PROTOCOL
+#define HAVE_MPLS_FEATURES
+#endif /* >= 3.11.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) )
+int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
+			       enum pcie_link_width *width);
+#ifndef pcie_get_minimum_link
+#define pcie_get_minimum_link(_p, _s, _w) __kc_pcie_get_minimum_link(_p, _s, _w)
+#endif
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,7))
+int _kc_pci_wait_for_pending_transaction(struct pci_dev *dev);
+#define pci_wait_for_pending_transaction _kc_pci_wait_for_pending_transaction
+#endif /* <RHEL6.7 */
+
+#else /* >= 3.12.0 */
+#if ( SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0))
+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
+#endif
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) )
+#define HAVE_VXLAN_RX_OFFLOAD
+#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN)
+#define HAVE_UDP_ENC_TUNNEL
+#endif
+#endif /* < 4.8.0 */
+#define HAVE_NDO_GET_PHYS_PORT_ID
+#define HAVE_NETIF_SET_XPS_QUEUE_CONST_MASK
+#endif /* >= 3.12.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) )
+#define dma_set_mask_and_coherent(_p, _m) __kc_dma_set_mask_and_coherent(_p, _m)
+int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask);
+#ifndef u64_stats_init
+#define u64_stats_init(a) do { } while(0)
+#endif
+#undef BIT_ULL
+#define BIT_ULL(n) (1ULL << (n))
+
+#if (!(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)) && \
+     !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)))
+static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
+{
+	dev = pci_physfn(dev);
+	if (pci_is_root_bus(dev->bus))
+		return NULL;
+
+	return dev->bus->self;
+}
+#endif
+
+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,1,0))
+#undef HAVE_STRUCT_PAGE_PFMEMALLOC
+#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT
+#endif
+#ifndef list_next_entry
+#define list_next_entry(pos, member) \
+	list_entry((pos)->member.next, typeof(*(pos)), member)
+#endif
+#ifndef list_prev_entry
+#define list_prev_entry(pos, member) \
+	list_entry((pos)->member.prev, typeof(*(pos)), member)
+#endif
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,20) )
+#define devm_kcalloc(dev, cnt, size, flags) \
+	devm_kzalloc(dev, (cnt) * (size), flags)
+#endif /* > 2.6.20 */
+
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)))
+#define list_last_entry(ptr, type, member) list_entry((ptr)->prev, type, member)
+#endif
+
+#else /* >= 3.13.0 */
+#define HAVE_VXLAN_CHECKS
+#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,24))
+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
+#else
+#define HAVE_NDO_SELECT_QUEUE_ACCEL
+#endif
+#define HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS
+#endif /* 3.13.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) )
+
+#ifndef U16_MAX
+#define U16_MAX ((u16)~0U)
+#endif
+
+#ifndef U32_MAX
+#define U32_MAX ((u32)~0U)
+#endif
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)))
+#define dev_consume_skb_any(x) dev_kfree_skb_any(x)
+#define dev_consume_skb_irq(x) dev_kfree_skb_irq(x)
+#endif
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) && \
+     !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)))
+
+/* it isn't expected that this would be a #define unless we made it so */
+#ifndef skb_set_hash
+
+#define PKT_HASH_TYPE_NONE	0
+#define PKT_HASH_TYPE_L2	1
+#define PKT_HASH_TYPE_L3	2
+#define PKT_HASH_TYPE_L4	3
+
+enum _kc_pkt_hash_types {
+	_KC_PKT_HASH_TYPE_NONE = PKT_HASH_TYPE_NONE,
+	_KC_PKT_HASH_TYPE_L2 = PKT_HASH_TYPE_L2,
+	_KC_PKT_HASH_TYPE_L3 = PKT_HASH_TYPE_L3,
+	_KC_PKT_HASH_TYPE_L4 = PKT_HASH_TYPE_L4,
+};
+#define pkt_hash_types         _kc_pkt_hash_types
+
+#define skb_set_hash __kc_skb_set_hash
+static inline void __kc_skb_set_hash(struct sk_buff __maybe_unused *skb,
+				     u32 __maybe_unused hash,
+				     int __maybe_unused type)
+{
+#ifdef HAVE_SKB_L4_RXHASH
+	skb->l4_rxhash = (type == PKT_HASH_TYPE_L4);
+#endif
+#ifdef NETIF_F_RXHASH
+	skb->rxhash = hash;
+#endif
+}
+#endif /* !skb_set_hash */
+
+#else	/* RHEL_RELEASE_CODE >= 7.0 || SLE_VERSION_CODE >= 12.0 */
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))
+#ifndef HAVE_VXLAN_RX_OFFLOAD
+#define HAVE_VXLAN_RX_OFFLOAD
+#endif /* HAVE_VXLAN_RX_OFFLOAD */
+#endif
+
+#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN)
+#define HAVE_UDP_ENC_TUNNEL
+#endif
+
+#ifndef HAVE_VXLAN_CHECKS
+#define HAVE_VXLAN_CHECKS
+#endif /* HAVE_VXLAN_CHECKS */
+#endif /* !(RHEL_RELEASE_CODE >= 7.0 || SLE_VERSION_CODE >= 12.0) */
+
+#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) ||\
+     (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)))
+#define HAVE_NDO_DFWD_OPS
+#endif
+
+#ifndef pci_enable_msix_range
+int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
+			       int minvec, int maxvec);
+#define pci_enable_msix_range __kc_pci_enable_msix_range
+#endif
+
+#ifndef ether_addr_copy
+#define ether_addr_copy __kc_ether_addr_copy
+static inline void __kc_ether_addr_copy(u8 *dst, const u8 *src)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+	*(u32 *)dst = *(const u32 *)src;
+	*(u16 *)(dst + 4) = *(const u16 *)(src + 4);
+#else
+	u16 *a = (u16 *)dst;
+	const u16 *b = (const u16 *)src;
+
+	a[0] = b[0];
+	a[1] = b[1];
+	a[2] = b[2];
+#endif
+}
+#endif /* ether_addr_copy */
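+
+/* Note on the backport above: like the upstream helper, both addresses
+ * must be at least two-byte aligned. An illustrative call:
+ *
+ *	u8 mac[ETH_ALEN] __aligned(2);
+ *
+ *	ether_addr_copy(mac, netdev->dev_addr);
+ */
+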
+int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
+		       int target, unsigned short *fragoff, int *flags);
+#define ipv6_find_hdr(a, b, c, d, e) __kc_ipv6_find_hdr((a), (b), (c), (d), (e))
+
+#ifndef OPTIMIZER_HIDE_VAR
+#ifdef __GNUC__
+#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))
+#else
+#include <linux/barrier.h>
+#define OPTIMIZER_HIDE_VAR(var)	barrier()
+#endif
+#endif
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,0)) && \
+     !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,4,0)))
+static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
+{
+#ifdef NETIF_F_RXHASH
+	return skb->rxhash;
+#else
+	return 0;
+#endif /* NETIF_F_RXHASH */
+}
+#endif /* !RHEL > 7.0 && !SLES >= 10.4 */
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))
+#define request_firmware_direct	request_firmware
+#endif /* !RHEL || RHEL < 7.5 */
+
+#else /* >= 3.14.0 */
+
+/* for ndo_dfwd_ ops add_station, del_station and _start_xmit */
+#ifndef HAVE_NDO_DFWD_OPS
+#define HAVE_NDO_DFWD_OPS
+#endif
+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
+#endif /* 3.14.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) )
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) && \
+     !(UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,30)))
+#define u64_stats_fetch_begin_irq u64_stats_fetch_begin_bh
+#define u64_stats_fetch_retry_irq u64_stats_fetch_retry_bh
+#endif
+
+char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp);
+#define devm_kstrdup(dev, s, gfp) _kc_devm_kstrdup(dev, s, gfp)
+
+#else /* >= 3.15.0 */
+#define HAVE_NET_GET_RANDOM_ONCE
+#define HAVE_PTP_1588_CLOCK_PINS
+#define HAVE_NETDEV_PORT
+#endif /* 3.15.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) )
+#ifndef smp_mb__before_atomic
+#define smp_mb__before_atomic() smp_mb()
+#define smp_mb__after_atomic()  smp_mb()
+#endif
+#ifndef __dev_uc_sync
+#ifdef HAVE_SET_RX_MODE
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list,
+		struct net_device *dev,
+		int (*sync)(struct net_device *, const unsigned char *),
+		int (*unsync)(struct net_device *, const unsigned char *));
+void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
+		struct net_device *dev,
+		int (*unsync)(struct net_device *, const unsigned char *));
+#endif
+#ifndef NETDEV_HW_ADDR_T_MULTICAST
+int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count,
+		struct net_device *dev,
+		int (*sync)(struct net_device *, const unsigned char *),
+		int (*unsync)(struct net_device *, const unsigned char *));
+void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count,
+		struct net_device *dev,
+		int (*unsync)(struct net_device *, const unsigned char *));
+#endif
+#endif /* HAVE_SET_RX_MODE */
+
+static inline int __kc_dev_uc_sync(struct net_device __maybe_unused *dev,
+				   int __maybe_unused (*sync)(struct net_device *, const unsigned char *),
+				   int __maybe_unused (*unsync)(struct net_device *, const unsigned char *))
+{
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+	return __kc_hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
+#elif defined(HAVE_SET_RX_MODE)
+	return __kc_dev_addr_sync_dev(&dev->uc_list, &dev->uc_count,
+				      dev, sync, unsync);
+#else
+	return 0;
+#endif
+}
+#define __dev_uc_sync __kc_dev_uc_sync
+
+static inline void __kc_dev_uc_unsync(struct net_device __maybe_unused *dev,
+				      int __maybe_unused (*unsync)(struct net_device *, const unsigned char *))
+{
+#ifdef HAVE_SET_RX_MODE
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+	__kc_hw_addr_unsync_dev(&dev->uc, dev, unsync);
+#else /* NETDEV_HW_ADDR_T_MULTICAST */
+	__kc_dev_addr_unsync_dev(&dev->uc_list, &dev->uc_count, dev, unsync);
+#endif /* NETDEV_HW_ADDR_T_UNICAST */
+#endif /* HAVE_SET_RX_MODE */
+}
+#define __dev_uc_unsync __kc_dev_uc_unsync
+
+static inline int __kc_dev_mc_sync(struct net_device __maybe_unused *dev,
+				   int __maybe_unused (*sync)(struct net_device *, const unsigned char *),
+				   int __maybe_unused (*unsync)(struct net_device *, const unsigned char *))
+{
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+	return __kc_hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
+#elif defined(HAVE_SET_RX_MODE)
+	return __kc_dev_addr_sync_dev(&dev->mc_list, &dev->mc_count,
+				      dev, sync, unsync);
+#else
+	return 0;
+#endif
+
+}
+#define __dev_mc_sync __kc_dev_mc_sync
+
+static inline void __kc_dev_mc_unsync(struct net_device __maybe_unused *dev,
+				      int __maybe_unused (*unsync)(struct net_device *, const unsigned char *))
+{
+#ifdef HAVE_SET_RX_MODE
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+	__kc_hw_addr_unsync_dev(&dev->mc, dev, unsync);
+#else /* NETDEV_HW_ADDR_T_MULTICAST */
+	__kc_dev_addr_unsync_dev(&dev->mc_list, &dev->mc_count, dev, unsync);
+#endif /* NETDEV_HW_ADDR_T_MULTICAST */
+#endif /* HAVE_SET_RX_MODE */
+}
+#define __dev_mc_unsync __kc_dev_mc_unsync
+#endif /* __dev_uc_sync */
+
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1))
+#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+#endif
+
+#ifndef NETIF_F_GSO_UDP_TUNNEL_CSUM
+/* if someone backports this, hopefully they backport it as a #define.
+ * declare it as zero on older kernels so that if it gets OR'd in
+ * it won't affect anything, thereby preventing core driver changes
+ */
+#define NETIF_F_GSO_UDP_TUNNEL_CSUM 0
+#define SKB_GSO_UDP_TUNNEL_CSUM 0
+#endif
+void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len,
+			gfp_t gfp);
+#define devm_kmemdup __kc_devm_kmemdup
+
+#else /* >= 3.16.0 */
+#if ( ( LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0) ) && \
+      ! ( SLE_VERSION_CODE && ( SLE_VERSION_CODE >= SLE_VERSION(12,4,0)) ) )
+#define HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY
+#endif /* >= 3.16.0 && < 4.13.0 && !(SLES >= 12sp4) */
+#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+#endif /* 3.16.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) )
+#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \
+      RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \
+    !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))
+#ifndef timespec64
+#define timespec64 timespec
+static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
+{
+	return ts;
+}
+static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
+{
+	return ts64;
+}
+#define timespec64_equal timespec_equal
+#define timespec64_compare timespec_compare
+#define set_normalized_timespec64 set_normalized_timespec
+#define timespec64_add_safe timespec_add_safe
+#define timespec64_add timespec_add
+#define timespec64_sub timespec_sub
+#define timespec64_valid timespec_valid
+#define timespec64_valid_strict timespec_valid_strict
+#define timespec64_to_ns timespec_to_ns
+#define ns_to_timespec64 ns_to_timespec
+#define ktime_to_timespec64 ktime_to_timespec
+#define ktime_get_ts64 ktime_get_ts
+#define ktime_get_real_ts64 ktime_get_real_ts
+#define timespec64_add_ns timespec_add_ns
+#endif /* timespec64 */
+#endif /* !(RHEL6.8<RHEL7.0) && !RHEL7.2+ */
+
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \
+     RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))
+static inline void ktime_get_real_ts64(struct timespec64 *ts)
+{
+	*ts = ktime_to_timespec64(ktime_get_real());
+}
+
+static inline void ktime_get_ts64(struct timespec64 *ts)
+{
+	*ts = ktime_to_timespec64(ktime_get());
+}
+#endif
+
+#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+#define hlist_add_behind(_a, _b) hlist_add_after(_b, _a)
+#endif
+
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,3))
+static inline u64 ktime_get_ns(void)
+{
+	return ktime_to_ns(ktime_get());
+}
+
+static inline u64 ktime_get_real_ns(void)
+{
+	return ktime_to_ns(ktime_get_real());
+}
+
+static inline u64 ktime_get_boot_ns(void)
+{
+	return ktime_to_ns(ktime_get_boottime());
+}
+#endif /* RHEL < 7.3 */
+
+#else /* >= 3.17.0 */
+#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT
+#include <linux/time64.h>
+#endif /* 3.17.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) )
+#ifndef NO_PTP_SUPPORT
+#include <linux/errqueue.h>
+struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb);
+void __kc_skb_complete_tx_timestamp(struct sk_buff *skb,
+				    struct skb_shared_hwtstamps *hwtstamps);
+#define skb_clone_sk __kc_skb_clone_sk
+#define skb_complete_tx_timestamp __kc_skb_complete_tx_timestamp
+#endif
+#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2))))
+u32 __kc_eth_get_headlen(const struct net_device *dev, unsigned char *data,
+			 unsigned int max_len);
+#else
+unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_len);
+#endif /* !RHEL >= 8.2 */
+
+#define eth_get_headlen __kc_eth_get_headlen
+#ifndef ETH_P_XDSA
+#define ETH_P_XDSA 0x00F8
+#endif
+/* RHEL 7.1 backported csum_level, but SLES 12 and 12-SP1 did not */
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,1))
+#define HAVE_SKBUFF_CSUM_LEVEL
+#endif /* >= RH 7.1 */
+
+/* RHEL 7.3 backported xmit_more */
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))
+#define HAVE_SKB_XMIT_MORE
+#endif /* >= RH 7.3 */
+
+#undef GENMASK
+#define GENMASK(h, l) \
+	(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+#undef GENMASK_ULL
+#define GENMASK_ULL(h, l) \
+	(((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
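+
+/* Worked example for the masks above: GENMASK(15, 8) expands to
+ * ((~0UL << 8) & (~0UL >> (BITS_PER_LONG - 1 - 15))) == 0xff00, i.e. bits
+ * 15..8 set; GENMASK_ULL(39, 32) likewise yields 0xff00000000ULL.
+ */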
+
+#else /*  3.18.0 */
+#define HAVE_SKBUFF_CSUM_LEVEL
+#define HAVE_SKB_XMIT_MORE
+#define HAVE_SKB_INNER_PROTOCOL_TYPE
+#endif /* 3.18.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,4) )
+#else /* >= 3.18.4 */
+#define HAVE_NDO_FEATURES_CHECK
+#endif /* 3.18.4 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,13) )
+#ifndef WRITE_ONCE
+#define WRITE_ONCE(x, val) ({ ACCESS_ONCE(x) = (val); })
+#endif
+#endif /* 3.18.13 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) )
+/* netdev_phys_port_id renamed to netdev_phys_item_id */
+#define netdev_phys_item_id netdev_phys_port_id
+
+static inline void _kc_napi_complete_done(struct napi_struct *napi,
+					  int __always_unused work_done)
+{
+	napi_complete(napi);
+}
+/* don't use our backport if the distro kernels already have it */
+#if (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))) || \
+    (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)))
+#define napi_complete_done _kc_napi_complete_done
+#endif
+
+int _kc_bitmap_print_to_pagebuf(bool list, char *buf,
+				const unsigned long *maskp, int nmaskbits);
+#define bitmap_print_to_pagebuf _kc_bitmap_print_to_pagebuf
+
+#ifndef NETDEV_RSS_KEY_LEN
+#define NETDEV_RSS_KEY_LEN (13 * 4)
+#endif
+#if (!(RHEL_RELEASE_CODE && \
+      ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) || \
+       (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)))))
+#define netdev_rss_key_fill(buffer, len) __kc_netdev_rss_key_fill(buffer, len)
+#endif /* RHEL_RELEASE_CODE */
+void __kc_netdev_rss_key_fill(void *buffer, size_t len);
+#define SPEED_20000 20000
+#define SPEED_40000 40000
+#ifndef dma_rmb
+#define dma_rmb() rmb()
+#endif
+#ifndef dev_alloc_pages
+#ifndef NUMA_NO_NODE
+#define NUMA_NO_NODE -1
+#endif
+#define dev_alloc_pages(_order) alloc_pages_node(NUMA_NO_NODE, (GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC), (_order))
+#endif
+#ifndef dev_alloc_page
+#define dev_alloc_page() dev_alloc_pages(0)
+#endif
+#if !defined(eth_skb_pad) && !defined(skb_put_padto)
+/**
+ *     __kc_skb_put_padto - increase size and pad an skbuff up to a minimal size
+ *     @skb: buffer to pad
+ *     @len: minimal length
+ *
+ *     Pads up a buffer to ensure the trailing bytes exist and are
+ *     blanked. If the buffer already contains sufficient data it
+ *     is untouched. Otherwise it is extended. Returns zero on
+ *     success. The skb is freed on error.
+ */
+static inline int __kc_skb_put_padto(struct sk_buff *skb, unsigned int len)
+{
+	unsigned int size = skb->len;
+
+	if (unlikely(size < len)) {
+		len -= size;
+		if (skb_pad(skb, len))
+			return -ENOMEM;
+		__skb_put(skb, len);
+	}
+	return 0;
+}
+#define skb_put_padto(skb, len) __kc_skb_put_padto(skb, len)
+
+static inline int __kc_eth_skb_pad(struct sk_buff *skb)
+{
+	return __kc_skb_put_padto(skb, ETH_ZLEN);
+}
+#define eth_skb_pad(skb) __kc_eth_skb_pad(skb)
+#endif /* eth_skb_pad && skb_put_padto */
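+
+/* Usage sketch (illustrative): pad runt frames before handing them to
+ * hardware that cannot pad on its own. eth_skb_pad() frees the skb on
+ * failure, so the caller must not touch it afterwards:
+ *
+ *	if (eth_skb_pad(skb))
+ *		return NETDEV_TX_OK;
+ */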
+
+#ifndef SKB_ALLOC_NAPI
+/* RHEL 7.2 backported napi_alloc_skb and friends */
+static inline struct sk_buff *__kc_napi_alloc_skb(struct napi_struct *napi, unsigned int length)
+{
+	return netdev_alloc_skb_ip_align(napi->dev, length);
+}
+#define napi_alloc_skb(napi,len) __kc_napi_alloc_skb(napi,len)
+#define __napi_alloc_skb(napi,len,mask) __kc_napi_alloc_skb(napi,len)
+#endif /* SKB_ALLOC_NAPI */
+#define HAVE_CONFIG_PM_RUNTIME
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,7)) && \
+     (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
+#define HAVE_RXFH_HASHFUNC
+#endif /* 6.7 < RHEL < 7.0 */
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1))
+#define HAVE_RXFH_HASHFUNC
+#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS
+#endif /* RHEL > 7.1 */
+#ifndef napi_schedule_irqoff
+#define napi_schedule_irqoff	napi_schedule
+#endif
+#ifndef READ_ONCE
+#define READ_ONCE(_x) ACCESS_ONCE(_x)
+#endif
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))
+#define HAVE_NDO_FDB_ADD_VID
+#endif
+#ifndef ETH_MODULE_SFF_8636
+#define ETH_MODULE_SFF_8636		0x3
+#endif
+#ifndef ETH_MODULE_SFF_8636_LEN
+#define ETH_MODULE_SFF_8636_LEN		256
+#endif
+#ifndef ETH_MODULE_SFF_8436
+#define ETH_MODULE_SFF_8436		0x4
+#endif
+#ifndef ETH_MODULE_SFF_8436_LEN
+#define ETH_MODULE_SFF_8436_LEN		256
+#endif
+#ifndef writel_relaxed
+#define writel_relaxed	writel
+#endif
+#else /* 3.19.0 */
+#define HAVE_NDO_FDB_ADD_VID
+#define HAVE_RXFH_HASHFUNC
+#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS
+#endif /* 3.19.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,20,0) )
+/* vlan_tx_xx functions got renamed to skb_vlan */
+#ifndef skb_vlan_tag_get
+#define skb_vlan_tag_get vlan_tx_tag_get
+#endif
+#ifndef skb_vlan_tag_present
+#define skb_vlan_tag_present vlan_tx_tag_present
+#endif
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1))
+#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H
+#endif
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))
+#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS
+#endif
+#else /* >= 3.20.0 */
+#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H
+#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS
+#endif /* 3.20.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0) )
+/* Definition for CONFIG_OF was introduced earlier */
+#if !defined(CONFIG_OF) && \
+    !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))
+static inline struct device_node *
+pci_device_to_OF_node(const struct pci_dev __always_unused *pdev) { return NULL; }
+#else /* !CONFIG_OF && RHEL < 7.3 */
+#define HAVE_DDP_PROFILE_UPLOAD_SUPPORT
+#endif /* !CONFIG_OF && RHEL < 7.3 */
+#else /* >= 4.0 */
+#define HAVE_DDP_PROFILE_UPLOAD_SUPPORT
+#endif /* < 4.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) )
+#ifndef NO_PTP_SUPPORT
+#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H
+#include <linux/timecounter.h>
+#else
+#include <linux/clocksource.h>
+#endif
+static inline void __kc_timecounter_adjtime(struct timecounter *tc, s64 delta)
+{
+	tc->nsec += delta;
+}
+
+static inline struct net_device *
+of_find_net_device_by_node(struct device_node __always_unused *np)
+{
+	return NULL;
+}
+
+#define timecounter_adjtime __kc_timecounter_adjtime
+#endif
+#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) || \
+     (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,2,0))))
+#define HAVE_NDO_SET_VF_RSS_QUERY_EN
+#endif
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))
+#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
+#endif
+#if !((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,8) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) || \
+      (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) || \
+      (SLE_VERSION_CODE > SLE_VERSION(12,1,0)))
+unsigned int _kc_cpumask_local_spread(unsigned int i, int node);
+#define cpumask_local_spread _kc_cpumask_local_spread
+#endif
+#else /* >= 4,1,0 */
+#define HAVE_PTP_CLOCK_INFO_GETTIME64
+#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
+#define HAVE_PASSTHRU_FEATURES_CHECK
+#define HAVE_NDO_SET_VF_RSS_QUERY_EN
+#define HAVE_NDO_SET_TX_MAXRATE
+#endif /* 4,1,0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,1,9))
+#if (!(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \
+     !((SLE_VERSION_CODE == SLE_VERSION(11,3,0)) && \
+       (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(0,47,71))) && \
+     !((SLE_VERSION_CODE == SLE_VERSION(11,4,0)) && \
+       (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(65,0,0))) && \
+     !(SLE_VERSION_CODE >= SLE_VERSION(12,1,0)))
+static inline bool page_is_pfmemalloc(struct page __maybe_unused *page)
+{
+#ifdef HAVE_STRUCT_PAGE_PFMEMALLOC
+	return page->pfmemalloc;
+#else
+	return false;
+#endif
+}
+#endif /* !RHEL7.2+ && !SLES11sp3(3.0.101-0.47.71+ update) && !SLES11sp4(3.0.101-65+ update) && !SLES12sp1+ */
+#else
+#undef HAVE_STRUCT_PAGE_PFMEMALLOC
+#endif /* 4.1.9 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0))
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) && \
+     !(SLE_VERSION_CODE >= SLE_VERSION(12,1,0)))
+#define ETHTOOL_RX_FLOW_SPEC_RING	0x00000000FFFFFFFFULL
+#define ETHTOOL_RX_FLOW_SPEC_RING_VF	0x000000FF00000000ULL
+#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32
+static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie)
+{
+	return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie;
+}
+
+static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
+{
+	return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >>
+				ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+}
+#endif /* ! RHEL >= 7.2 && ! SLES >= 12.1 */
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
+#if (!((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \
+	RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) || \
+       RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)))
+static inline bool pci_ari_enabled(struct pci_bus *bus)
+{
+	return bus->self && bus->self->ari_enabled;
+}
+#endif /* !(RHEL6.8+ || RHEL7.2+) */
+#else
+static inline bool pci_ari_enabled(struct pci_bus *bus)
+{
+	return false;
+}
+#endif /* 2.6.27 */
+#else /* >= 4.2.0 */
+#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT
+#define HAVE_VF_STATS
+#endif /* 4.2.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,4,0))
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))
+#define HAVE_NDO_SET_VF_TRUST
+#endif /* (RHEL_RELEASE >= 7.3) */
+#ifndef CONFIG_64BIT
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+#include <asm-generic/io-64-nonatomic-lo-hi.h>	/* 32-bit readq/writeq */
+#else /* < 3.3.0 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
+#include <asm-generic/int-ll64.h>
+#endif /* 2.6.26 => 3.3.0 */
+#ifndef readq
+static inline __u64 readq(const volatile void __iomem *addr)
+{
+	const volatile u32 __iomem *p = addr;
+	u32 low, high;
+
+	low = readl(p);
+	high = readl(p + 1);
+
+	return low + ((u64)high << 32);
+}
+#define readq readq
+#endif
+
+#ifndef writeq
+static inline void writeq(__u64 val, volatile void __iomem *addr)
+{
+	writel(val, addr);
+	writel(val >> 32, addr + 4);
+}
+#define writeq writeq
+#endif
+#endif /* < 3.3.0 */
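+
+/* Note on the fallbacks above: the two halves are accessed with separate
+ * 32-bit MMIO operations, so readq()/writeq() here are not atomic and a
+ * device may observe or present a torn value between the two accesses.
+ * This matches the behaviour of the generic io-64-nonatomic-lo-hi.h
+ * helpers included for kernels >= 3.3.0.
+ */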
+#endif /* !CONFIG_64BIT */
+#else /* < 4.4.0 */
+#define HAVE_NDO_SET_VF_TRUST
+
+#ifndef CONFIG_64BIT
+#include <linux/io-64-nonatomic-lo-hi.h>	/* 32-bit readq/writeq */
+#endif /* !CONFIG_64BIT */
+#endif /* 4.4.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0))
+/* protect against a likely backport */
+#ifndef NETIF_F_CSUM_MASK
+#define NETIF_F_CSUM_MASK NETIF_F_ALL_CSUM
+#endif /* NETIF_F_CSUM_MASK */
+#ifndef NETIF_F_SCTP_CRC
+#define NETIF_F_SCTP_CRC NETIF_F_SCTP_CSUM
+#endif /* NETIF_F_SCTP_CRC */
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)))
+#define eth_platform_get_mac_address _kc_eth_platform_get_mac_address
+int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused,
+				     u8 *mac_addr __maybe_unused);
+#endif /* !(RHEL_RELEASE >= 7.3) */
+#else /* 4.5.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) )
+#define HAVE_GENEVE_RX_OFFLOAD
+#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE)
+#define HAVE_UDP_ENC_TUNNEL
+#endif
+#endif /* < 4.8.0 */
+#define HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD
+#endif /* 4.5.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0))
+#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,3))
+static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
+	return skb->head + skb->csum_start;
+#else /* < 2.6.22 */
+	return skb_transport_header(skb);
+#endif
+}
+#endif
+
+#if !(UBUNTU_VERSION_CODE && \
+		UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,4,0,21)) && \
+	!(RHEL_RELEASE_CODE && \
+		(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) && \
+	!(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0)))
+static inline void napi_consume_skb(struct sk_buff *skb,
+				    int __always_unused budget)
+{
+	dev_consume_skb_any(skb);
+}
+
+#endif /* UBUNTU 4,4,0,21, RHEL 7.2, SLES12 SP3 */
+#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) && \
+	!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
+{
+	*sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
+}
+#endif
+#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) && \
+	!(SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0)))
+static inline void page_ref_inc(struct page *page)
+{
+	get_page(page);
+}
+#else
+#define HAVE_PAGE_COUNT_BULK_UPDATE
+#endif
+#ifndef IPV4_USER_FLOW
+#define	IPV4_USER_FLOW	0x0d	/* spec only (usr_ip4_spec) */
+#endif
+
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+#define HAVE_TC_SETUP_CLSFLOWER
+#define HAVE_TC_FLOWER_ENC
+#endif
+
+#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7)) || \
+     (SLE_VERSION_CODE >= SLE_VERSION(12,2,0)))
+#define HAVE_TC_SETUP_CLSU32
+#endif
+
+#if (SLE_VERSION_CODE >= SLE_VERSION(12,2,0))
+#define HAVE_TC_SETUP_CLSFLOWER
+#endif
+
+#else /* >= 4.6.0 */
+#define HAVE_PAGE_COUNT_BULK_UPDATE
+#define HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC
+#define HAVE_PTP_CROSSTIMESTAMP
+#define HAVE_TC_SETUP_CLSFLOWER
+#define HAVE_TC_SETUP_CLSU32
+#endif /* 4.6.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0))
+#if ((SLE_VERSION_CODE >= SLE_VERSION(12,3,0)) ||\
+     (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)))
+#define HAVE_NETIF_TRANS_UPDATE
+#endif /* SLES12sp3+ || RHEL7.4+ */
+#if ((UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,4,0,21)) || \
+     (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) || \
+     (SLE_VERSION_CODE >= SLE_VERSION(12,3,0)))
+#define HAVE_DEVLINK_SUPPORT
+#endif /* UBUNTU 4,4,0,21, RHEL 7.4, SLES12 SP3 */
+#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) ||\
+     (SLE_VERSION_CODE >= SLE_VERSION(12,3,0)))
+#define HAVE_ETHTOOL_25G_BITS
+#define HAVE_ETHTOOL_50G_BITS
+#define HAVE_ETHTOOL_100G_BITS
+#endif /* RHEL7.3+ || SLES12sp3+ */
+#else /* 4.7.0 */
+#define HAVE_DEVLINK_SUPPORT
+#define HAVE_NETIF_TRANS_UPDATE
+#define HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE
+#define HAVE_ETHTOOL_25G_BITS
+#define HAVE_ETHTOOL_50G_BITS
+#define HAVE_ETHTOOL_100G_BITS
+#define HAVE_TCF_MIRRED_REDIRECT
+#endif /* 4.7.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0))
+#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+enum udp_parsable_tunnel_type {
+	UDP_TUNNEL_TYPE_VXLAN,
+	UDP_TUNNEL_TYPE_GENEVE,
+};
+struct udp_tunnel_info {
+	unsigned short type;
+	sa_family_t sa_family;
+	__be16 port;
+};
+#endif
+
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))
+#define HAVE_TCF_EXTS_TO_LIST
+#endif
+
+#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE < UBUNTU_VERSION(4,8,0,0))
+#define tc_no_actions(_exts) true
+#define tc_for_each_action(_a, _exts) while (0)
+#endif
+#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) &&\
+	!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+static inline int
+#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME
+pci_request_io_regions(struct pci_dev *pdev, char *name)
+#else
+pci_request_io_regions(struct pci_dev *pdev, const char *name)
+#endif
+{
+	return pci_request_selected_regions(pdev,
+			    pci_select_bars(pdev, IORESOURCE_IO), name);
+}
+
+static inline void
+pci_release_io_regions(struct pci_dev *pdev)
+{
+	return pci_release_selected_regions(pdev,
+			    pci_select_bars(pdev, IORESOURCE_IO));
+}
+
+static inline int
+#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME
+pci_request_mem_regions(struct pci_dev *pdev, char *name)
+#else
+pci_request_mem_regions(struct pci_dev *pdev, const char *name)
+#endif
+{
+	return pci_request_selected_regions(pdev,
+			    pci_select_bars(pdev, IORESOURCE_MEM), name);
+}
+
+static inline void
+pci_release_mem_regions(struct pci_dev *pdev)
+{
+	return pci_release_selected_regions(pdev,
+			    pci_select_bars(pdev, IORESOURCE_MEM));
+}
+#endif /* !SLE_VERSION(12,3,0) */
+#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) ||\
+     (SLE_VERSION_CODE >= SLE_VERSION(12,3,0)))
+#define HAVE_ETHTOOL_NEW_50G_BITS
+#endif /* RHEL7.4+ || SLES12sp3+ */
+#else /* >= 4.8.0 */
+#define HAVE_UDP_ENC_RX_OFFLOAD
+#define HAVE_TCF_EXTS_TO_LIST
+#define HAVE_ETHTOOL_NEW_50G_BITS
+#endif /* 4.8.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0))
+#ifdef HAVE_TC_SETUP_CLSFLOWER
+#if (!(RHEL_RELEASE_CODE) && !(SLE_VERSION_CODE) || \
+    (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))))
+#define HAVE_TC_FLOWER_VLAN_IN_TAGS
+#endif /* !RHEL_RELEASE_CODE && !SLE_VERSION_CODE || <SLE_VERSION(12,3,0) */
+#endif /* HAVE_TC_SETUP_CLSFLOWER */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+#define HAVE_ETHTOOL_NEW_1G_BITS
+#define HAVE_ETHTOOL_NEW_10G_BITS
+#endif /* RHEL7.4+ */
+#if (!(SLE_VERSION_CODE) && !(RHEL_RELEASE_CODE)) || \
+     SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0)) || \
+     RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5))
+#define time_is_before_jiffies64(a)	time_after64(get_jiffies_64(), a)
+#endif /* !SLE_VERSION_CODE && !RHEL_RELEASE_CODE || (SLES <= 12.3.0) || (RHEL <= 7.5) */
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,4))
+static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
+{
+	dst[0] = mask & ULONG_MAX;
+
+	if (sizeof(mask) > sizeof(unsigned long))
+		dst[1] = mask >> 32;
+}
+#endif /* <RHEL7.4 */
+#else /* >=4.9 */
+#define HAVE_FLOW_DISSECTOR_KEY_VLAN_PRIO
+#define HAVE_ETHTOOL_NEW_1G_BITS
+#define HAVE_ETHTOOL_NEW_10G_BITS
+#endif /* KERNEL_VERSION(4.9.0) */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
+/* SLES 12.3 and RHEL 7.5 backported this interface */
+#if (!SLE_VERSION_CODE && !RHEL_RELEASE_CODE) || \
+    (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))) || \
+    (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)))
+static inline bool _kc_napi_complete_done2(struct napi_struct *napi,
+					   int __always_unused work_done)
+{
+	/* it was really hard to get napi_complete_done to be safe to call
+	 * recursively without running into our own kcompat, so just use
+	 * napi_complete
+	 */
+	napi_complete(napi);
+
+	/* true means that the stack is telling the driver to go-ahead and
+	 * re-enable interrupts
+	 */
+	return true;
+}
+
+#ifdef napi_complete_done
+#undef napi_complete_done
+#endif
+#define napi_complete_done _kc_napi_complete_done2
+#endif /* sles and rhel exclusion for < 4.10 */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))
+#define HAVE_DEV_WALK_API
+#define HAVE_ETHTOOL_NEW_2500MB_BITS
+#define HAVE_ETHTOOL_5G_BITS
+#endif /* RHEL7.4+ */
+#if (SLE_VERSION_CODE && (SLE_VERSION_CODE == SLE_VERSION(12,3,0)))
+#define HAVE_STRUCT_DMA_ATTRS
+#endif /* (SLES == 12.3.0) */
+#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0)))
+#define HAVE_NETDEVICE_MIN_MAX_MTU
+#endif /* (SLES >= 12.3.0) */
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))
+#define HAVE_STRUCT_DMA_ATTRS
+#define HAVE_RHEL7_EXTENDED_MIN_MAX_MTU
+#define HAVE_NETDEVICE_MIN_MAX_MTU
+#endif
+#if (!(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) && \
+     !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))))
+#ifndef dma_map_page_attrs
+#define dma_map_page_attrs __kc_dma_map_page_attrs
+static inline dma_addr_t __kc_dma_map_page_attrs(struct device *dev,
+						 struct page *page,
+						 size_t offset, size_t size,
+						 enum dma_data_direction dir,
+						 unsigned long __always_unused attrs)
+{
+	return dma_map_page(dev, page, offset, size, dir);
+}
+#endif
+
+#ifndef dma_unmap_page_attrs
+#define dma_unmap_page_attrs __kc_dma_unmap_page_attrs
+static inline void __kc_dma_unmap_page_attrs(struct device *dev,
+					     dma_addr_t addr, size_t size,
+					     enum dma_data_direction dir,
+					     unsigned long __always_unused attrs)
+{
+	dma_unmap_page(dev, addr, size, dir);
+}
+#endif
+
+static inline void __page_frag_cache_drain(struct page *page,
+					   unsigned int count)
+{
+#ifdef HAVE_PAGE_COUNT_BULK_UPDATE
+	if (!page_ref_sub_and_test(page, count))
+		return;
+
+	init_page_count(page);
+#else
+	BUG_ON(count > 1);
+	if (!count)
+		return;
+#endif
+	__free_pages(page, compound_order(page));
+}
+#endif /* !SLE_VERSION(12,3,0) && !RHEL_VERSION(7,5) */
+#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) ||\
+     (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))
+#define HAVE_SWIOTLB_SKIP_CPU_SYNC
+#endif
+
+#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(15,0,0))) ||\
+     (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,4))))
+#define page_frag_free __free_page_frag
+#endif
+#ifndef ETH_MIN_MTU
+#define ETH_MIN_MTU 68
+#endif /* ETH_MIN_MTU */
+#else /* >= 4.10 */
+#define HAVE_TC_FLOWER_ENC
+#define HAVE_NETDEVICE_MIN_MAX_MTU
+#define HAVE_SWIOTLB_SKIP_CPU_SYNC
+#define HAVE_NETDEV_TC_RESETS_XPS
+#define HAVE_XPS_QOS_SUPPORT
+#define HAVE_DEV_WALK_API
+#define HAVE_ETHTOOL_NEW_2500MB_BITS
+#define HAVE_ETHTOOL_5G_BITS
+/* From kernel 4.10 onwards, as part of the busy_poll rewrite, a new state
+ * was added to NAPI:state. If NAPI:state has NAPI_STATE_IN_BUSY_POLL set,
+ * it means napi_poll is being invoked in busy_poll context.
+ */
+#define HAVE_NAPI_STATE_IN_BUSY_POLL
+#define HAVE_TCF_MIRRED_EGRESS_REDIRECT
+#endif /* 4.10.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0))
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#define HAVE_NDO_BUSY_POLL
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) || \
+     (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))))
+#define HAVE_VOID_NDO_GET_STATS64
+#endif /* (SLES >= 12.3.0) || (RHEL >= 7.5) */
+
+static inline void _kc_dev_kfree_skb_irq(struct sk_buff *skb)
+{
+	if (!skb)
+		return;
+	dev_kfree_skb_irq(skb);
+}
+
+#undef dev_kfree_skb_irq
+#define dev_kfree_skb_irq _kc_dev_kfree_skb_irq
+
+static inline void _kc_dev_consume_skb_irq(struct sk_buff *skb)
+{
+	if (!skb)
+		return;
+	dev_consume_skb_irq(skb);
+}
+
+#undef dev_consume_skb_irq
+#define dev_consume_skb_irq _kc_dev_consume_skb_irq
+
+static inline void _kc_dev_kfree_skb_any(struct sk_buff *skb)
+{
+	if (!skb)
+		return;
+	dev_kfree_skb_any(skb);
+}
+
+#undef dev_kfree_skb_any
+#define dev_kfree_skb_any _kc_dev_kfree_skb_any
+
+static inline void _kc_dev_consume_skb_any(struct sk_buff *skb)
+{
+	if (!skb)
+		return;
+	dev_consume_skb_any(skb);
+}
+
+#undef dev_consume_skb_any
+#define dev_consume_skb_any _kc_dev_consume_skb_any
+
+#else /* > 4.11 */
+#define HAVE_VOID_NDO_GET_STATS64
+#define HAVE_VM_OPS_FAULT_NO_VMA
+#endif /* 4.11.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0))
+#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) || \
+     (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))
+#define HAVE_TCF_EXTS_HAS_ACTION
+#endif
+#define  PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */
+#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,4,0)))
+#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE
+#endif /* SLES >= 12sp4 */
+#else /* > 4.13 */
+#define HAVE_HWTSTAMP_FILTER_NTP_ALL
+#define HAVE_NDO_SETUP_TC_CHAIN_INDEX
+#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE
+#define HAVE_PTP_CLOCK_DO_AUX_WORK
+#endif /* 4.13.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0))
+#ifdef ETHTOOL_GLINKSETTINGS
+#ifndef ethtool_link_ksettings_del_link_mode
+#define ethtool_link_ksettings_del_link_mode(ptr, name, mode)		\
+	__clear_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name)
+#endif
+#endif /* ETHTOOL_GLINKSETTINGS */
+#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,4,0)))
+#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV
+#endif
+
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))
+#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV
+#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC
+#endif
+
+#define TIMER_DATA_TYPE		unsigned long
+#define TIMER_FUNC_TYPE		void (*)(TIMER_DATA_TYPE)
+
+#define timer_setup(timer, callback, flags)				\
+	__setup_timer((timer), (TIMER_FUNC_TYPE)(callback),		\
+		      (TIMER_DATA_TYPE)(timer), (flags))
+
+#define from_timer(var, callback_timer, timer_fieldname) \
+	container_of(callback_timer, typeof(*var), timer_fieldname)
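+
+/* Usage sketch (illustrative; `struct my_adapter` is made up) in the
+ * 4.14+ style emulated above:
+ *
+ *	struct my_adapter { struct timer_list watchdog; };
+ *
+ *	static void my_watchdog(struct timer_list *t)
+ *	{
+ *		struct my_adapter *ad = from_timer(ad, t, watchdog);
+ *		...
+ *	}
+ *
+ *	timer_setup(&ad->watchdog, my_watchdog, 0);
+ *
+ * On pre-4.14 kernels the callback is invoked with the timer's own address
+ * as the TIMER_DATA_TYPE argument, which from_timer() converts back into
+ * the containing structure via container_of().
+ */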
+
+#ifndef xdp_do_flush_map
+#define xdp_do_flush_map() do {} while (0)
+#endif
+struct _kc_xdp_buff {
+	void *data;
+	void *data_end;
+	void *data_hard_start;
+};
+#define xdp_buff _kc_xdp_buff
+struct _kc_bpf_prog {
+};
+#define bpf_prog _kc_bpf_prog
+#ifndef DIV_ROUND_DOWN_ULL
+#define DIV_ROUND_DOWN_ULL(ll, d) \
+	({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })
+#endif /* DIV_ROUND_DOWN_ULL */
+#else /* > 4.14 */
+#define HAVE_XDP_SUPPORT
+#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV
+#define HAVE_TCF_EXTS_HAS_ACTION
+#endif /* 4.14.0 */
+
+/*****************************************************************************/
+#ifndef ETHTOOL_GLINKSETTINGS
+
+#define __ETHTOOL_LINK_MODE_MASK_NBITS 32
+#define ETHTOOL_LINK_MASK_SIZE BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS)
+
+/**
+ * struct ethtool_link_ksettings
+ * @link_modes: supported and advertising, single item arrays
+ * @link_modes.supported: bitmask of supported link speeds
+ * @link_modes.advertising: bitmask of currently advertised speeds
+ * @base: base link details
+ * @base.speed: current link speed
+ * @base.port: current port type
+ * @base.duplex: current duplex mode
+ * @base.autoneg: current autonegotiation settings
+ *
+ * This struct and the following macros provide a way to support the old
+ * ethtool get/set_settings API on older kernels, but in the style of the new
+ * GLINKSETTINGS API.  In this way, the same code can be used to support both
+ * APIs as seamlessly as possible.
+ *
+ * Note that the old API only supports the first 32 link mode bits.
+ */
+struct ethtool_link_ksettings {
+	struct {
+		u32 speed;
+		u8 port;
+		u8 duplex;
+		u8 autoneg;
+	} base;
+	struct {
+		unsigned long supported[ETHTOOL_LINK_MASK_SIZE];
+		unsigned long advertising[ETHTOOL_LINK_MASK_SIZE];
+	} link_modes;
+};
+
+#define ETHTOOL_LINK_NAME_advertising(mode) ADVERTISED_ ## mode
+#define ETHTOOL_LINK_NAME_supported(mode) SUPPORTED_ ## mode
+#define ETHTOOL_LINK_NAME(name) ETHTOOL_LINK_NAME_ ## name
+#define ETHTOOL_LINK_CONVERT(name, mode) ETHTOOL_LINK_NAME(name)(mode)
+
+/**
+ * ethtool_link_ksettings_zero_link_mode
+ * @ptr: ptr to ksettings struct
+ * @name: supported or advertising
+ */
+#define ethtool_link_ksettings_zero_link_mode(ptr, name)\
+	(*((ptr)->link_modes.name) = 0x0)
+
+/**
+ * ethtool_link_ksettings_add_link_mode
+ * @ptr: ptr to ksettings struct
+ * @name: supported or advertising
+ * @mode: link mode to add
+ */
+#define ethtool_link_ksettings_add_link_mode(ptr, name, mode)\
+	(*((ptr)->link_modes.name) |= (typeof(*((ptr)->link_modes.name)))ETHTOOL_LINK_CONVERT(name, mode))
+
+/**
+ * ethtool_link_ksettings_del_link_mode
+ * @ptr: ptr to ksettings struct
+ * @name: supported or advertising
+ * @mode: link mode to delete
+ */
+#define ethtool_link_ksettings_del_link_mode(ptr, name, mode)\
+	(*((ptr)->link_modes.name) &= ~(typeof(*((ptr)->link_modes.name)))ETHTOOL_LINK_CONVERT(name, mode))
+
+/**
+ * ethtool_link_ksettings_test_link_mode
+ * @ptr: ptr to ksettings struct
+ * @name: supported or advertising
+ * @mode: link mode to test
+ */
+#define ethtool_link_ksettings_test_link_mode(ptr, name, mode)\
+	(!!(*((ptr)->link_modes.name) & ETHTOOL_LINK_CONVERT(name, mode)))
+
+/**
+ * _kc_ethtool_ksettings_to_cmd - Convert ethtool_link_ksettings to ethtool_cmd
+ * @ks: ethtool_link_ksettings struct
+ * @cmd: ethtool_cmd struct
+ *
+ * Convert an ethtool_link_ksettings structure into the older ethtool_cmd
+ * structure. We provide this in kcompat.h so that drivers can easily
+ * implement the older .{get|set}_settings as wrappers around the new api.
+ * Hence, we keep it prefixed with _kc_ to make it clear this isn't actually
+ * a real function in the kernel.
+ */
+static inline void
+_kc_ethtool_ksettings_to_cmd(struct ethtool_link_ksettings *ks,
+			     struct ethtool_cmd *cmd)
+{
+	cmd->supported = (u32)ks->link_modes.supported[0];
+	cmd->advertising = (u32)ks->link_modes.advertising[0];
+	ethtool_cmd_speed_set(cmd, ks->base.speed);
+	cmd->duplex = ks->base.duplex;
+	cmd->autoneg = ks->base.autoneg;
+	cmd->port = ks->base.port;
+}
+
+#endif /* !ETHTOOL_GLINKSETTINGS */
+
+/*****************************************************************************/
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) || \
+     (SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0))) || \
+     (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5))))
+#define phy_speed_to_str _kc_phy_speed_to_str
+const char *_kc_phy_speed_to_str(int speed);
+#else /* (LINUX >= 4.14.0) || (SLES > 12.3.0) || (RHEL > 7.5) */
+#include <linux/phy.h>
+#endif /* (LINUX < 4.14.0) || (SLES <= 12.3.0) || (RHEL <= 7.5) */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0))
+#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6))) || \
+     (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))))
+#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
+#define HAVE_TCF_BLOCK
+#else /* RHEL >= 7.6 || SLES >= 15.1 */
+#define TC_SETUP_QDISC_MQPRIO TC_SETUP_MQPRIO
+#endif /* !(RHEL >= 7.6) && !(SLES >= 15.1) */
+void _kc_ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst,
+				      struct ethtool_link_ksettings *src);
+#define ethtool_intersect_link_masks _kc_ethtool_intersect_link_masks
+#else /* >= 4.15 */
+#define HAVE_NDO_BPF
+#define HAVE_XDP_BUFF_DATA_META
+#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
+#define HAVE_TCF_BLOCK
+#endif /* 4.15.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,16,0))
+#define pci_printk(level, pdev, fmt, arg...) \
+	dev_printk(level, &(pdev)->dev, fmt, ##arg)
+#define pci_emerg(pdev, fmt, arg...)	dev_emerg(&(pdev)->dev, fmt, ##arg)
+#define pci_alert(pdev, fmt, arg...)	dev_alert(&(pdev)->dev, fmt, ##arg)
+#define pci_crit(pdev, fmt, arg...)	dev_crit(&(pdev)->dev, fmt, ##arg)
+#define pci_err(pdev, fmt, arg...)	dev_err(&(pdev)->dev, fmt, ##arg)
+#define pci_warn(pdev, fmt, arg...)	dev_warn(&(pdev)->dev, fmt, ##arg)
+#define pci_notice(pdev, fmt, arg...)	dev_notice(&(pdev)->dev, fmt, ##arg)
+#define pci_info(pdev, fmt, arg...)	dev_info(&(pdev)->dev, fmt, ##arg)
+#define pci_dbg(pdev, fmt, arg...)	dev_dbg(&(pdev)->dev, fmt, ##arg)
+
+#ifndef array_index_nospec
+static inline unsigned long _kc_array_index_mask_nospec(unsigned long index,
+							unsigned long size)
+{
+	/*
+	 * Always calculate and emit the mask even if the compiler
+	 * thinks the mask is not needed. The compiler does not take
+	 * into account the value of @index under speculation.
+	 */
+	OPTIMIZER_HIDE_VAR(index);
+	return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
+}
+
+#define array_index_nospec(index, size)					\
+({									\
+	typeof(index) _i = (index);					\
+	typeof(size) _s = (size);					\
+	unsigned long _mask = _kc_array_index_mask_nospec(_i, _s);	\
+									\
+	BUILD_BUG_ON(sizeof(_i) > sizeof(long));			\
+	BUILD_BUG_ON(sizeof(_s) > sizeof(long));			\
+									\
+	(typeof(_i)) (_i & _mask);					\
+})
+#endif /* array_index_nospec */
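+
+/* Worked example for the mask above: with size = 8, an in-bounds index
+ * such as 5 gives index | (size - 1 - index) = 5 | 2 = 7, whose top bit is
+ * clear, so the mask is ~0UL and the index passes through unchanged. An
+ * out-of-bounds index such as 9 makes the subtraction wrap to a value with
+ * the top bit set, so the mask is 0 and the result is clamped to 0 even
+ * under speculation.
+ */
+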
+#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6))) && \
+     !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))))
+#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
+#include <net/pkt_cls.h>
+static inline bool
+tc_cls_can_offload_and_chain0(const struct net_device *dev,
+			      struct tc_cls_common_offload *common)
+{
+	if (!tc_can_offload(dev))
+		return false;
+	if (common->chain_index)
+		return false;
+
+	return true;
+}
+#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */
+#endif /* !(RHEL >= 7.6) && !(SLES >= 15.1) */
+#ifndef sizeof_field
+#define sizeof_field(TYPE, MEMBER) (sizeof((((TYPE *)0)->MEMBER)))
+#endif /* sizeof_field */
+#else /* >= 4.16 */
+#include <linux/nospec.h>
+#define HAVE_XDP_BUFF_RXQ
+#define HAVE_TC_FLOWER_OFFLOAD_COMMON_EXTACK
+#define HAVE_TCF_MIRRED_DEV
+#define HAVE_VF_STATS_DROPPED
+#endif /* 4.16.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,17,0))
+#include <linux/pci_regs.h>
+#include <linux/pci.h>
+#define PCIE_SPEED_16_0GT 0x17
+#define PCI_EXP_LNKCAP_SLS_16_0GB 0x00000004 /* LNKCAP2 SLS Vector bit 3 */
+#define PCI_EXP_LNKSTA_CLS_16_0GB 0x0004 /* Current Link Speed 16.0GT/s */
+#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */
+void _kc_pcie_print_link_status(struct pci_dev *dev);
+#define pcie_print_link_status _kc_pcie_print_link_status
+#else /* >= 4.17.0 */
+#define HAVE_XDP_BUFF_IN_XDP_H
+#endif /* 4.17.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0))
+#ifdef NETIF_F_HW_L2FW_DOFFLOAD
+#include <linux/if_macvlan.h>
+#ifndef macvlan_supports_dest_filter
+#define macvlan_supports_dest_filter _kc_macvlan_supports_dest_filter
+static inline bool _kc_macvlan_supports_dest_filter(struct net_device *dev)
+{
+	struct macvlan_dev *macvlan = netdev_priv(dev);
+
+	return macvlan->mode == MACVLAN_MODE_PRIVATE ||
+	       macvlan->mode == MACVLAN_MODE_VEPA ||
+	       macvlan->mode == MACVLAN_MODE_BRIDGE;
+}
+#endif
+
+#if (!SLE_VERSION_CODE || (SLE_VERSION_CODE < SLE_VERSION(15,1,0)))
+#ifndef macvlan_accel_priv
+#define macvlan_accel_priv _kc_macvlan_accel_priv
+static inline void *_kc_macvlan_accel_priv(struct net_device *dev)
+{
+	struct macvlan_dev *macvlan = netdev_priv(dev);
+
+	return macvlan->fwd_priv;
+}
+#endif
+
+#ifndef macvlan_release_l2fw_offload
+#define macvlan_release_l2fw_offload _kc_macvlan_release_l2fw_offload
+static inline int _kc_macvlan_release_l2fw_offload(struct net_device *dev)
+{
+	struct macvlan_dev *macvlan = netdev_priv(dev);
+
+	macvlan->fwd_priv = NULL;
+	return dev_uc_add(macvlan->lowerdev, dev->dev_addr);
+}
+#endif
+#endif /* !SLES || SLES < 15.1 */
+#endif /* NETIF_F_HW_L2FW_DOFFLOAD */
+#include "kcompat_overflow.h"
+
+#if (SLE_VERSION_CODE < SLE_VERSION(15,1,0))
+#define firmware_request_nowarn	request_firmware_direct
+#endif /* !SLES || SLES < 15.1 */
+
+#else
+#include <linux/overflow.h>
+#include <net/xdp_sock.h>
+#define HAVE_XDP_FRAME_STRUCT
+#define HAVE_XDP_SOCK
+#define HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS
+#define NO_NDO_XDP_FLUSH
+#define HAVE_AF_XDP_SUPPORT
+#ifndef xdp_umem_get_data
+static inline char *__kc_xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
+{
+	return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1));
+}
+
+#define xdp_umem_get_data __kc_xdp_umem_get_data
+#endif /* !xdp_umem_get_data */
+#ifndef xdp_umem_get_dma
+static inline dma_addr_t __kc_xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
+{
+	return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1));
+}
+
+#define xdp_umem_get_dma __kc_xdp_umem_get_dma
+#endif /* !xdp_umem_get_dma */
+#endif /* 4.18.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0))
+#define bitmap_alloc(nbits, flags) \
+	kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long), flags)
+#define bitmap_zalloc(nbits, flags) bitmap_alloc(nbits, ((flags) | __GFP_ZERO))
+#define bitmap_free(bitmap) kfree(bitmap)
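+/*
+ * Illustrative sketch of the bitmap_*alloc() fallbacks above ("nbits" is a
+ * hypothetical bit count):
+ *
+ *	unsigned long *map = bitmap_zalloc(nbits, GFP_KERNEL);
+ *
+ *	if (!map)
+ *		return -ENOMEM;
+ *	set_bit(2, map);
+ *	bitmap_free(map);
+ */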
+#ifdef ETHTOOL_GLINKSETTINGS
+#define ethtool_ks_clear(ptr, name) \
+	ethtool_link_ksettings_zero_link_mode(ptr, name)
+#define ethtool_ks_add_mode(ptr, name, mode) \
+	ethtool_link_ksettings_add_link_mode(ptr, name, mode)
+#define ethtool_ks_del_mode(ptr, name, mode) \
+	ethtool_link_ksettings_del_link_mode(ptr, name, mode)
+#define ethtool_ks_test(ptr, name, mode) \
+	ethtool_link_ksettings_test_link_mode(ptr, name, mode)
+#endif /* ETHTOOL_GLINKSETTINGS */
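+/*
+ * Illustrative sketch: the short ethtool_ks_*() aliases above take the same
+ * arguments as the long-form upstream helpers, e.g. on a link_ksettings "ks":
+ *
+ *	ethtool_ks_clear(ks, supported);
+ *	ethtool_ks_add_mode(ks, supported, 10000baseT_Full);
+ *	if (ethtool_ks_test(ks, advertising, 10000baseT_Full))
+ *		...
+ */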
+#define HAVE_NETPOLL_CONTROLLER
+#define REQUIRE_PCI_CLEANUP_AER_ERROR_STATUS
+#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0)))
+#define HAVE_TCF_MIRRED_DEV
+#define HAVE_NDO_SELECT_QUEUE_SB_DEV
+#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK
+#endif
+#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)) ||\
+     (SLE_VERSION_CODE >= SLE_VERSION(15,1,0)))
+#define HAVE_TCF_EXTS_FOR_EACH_ACTION
+#undef HAVE_TCF_EXTS_TO_LIST
+#endif /* RHEL >= 8.0 || SLES >= 15.1 */
+#else /* >= 4.19.0 */
+#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK
+#define NO_NETDEV_BPF_PROG_ATTACHED
+#define HAVE_NDO_SELECT_QUEUE_SB_DEV
+#define HAVE_NETDEV_SB_DEV
+#undef HAVE_TCF_EXTS_TO_LIST
+#define HAVE_TCF_EXTS_FOR_EACH_ACTION
+#define HAVE_TCF_VLAN_TPID
+#endif /* 4.19.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0))
+#define HAVE_XDP_UMEM_PROPS
+#ifdef HAVE_AF_XDP_SUPPORT
+#ifndef napi_if_scheduled_mark_missed
+static inline bool __kc_napi_if_scheduled_mark_missed(struct napi_struct *n)
+{
+	unsigned long val, new;
+
+	do {
+		val = READ_ONCE(n->state);
+		if (val & NAPIF_STATE_DISABLE)
+			return true;
+
+		if (!(val & NAPIF_STATE_SCHED))
+			return false;
+
+		new = val | NAPIF_STATE_MISSED;
+	} while (cmpxchg(&n->state, val, new) != val);
+
+	return true;
+}
+
+#define napi_if_scheduled_mark_missed __kc_napi_if_scheduled_mark_missed
+#endif /* !napi_if_scheduled_mark_missed */
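+/*
+ * Illustrative sketch: AF_XDP wakeup paths use this helper to avoid a
+ * redundant interrupt ("q_vector" and the re-arm call are hypothetical,
+ * driver-specific names):
+ *
+ *	if (!napi_if_scheduled_mark_missed(&q_vector->napi))
+ *		trigger_sw_interrupt(q_vector);
+ */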
+#endif /* HAVE_AF_XDP_SUPPORT */
+#else /* >= 4.20.0 */
+#define HAVE_AF_XDP_ZC_SUPPORT
+#define HAVE_VXLAN_TYPE
+#endif /* 4.20.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0))
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8,0)))
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0))
+#define NETLINK_MAX_COOKIE_LEN	20
+struct netlink_ext_ack {
+	const char *_msg;
+	const struct nlattr *bad_attr;
+	u8 cookie[NETLINK_MAX_COOKIE_LEN];
+	u8 cookie_len;
+};
+
+#endif /* < 4.12 */
+static inline int _kc_dev_open(struct net_device *netdev,
+			       struct netlink_ext_ack __always_unused *extack)
+{
+	return dev_open(netdev);
+}
+
+#define dev_open _kc_dev_open
+#endif /* !(RHEL_RELEASE_CODE && RHEL > RHEL(8,0)) */
+#if (RHEL_RELEASE_CODE && \
+     (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7) && \
+      RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) || \
+     (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1)))
+#define HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL
+#else /* !((RHEL >= 7.7 && RHEL < 8.0) || RHEL >= 8.1) */
+struct ptp_system_timestamp {
+	struct timespec64 pre_ts;
+	struct timespec64 post_ts;
+};
+
+static inline void
+ptp_read_system_prets(struct ptp_system_timestamp __always_unused *sts)
+{
+	;
+}
+
+static inline void
+ptp_read_system_postts(struct ptp_system_timestamp __always_unused *sts)
+{
+	;
+}
+#endif /* !((RHEL >= 7.7 && RHEL < 8.0) || RHEL >= 8.1) */
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1)))
+#define HAVE_NDO_BRIDGE_SETLINK_EXTACK
+#endif /* RHEL 8.1 */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2))
+#define HAVE_TC_INDIR_BLOCK
+#endif /* RHEL 8.2 */
+#else /* >= 5.0.0 */
+#define HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL
+#define HAVE_NDO_BRIDGE_SETLINK_EXTACK
+#define HAVE_DMA_ALLOC_COHERENT_ZEROES_MEM
+#define HAVE_GENEVE_TYPE
+#define HAVE_TC_INDIR_BLOCK
+#endif /* 5.0.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0))
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1)))
+#define HAVE_TC_FLOW_RULE_INFRASTRUCTURE
+#define HAVE_NDO_FDB_ADD_EXTACK
+#else /* RHEL < 8.1 */
+#ifdef HAVE_TC_SETUP_CLSFLOWER
+#include <net/pkt_cls.h>
+
+struct flow_match {
+	struct flow_dissector	*dissector;
+	void			*mask;
+	void			*key;
+};
+
+struct flow_match_basic {
+	struct flow_dissector_key_basic *key, *mask;
+};
+
+struct flow_match_control {
+	struct flow_dissector_key_control *key, *mask;
+};
+
+struct flow_match_eth_addrs {
+	struct flow_dissector_key_eth_addrs *key, *mask;
+};
+
+#ifdef HAVE_TC_FLOWER_ENC
+struct flow_match_enc_keyid {
+	struct flow_dissector_key_keyid *key, *mask;
+};
+#endif
+
+#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS
+struct flow_match_vlan {
+	struct flow_dissector_key_vlan *key, *mask;
+};
+#endif
+
+struct flow_match_ipv4_addrs {
+	struct flow_dissector_key_ipv4_addrs *key, *mask;
+};
+
+struct flow_match_ipv6_addrs {
+	struct flow_dissector_key_ipv6_addrs *key, *mask;
+};
+
+struct flow_match_ports {
+	struct flow_dissector_key_ports *key, *mask;
+};
+
+struct flow_rule {
+	struct flow_match	match;
+#if 0
+	/* In 5.1+ kernels, action is a member of struct flow_rule, but it
+	 * is not compatible with how we emulate
+	 * tc_cls_flower_offload_flow_rule below.  By not declaring it here,
+	 * any driver that attempts to use action as an element of struct
+	 * flow_rule will fail to compile instead of silently accessing
+	 * memory that it shouldn't.
+	 */
+	struct flow_action	action;
+#endif
+};
+
+void flow_rule_match_basic(const struct flow_rule *rule,
+			   struct flow_match_basic *out);
+void flow_rule_match_control(const struct flow_rule *rule,
+			     struct flow_match_control *out);
+void flow_rule_match_eth_addrs(const struct flow_rule *rule,
+			       struct flow_match_eth_addrs *out);
+#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS
+void flow_rule_match_vlan(const struct flow_rule *rule,
+			  struct flow_match_vlan *out);
+#endif
+void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
+				struct flow_match_ipv4_addrs *out);
+void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
+				struct flow_match_ipv6_addrs *out);
+void flow_rule_match_ports(const struct flow_rule *rule,
+			   struct flow_match_ports *out);
+#ifdef HAVE_TC_FLOWER_ENC
+void flow_rule_match_enc_ports(const struct flow_rule *rule,
+			       struct flow_match_ports *out);
+void flow_rule_match_enc_control(const struct flow_rule *rule,
+				 struct flow_match_control *out);
+void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
+				    struct flow_match_ipv4_addrs *out);
+void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
+				    struct flow_match_ipv6_addrs *out);
+void flow_rule_match_enc_keyid(const struct flow_rule *rule,
+			       struct flow_match_enc_keyid *out);
+#endif
+
+static inline struct flow_rule *
+tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd)
+{
+	return (struct flow_rule *)&tc_flow_cmd->dissector;
+}
+
+static inline bool flow_rule_match_key(const struct flow_rule *rule,
+				       enum flow_dissector_key_id key)
+{
+	return dissector_uses_key(rule->match.dissector, key);
+}
+#endif /* HAVE_TC_SETUP_CLSFLOWER */
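+/*
+ * Illustrative sketch of the flow-rule shims above, as called from a
+ * clsflower offload handler ("f" is the hypothetical offload command):
+ *
+ *	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ *	struct flow_match_basic match;
+ *
+ *	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ *		flow_rule_match_basic(rule, &match);
+ *		(match.key->n_proto and match.mask->n_proto now usable)
+ *	}
+ */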
+
+#endif /* RHEL < 8.1 */
+#else /* >= 5.1.0 */
+#define HAVE_NDO_FDB_ADD_EXTACK
+#define NO_XDP_QUERY_XSK_UMEM
+#define HAVE_TC_FLOW_RULE_INFRASTRUCTURE
+#define HAVE_TC_FLOWER_ENC_IP
+#endif /* 5.1.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0))
+#if (defined HAVE_SKB_XMIT_MORE) && \
+(!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2))))
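+/* Note: this fallback expands in place and expects a local "skb" variable
+ * to be in scope at each call site, matching how the driver invokes
+ * netdev_xmit_more().
+ */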
+#define netdev_xmit_more()	(skb->xmit_more)
+#else
+#define netdev_xmit_more()	(0)
+#endif
+
+#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2))))
+#ifndef eth_get_headlen
+static inline u32
+__kc_eth_get_headlen(const struct net_device __always_unused *dev, void *data,
+		     unsigned int len)
+{
+	return eth_get_headlen(data, len);
+}
+
+#define eth_get_headlen(dev, data, len) __kc_eth_get_headlen(dev, data, len)
+#endif /* !eth_get_headlen */
+#endif /* !RHEL >= 8.2 */
+
+#ifndef mmiowb
+#ifdef CONFIG_IA64
+#define mmiowb() asm volatile ("mf.a" ::: "memory")
+#else
+#define mmiowb()
+#endif
+#endif /* mmiowb */
+
+#else /* >= 5.2.0 */
+#define HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED
+#define SPIN_UNLOCK_IMPLIES_MMIOWB
+#endif /* 5.2.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0))
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))
+#define flow_block_offload tc_block_offload
+#define flow_block_command tc_block_command
+#define flow_cls_offload tc_cls_flower_offload
+#define flow_block_binder_type tcf_block_binder_type
+#define flow_cls_common_offload tc_cls_common_offload
+#define flow_cls_offload_flow_rule tc_cls_flower_offload_flow_rule
+#define FLOW_CLS_REPLACE TC_CLSFLOWER_REPLACE
+#define FLOW_CLS_DESTROY TC_CLSFLOWER_DESTROY
+#define FLOW_CLS_STATS TC_CLSFLOWER_STATS
+#define FLOW_CLS_TMPLT_CREATE TC_CLSFLOWER_TMPLT_CREATE
+#define FLOW_CLS_TMPLT_DESTROY TC_CLSFLOWER_TMPLT_DESTROY
+#define FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS \
+		TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS
+#define FLOW_BLOCK_BIND TC_BLOCK_BIND
+#define FLOW_BLOCK_UNBIND TC_BLOCK_UNBIND
+
+#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
+#include <net/pkt_cls.h>
+
+int _kc_flow_block_cb_setup_simple(struct flow_block_offload *f,
+				   struct list_head *driver_list,
+				   tc_setup_cb_t *cb,
+				   void *cb_ident, void *cb_priv,
+				   bool ingress_only);
+
+#define flow_block_cb_setup_simple(f, driver_list, cb, cb_ident, cb_priv, \
+				   ingress_only) \
+	_kc_flow_block_cb_setup_simple(f, driver_list, cb, cb_ident, cb_priv, \
+				       ingress_only)
+#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */
+#else /* RHEL >= 8.2 */
+#define HAVE_FLOW_BLOCK_API
+#endif /* RHEL >= 8.2 */
+#else /* >= 5.3.0 */
+#define XSK_UMEM_RETURNS_XDP_DESC
+#define HAVE_FLOW_BLOCK_API
+#endif /* 5.3.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0))
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)) && \
+     !(SLE_VERSION_CODE >= SLE_VERSION(15,2,0)))
+static inline unsigned int skb_frag_off(const skb_frag_t *frag)
+{
+	return frag->page_offset;
+}
+
+static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
+{
+	frag->page_offset += delta;
+}
+#define __flow_indr_block_cb_register __tc_indr_block_cb_register
+#define __flow_indr_block_cb_unregister __tc_indr_block_cb_unregister
+#endif /* !(RHEL >= 8.2) && !(SLES >= 15sp2) */
+#if (SLE_VERSION_CODE >= SLE_VERSION(15,2,0))
+#define HAVE_NDO_XSK_WAKEUP
+#endif /* SLES15sp2 */
+#else /* >= 5.4.0 */
+#define HAVE_NDO_XSK_WAKEUP
+#endif /* 5.4.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0))
+#else /* >= 5.6.0 */
+#define HAVE_TX_TIMEOUT_TXQUEUE
+#endif /* 5.6.0 */
+
+#endif /* _KCOMPAT_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/kcompat_overflow.h b/drivers/net/ethernet/intel/i40e/kcompat_overflow.h
new file mode 100644
index 000000000..b7848fed0
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/kcompat_overflow.h
@@ -0,0 +1,319 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
+
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+#ifndef __LINUX_OVERFLOW_H
+#define __LINUX_OVERFLOW_H
+
+#include <linux/compiler.h>
+
+/*
+ * In the fallback code below, we need to compute the minimum and
+ * maximum values representable in a given type. These macros may also
+ * be useful elsewhere, so we provide them outside the
+ * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block.
+ *
+ * It would seem more obvious to do something like
+ *
+ * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
+ * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
+ *
+ * Unfortunately, the middle expressions, strictly speaking, have
+ * undefined behaviour, and at least some versions of gcc warn about
+ * the type_max expression (but not if -fsanitize=undefined is in
+ * effect; in that case, the warning is deferred to runtime...).
+ *
+ * The slightly excessive casting in type_min is to make sure the
+ * macros also produce sensible values for the exotic type _Bool. [The
+ * overflow checkers only almost work for _Bool, but that's
+ * a-feature-not-a-bug, since people shouldn't be doing arithmetic on
+ * _Bools. Besides, the gcc builtins don't allow _Bool* as third
+ * argument.]
+ *
+ * Idea stolen from
+ * https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html -
+ * credit to Christian Biere.
+ */
+/* The is_signed_type macro is redefined in a few places in various kernel
+ * headers. If this header is included at the same time as one of those, we
+ * will generate compilation warnings. Since we can't fix every old kernel,
+ * rename is_signed_type for this file to _kc_is_signed_type. This prevents
+ * the macro name collision, and should be safe since our drivers do not
+ * directly call the macro.
+ */
+#define _kc_is_signed_type(type)       (((type)(-1)) < (type)1)
+#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - _kc_is_signed_type(type)))
+#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
+#define type_min(T) ((T)((T)-type_max(T)-(T)1))
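+/*
+ * Illustrative values from the macros above: type_max(u8) == 255,
+ * type_max(s8) == 127, type_min(s8) == -128, type_min(u8) == 0.
+ */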
+
+
+#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
+/*
+ * For simplicity and code hygiene, the fallback code below insists on
+ * a, b and *d having the same type (similar to the min() and max()
+ * macros), whereas gcc's type-generic overflow checkers accept
+ * different types. Hence we don't just make check_add_overflow an
+ * alias for __builtin_add_overflow, but add type checks similar to
+ * below.
+ */
+#define check_add_overflow(a, b, d) ({		\
+	typeof(a) __a = (a);			\
+	typeof(b) __b = (b);			\
+	typeof(d) __d = (d);			\
+	(void) (&__a == &__b);			\
+	(void) (&__a == __d);			\
+	__builtin_add_overflow(__a, __b, __d);	\
+})
+
+#define check_sub_overflow(a, b, d) ({		\
+	typeof(a) __a = (a);			\
+	typeof(b) __b = (b);			\
+	typeof(d) __d = (d);			\
+	(void) (&__a == &__b);			\
+	(void) (&__a == __d);			\
+	__builtin_sub_overflow(__a, __b, __d);	\
+})
+
+#define check_mul_overflow(a, b, d) ({		\
+	typeof(a) __a = (a);			\
+	typeof(b) __b = (b);			\
+	typeof(d) __d = (d);			\
+	(void) (&__a == &__b);			\
+	(void) (&__a == __d);			\
+	__builtin_mul_overflow(__a, __b, __d);	\
+})
+
+#else
+
+
+/* Checking for unsigned overflow is relatively easy without causing UB. */
+#define __unsigned_add_overflow(a, b, d) ({	\
+	typeof(a) __a = (a);			\
+	typeof(b) __b = (b);			\
+	typeof(d) __d = (d);			\
+	(void) (&__a == &__b);			\
+	(void) (&__a == __d);			\
+	*__d = __a + __b;			\
+	*__d < __a;				\
+})
+#define __unsigned_sub_overflow(a, b, d) ({	\
+	typeof(a) __a = (a);			\
+	typeof(b) __b = (b);			\
+	typeof(d) __d = (d);			\
+	(void) (&__a == &__b);			\
+	(void) (&__a == __d);			\
+	*__d = __a - __b;			\
+	__a < __b;				\
+})
+/*
+ * If one of a or b is a compile-time constant, this avoids a division.
+ */
+#define __unsigned_mul_overflow(a, b, d) ({		\
+	typeof(a) __a = (a);				\
+	typeof(b) __b = (b);				\
+	typeof(d) __d = (d);				\
+	(void) (&__a == &__b);				\
+	(void) (&__a == __d);				\
+	*__d = __a * __b;				\
+	__builtin_constant_p(__b) ?			\
+	  __b > 0 && __a > type_max(typeof(__a)) / __b : \
+	  __a > 0 && __b > type_max(typeof(__b)) / __a;	 \
+})
+
+/*
+ * For signed types, detecting overflow is much harder, especially if
+ * we want to avoid UB. But the interface of these macros is such that
+ * we must provide a result in *d, and in fact we must produce the
+ * result promised by gcc's builtins, which is simply the possibly
+ * wrapped-around value. Fortunately, we can just formally do the
+ * operations in the widest relevant unsigned type (u64) and then
+ * truncate the result - gcc is smart enough to generate the same code
+ * with and without the (u64) casts.
+ */
+
+/*
+ * Adding two signed integers can overflow only if they have the same
+ * sign, and overflow has happened iff the result has the opposite
+ * sign.
+ */
+#define __signed_add_overflow(a, b, d) ({	\
+	typeof(a) __a = (a);			\
+	typeof(b) __b = (b);			\
+	typeof(d) __d = (d);			\
+	(void) (&__a == &__b);			\
+	(void) (&__a == __d);			\
+	*__d = (u64)__a + (u64)__b;		\
+	(((~(__a ^ __b)) & (*__d ^ __a))	\
+		& type_min(typeof(__a))) != 0;	\
+})
+
+/*
+ * Subtraction is similar, except that overflow can now happen only
+ * when the signs are opposite. In this case, overflow has happened if
+ * the result has the opposite sign of a.
+ */
+#define __signed_sub_overflow(a, b, d) ({	\
+	typeof(a) __a = (a);			\
+	typeof(b) __b = (b);			\
+	typeof(d) __d = (d);			\
+	(void) (&__a == &__b);			\
+	(void) (&__a == __d);			\
+	*__d = (u64)__a - (u64)__b;		\
+	((((__a ^ __b)) & (*__d ^ __a))		\
+		& type_min(typeof(__a))) != 0;	\
+})
+
+/*
+ * Signed multiplication is rather hard. gcc always follows C99, so
+ * division is truncated towards 0. This means that we can write the
+ * overflow check like this:
+ *
+ * (a > 0 && (b > MAX/a || b < MIN/a)) ||
+ * (a < -1 && (b > MIN/a || b < MAX/a)) ||
+ * (a == -1 && b == MIN)
+ *
+ * The redundant casts of -1 are to silence an annoying -Wtype-limits
+ * (included in -Wextra) warning: When the type is u8 or u16, the
+ * __b_c_e in check_mul_overflow obviously selects
+ * __unsigned_mul_overflow, but unfortunately gcc still parses this
+ * code and warns about the limited range of __b.
+ */
+
+#define __signed_mul_overflow(a, b, d) ({				\
+	typeof(a) __a = (a);						\
+	typeof(b) __b = (b);						\
+	typeof(d) __d = (d);						\
+	typeof(a) __tmax = type_max(typeof(a));				\
+	typeof(a) __tmin = type_min(typeof(a));				\
+	(void) (&__a == &__b);						\
+	(void) (&__a == __d);						\
+	*__d = (u64)__a * (u64)__b;					\
+	(__b > 0   && (__a > __tmax/__b || __a < __tmin/__b)) ||	\
+	(__b < (typeof(__b))-1  && (__a > __tmin/__b || __a < __tmax/__b)) || \
+	(__b == (typeof(__b))-1 && __a == __tmin);			\
+})
+
+
+#define check_add_overflow(a, b, d)					\
+	__builtin_choose_expr(_kc_is_signed_type(typeof(a)),		\
+			__signed_add_overflow(a, b, d),			\
+			__unsigned_add_overflow(a, b, d))
+
+#define check_sub_overflow(a, b, d)					\
+	__builtin_choose_expr(_kc_is_signed_type(typeof(a)),		\
+			__signed_sub_overflow(a, b, d),			\
+			__unsigned_sub_overflow(a, b, d))
+
+#define check_mul_overflow(a, b, d)					\
+	__builtin_choose_expr(_kc_is_signed_type(typeof(a)),		\
+			__signed_mul_overflow(a, b, d),			\
+			__unsigned_mul_overflow(a, b, d))
+
+
+#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */
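+/*
+ * Illustrative sketch (note that the fallback versions above require a, b
+ * and *d to share one type):
+ *
+ *	size_t n = ..., size = ..., bytes;
+ *
+ *	if (check_mul_overflow(n, size, &bytes))
+ *		return -EOVERFLOW;
+ */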
+
+/** check_shl_overflow() - Calculate a left-shifted value and check overflow
+ *
+ * @a: Value to be shifted
+ * @s: How many bits left to shift
+ * @d: Pointer to where to store the result
+ *
+ * Computes *@d = (@a << @s)
+ *
+ * Returns true if '*d' cannot hold the result or when 'a << s' doesn't
+ * make sense. Example conditions:
+ * - 'a << s' causes bits to be lost when stored in *d.
+ * - 's' is garbage (e.g. negative) or so large that the result of
+ *   'a << s' is guaranteed to be 0.
+ * - 'a' is negative.
+ * - 'a << s' sets the sign bit, if any, in '*d'.
+ *
+ * '*d' will hold the results of the attempted shift, but is not
+ * considered "safe for use" if false is returned.
+ */
+#define check_shl_overflow(a, s, d) ({					\
+	typeof(a) _a = a;						\
+	typeof(s) _s = s;						\
+	typeof(d) _d = d;						\
+	u64 _a_full = _a;						\
+	unsigned int _to_shift =					\
+		_s >= 0 && _s < 8 * sizeof(*d) ? _s : 0;		\
+	*_d = (_a_full << _to_shift);					\
+	(_to_shift != _s || *_d < 0 || _a < 0 ||			\
+		(*_d >> _to_shift) != _a);				\
+})
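+/*
+ * Illustrative sketch: reject a shift that would lose bits, e.g.
+ *
+ *	u32 len = ..., bytes;
+ *
+ *	if (check_shl_overflow(len, 2, &bytes))
+ *		return -EINVAL;
+ *	(bytes now holds len << 2 with no bits lost)
+ */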
+
+/**
+ * array_size() - Calculate size of 2-dimensional array.
+ *
+ * @a: dimension one
+ * @b: dimension two
+ *
+ * Calculates size of 2-dimensional array: @a * @b.
+ *
+ * Returns: number of bytes needed to represent the array or SIZE_MAX on
+ * overflow.
+ */
+static inline __must_check size_t array_size(size_t a, size_t b)
+{
+	size_t bytes;
+
+	if (check_mul_overflow(a, b, &bytes))
+		return SIZE_MAX;
+
+	return bytes;
+}
+
+/**
+ * array3_size() - Calculate size of 3-dimensional array.
+ *
+ * @a: dimension one
+ * @b: dimension two
+ * @c: dimension three
+ *
+ * Calculates size of 3-dimensional array: @a * @b * @c.
+ *
+ * Returns: number of bytes needed to represent the array or SIZE_MAX on
+ * overflow.
+ */
+static inline __must_check size_t array3_size(size_t a, size_t b, size_t c)
+{
+	size_t bytes;
+
+	if (check_mul_overflow(a, b, &bytes))
+		return SIZE_MAX;
+	if (check_mul_overflow(bytes, c, &bytes))
+		return SIZE_MAX;
+
+	return bytes;
+}
+
+static inline __must_check size_t __ab_c_size(size_t n, size_t size, size_t c)
+{
+	size_t bytes;
+
+	if (check_mul_overflow(n, size, &bytes))
+		return SIZE_MAX;
+	if (check_add_overflow(bytes, c, &bytes))
+		return SIZE_MAX;
+
+	return bytes;
+}
+
+/**
+ * struct_size() - Calculate size of structure with trailing array.
+ * @p: Pointer to the structure.
+ * @member: Name of the array member.
+ * @n: Number of elements in the array.
+ *
+ * Calculates size of memory needed for structure @p followed by an
+ * array of @n @member elements.
+ *
+ * Return: number of bytes needed or SIZE_MAX on overflow.
+ */
+#define struct_size(p, member, n)					\
+	__ab_c_size(n,							\
+		    sizeof(*(p)->member) + __must_be_array((p)->member),\
+		    sizeof(*(p)))
+
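+/*
+ * Illustrative sketch with a hypothetical flexible-array structure:
+ *
+ *	struct foo { int n; u32 items[]; } *p;
+ *
+ *	p = kzalloc(struct_size(p, items, count), GFP_KERNEL);
+ */
+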
+#endif /* __LINUX_OVERFLOW_H */
diff --git a/drivers/net/ethernet/intel/i40e/kcompat_vfd.c b/drivers/net/ethernet/intel/i40e/kcompat_vfd.c
new file mode 100644
index 000000000..ce206f10e
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/kcompat_vfd.c
@@ -0,0 +1,2550 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
+
+#include "kcompat.h"
+#include "kcompat_vfd.h"
+
+#define to_dev(obj) container_of(obj, struct device, kobj)
+
+const struct vfd_ops *vfd_ops = NULL;
+
+/**
+ * __get_pf_pdev - helper function to get the pdev
+ * @kobj:	kobject passed
+ * @pdev:	PCI device information struct
+ */
+static int __get_pf_pdev(struct kobject *kobj, struct pci_dev **pdev)
+{
+	struct device *dev;
+
+	if (!kobj->parent)
+		return -EINVAL;
+
+	/* get pdev */
+	dev = to_dev(kobj->parent);
+	*pdev = to_pci_dev(dev);
+
+	return 0;
+}
+
+/**
+ * __get_pdev_and_vfid - helper function to get the pdev and the vf id
+ * @kobj:	kobject passed
+ * @pdev:	PCI device information struct
+ * @vf_id:	VF id of the VF under consideration
+ */
+static int __get_pdev_and_vfid(struct kobject *kobj, struct pci_dev **pdev,
+			       int *vf_id)
+{
+	struct device *dev;
+
+	if (!kobj->parent->parent)
+		return -EINVAL;
+
+	/* get pdev */
+	dev = to_dev(kobj->parent->parent);
+	*pdev = to_pci_dev(dev);
+
+	/* get vf_id */
+	if (kstrtoint(kobj->name, 10, vf_id) != 0) {
+		dev_err(&(*pdev)->dev, "Failed to convert %s to vf_id\n",
+			kobj->name);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * __parse_bool_data - helper function to parse boolean data
+ * @pdev:	PCI device information struct
+ * @buff:	buffer with input data
+ * @attr_name:	name of the attribute
+ * @data:	pointer to output data
+ */
+static int __parse_bool_data(struct pci_dev *pdev, const char *buff,
+			     const char *attr_name, bool *data)
+{
+	if (sysfs_streq("on", buff)) {
+		*data = true;
+	} else if (sysfs_streq("off", buff)) {
+		*data = false;
+	} else {
+		dev_err(&pdev->dev, "set %s: invalid input string", attr_name);
+		return -EINVAL;
+	}
+	return 0;
+}
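+/*
+ * Illustrative sysfs input for the boolean attributes parsed above:
+ *
+ *	echo on > mac_anti_spoof
+ *	echo off > vlan_strip
+ */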
+
+/**
+ * __parse_egress_ingress_input - helper function for ingress/egress_mirror attributes
+ * @pdev:	PCI device information struct
+ * @buff:	buffer with input data
+ * @attr_name:	name of the attribute
+ * @data_new:	pointer to input data merged with the old data
+ * @data_old:	pointer to old data of the attribute
+ *
+ * Get the input data for the egress_mirror or ingress_mirror attribute in
+ * the form "add <number>" or "rem <number>".
+ * For "rem <number>", set the output data to off when <number> matches the
+ * old data; otherwise reject the input.
+ *
+ */
+static int __parse_egress_ingress_input(struct pci_dev *pdev, const char *buff,
+					const char *attr_name, int *data_new,
+					int *data_old)
+{
+	int ret = 0;
+	char *p;
+
+	if (strstr(buff, "add")) {
+		p = strstr(buff, "add");
+
+		ret = kstrtoint(p + sizeof("add"), 10, data_new);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"add %s: input error %d\n", attr_name, ret);
+			return ret;
+		}
+	} else if (strstr(buff, "rem")) {
+		p = strstr(buff, "rem");
+
+		ret = kstrtoint(p + sizeof("rem"), 10, data_new);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"rem %s: input error %d\n", attr_name, ret);
+			return ret;
+		}
+
+		if (*data_new == *data_old) {
+			if (!strcmp(attr_name, "egress_mirror"))
+				*data_new = VFD_EGRESS_MIRROR_OFF;
+			else if (!strcmp(attr_name, "ingress_mirror"))
+				*data_new = VFD_INGRESS_MIRROR_OFF;
+		} else {
+			dev_err(&pdev->dev,
+				"rem %s: input doesn't match current value",
+				attr_name);
+			return -EINVAL;
+		}
+	} else {
+		dev_err(&pdev->dev, "set %s: invalid input string", attr_name);
+		return -EINVAL;
+	}
+
+	return ret;
+}
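+/*
+ * Illustrative sysfs input for the mirror attributes parsed above
+ * (the number is the VF id to mirror to):
+ *
+ *	echo "add 3" > egress_mirror
+ *	echo "rem 3" > egress_mirror
+ */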
+
+/**
+ * __parse_add_rem_bitmap - helper function to parse bitmap data
+ * @pdev:	PCI device information struct
+ * @buff:	buffer with input data
+ * @attr_name:	name of the attribute
+ * @data_new:	pointer to input data merged with the old data
+ * @data_old:	pointer to old data of the attribute
+ *
+ * If passed add: set data_new to "data_old | data_input"
+ * If passed rem: set data_new to "data_old & ~data_input"
+ */
+static int __parse_add_rem_bitmap(struct pci_dev *pdev, const char *buff,
+				  const char *attr_name,
+				  unsigned long *data_new,
+				  unsigned long *data_old)
+{
+	int ret = 0;
+	char *p;
+
+	if (strstr(buff, "add")) {
+		p = strstr(buff, "add");
+		bitmap_zero(data_new, VLAN_N_VID);
+
+		ret = bitmap_parselist(p + sizeof("add"), data_new, VLAN_N_VID);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"add %s: input error %d\n", attr_name, ret);
+			return ret;
+		}
+
+		bitmap_or(data_new, data_new, data_old, VLAN_N_VID);
+	} else if (strstr(buff, "rem")) {
+		p = strstr(buff, "rem");
+		bitmap_zero(data_new, VLAN_N_VID);
+
+		ret = bitmap_parselist(p + sizeof("rem"), data_new, VLAN_N_VID);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"rem %s: input error %d\n", attr_name, ret);
+			return ret;
+		}
+
+		/* new = old & ~rem */
+		bitmap_andnot(data_new, data_old, data_new, VLAN_N_VID);
+	} else {
+		dev_err(&pdev->dev, "set %s: invalid input string", attr_name);
+		return -EINVAL;
+	}
+	return 0;
+}
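+/*
+ * Illustrative sysfs input for the bitmap attributes parsed above;
+ * bitmap_parselist() accepts comma-separated VLAN ids and ranges:
+ *
+ *	echo "add 2,4-10" > trunk
+ *	echo "rem 4-6" > trunk
+ */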
+
+/**
+ * __parse_promisc_input - helper function for promisc attributes
+ * @buff:	buffer with input data
+ * @count:	size of buff
+ * @cmd:	return pointer to cmd into buff
+ * @subcmd:	return pointer to subcmd into buff
+ *
+ * Get the input data for promisc attributes in the form "add/rem mcast/ucast".
+ */
+static int __parse_promisc_input(const char *buff, size_t count,
+				 const char **cmd, const char **subcmd)
+{
+	size_t idx = 0;
+
+	/* Skip leading spaces */
+	while (idx < count && buff[idx] == ' ')
+		idx++;
+
+	/* Parse cmd */
+	if (strncmp(&buff[idx], "add", strlen("add")) == 0) {
+		*cmd = &buff[idx];
+		idx += strlen("add");
+	} else if (strncmp(&buff[idx], "rem", strlen("rem")) == 0) {
+		*cmd = &buff[idx];
+		idx += strlen("rem");
+	} else {
+		return -EINVAL;
+	}
+
+	if (buff[idx++] != ' ')
+		return -EINVAL;
+
+	/* Skip spaces between cmd and subcmd */
+	while (idx < count && buff[idx] == ' ')
+		idx++;
+
+	/* Parse subcmd */
+	if (strncmp(&buff[idx], "ucast", strlen("ucast")) == 0) {
+		*subcmd = &buff[idx];
+		idx += strlen("ucast");
+	} else if (strncmp(&buff[idx], "mcast", strlen("mcast")) == 0) {
+		*subcmd = &buff[idx];
+		idx += strlen("mcast");
+	} else {
+		return -EINVAL;
+	}
+
+	/* Skip spaces and newlines after subcmd */
+	while (idx < count && (buff[idx] == ' ' || buff[idx] == '\n'))
+		idx++;
+
+	if (idx != count)
+		return -EINVAL;
+
+	return 0;
+}
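+/*
+ * Illustrative sysfs input for the promisc attribute parsed above:
+ *
+ *	echo "add ucast" > promisc
+ *	echo "rem mcast" > promisc
+ */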
+
+/* Handlers for each VFd operation */
+
+/**
+ * vfd_trunk_show - handler for trunk show function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer for data
+ *
+ * Get current data from driver and copy to buffer
+ **/
+static ssize_t vfd_trunk_show(struct kobject *kobj,
+			      struct kobj_attribute *attr, char *buff)
+{
+	struct pci_dev *pdev;
+	int vf_id, ret = 0;
+
+	DECLARE_BITMAP(data, VLAN_N_VID);
+	bitmap_zero(data, VLAN_N_VID);
+
+	if (!vfd_ops->get_trunk)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_trunk(pdev, vf_id, data);
+	if (ret > 0)
+		ret = bitmap_print_to_pagebuf(1, buff, data, VLAN_N_VID);
+
+	return ret;
+}
+
+/**
+ * vfd_trunk_store - handler for trunk store function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer with input data
+ * @count:	size of buff
+ *
+ * Get current data from driver, compose new data based on input values
+ * depending on "add" or "rem" command, and pass new data to the driver to set.
+ *
+ * On success return count, indicating that we used the whole buffer. On
+ * failure return a negative error condition.
+ **/
+static ssize_t vfd_trunk_store(struct kobject *kobj,
+			       struct kobj_attribute *attr,
+			       const char *buff, size_t count)
+{
+	unsigned long *data_old, *data_new;
+	struct pci_dev *pdev;
+	int vf_id, ret = 0;
+
+	if (!vfd_ops->set_trunk || !vfd_ops->get_trunk)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	data_old = kcalloc(BITS_TO_LONGS(VLAN_N_VID), sizeof(unsigned long),
+			   GFP_KERNEL);
+	if (!data_old)
+		return -ENOMEM;
+	data_new = kcalloc(BITS_TO_LONGS(VLAN_N_VID), sizeof(unsigned long),
+			   GFP_KERNEL);
+	if (!data_new) {
+		kfree(data_old);
+		return -ENOMEM;
+	}
+
+	ret = vfd_ops->get_trunk(pdev, vf_id, data_old);
+	if (ret < 0)
+		goto err_free;
+
+	ret = __parse_add_rem_bitmap(pdev, buff, "trunk", data_new, data_old);
+	if (ret)
+		goto err_free;
+
+	if (!bitmap_equal(data_new, data_old, VLAN_N_VID))
+		ret = vfd_ops->set_trunk(pdev, vf_id, data_new);
+
+err_free:
+	kfree(data_old);
+	kfree(data_new);
+	return ret ? ret : count;
+}
+
+/**
+ * vfd_vlan_mirror_show - handler for vlan_mirror show function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer for data
+ *
+ * Get current data from driver and copy to buffer
+ **/
+static ssize_t vfd_vlan_mirror_show(struct kobject *kobj,
+				    struct kobj_attribute *attr, char *buff)
+{
+	struct pci_dev *pdev;
+	int vf_id, ret = 0;
+
+	DECLARE_BITMAP(data, VLAN_N_VID);
+	bitmap_zero(data, VLAN_N_VID);
+
+	if (!vfd_ops->get_vlan_mirror)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_vlan_mirror(pdev, vf_id, data);
+	if (ret > 0)
+		ret = bitmap_print_to_pagebuf(1, buff, data, VLAN_N_VID);
+
+	return ret;
+}
+
+/**
+ * vfd_vlan_mirror_store - handler for vlan_mirror store function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer with input data
+ * @count:	size of buff
+ *
+ * Get current data from driver, compose new data based on input values
+ * depending on "add" or "rem" command, and pass new data to the driver to set.
+ *
+ * On success return count, indicating that we used the whole buffer. On
+ * failure return a negative error condition.
+ **/
+static ssize_t vfd_vlan_mirror_store(struct kobject *kobj,
+				     struct kobj_attribute *attr,
+				     const char *buff, size_t count)
+{
+	unsigned long *data_old, *data_new;
+	struct pci_dev *pdev;
+	int vf_id, ret = 0;
+
+	if (!vfd_ops->set_vlan_mirror || !vfd_ops->get_vlan_mirror)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	data_old = kcalloc(BITS_TO_LONGS(VLAN_N_VID), sizeof(unsigned long),
+			   GFP_KERNEL);
+	if (!data_old)
+		return -ENOMEM;
+	data_new = kcalloc(BITS_TO_LONGS(VLAN_N_VID), sizeof(unsigned long),
+			   GFP_KERNEL);
+	if (!data_new) {
+		kfree(data_old);
+		return -ENOMEM;
+	}
+
+	ret = vfd_ops->get_vlan_mirror(pdev, vf_id, data_old);
+	if (ret < 0)
+		goto err_free;
+
+	ret = __parse_add_rem_bitmap(pdev, buff, "vlan_mirror",
+				     data_new, data_old);
+	if (ret)
+		goto err_free;
+
+	if (!bitmap_equal(data_new, data_old, VLAN_N_VID))
+		ret = vfd_ops->set_vlan_mirror(pdev, vf_id, data_new);
+
+err_free:
+	kfree(data_old);
+	kfree(data_new);
+	return ret ? ret : count;
+}
+
+/**
+ * vfd_egress_mirror_show - handler for egress_mirror show function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer for data
+ **/
+static ssize_t vfd_egress_mirror_show(struct kobject *kobj,
+				      struct kobj_attribute *attr, char *buff)
+{
+	struct pci_dev *pdev;
+	int vf_id, ret = 0;
+	int data;
+
+	if (!vfd_ops->get_egress_mirror)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_egress_mirror(pdev, vf_id, &data);
+	if (ret < 0)
+		return ret;
+
+	if (data == VFD_EGRESS_MIRROR_OFF)
+		ret = scnprintf(buff, PAGE_SIZE, "off\n");
+	else
+		ret = scnprintf(buff, PAGE_SIZE, "%u\n", data);
+
+	return ret;
+}
+
+/**
+ * vfd_egress_mirror_store - handler for egress_mirror store function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer with input data
+ * @count:	size of buff
+ **/
+static ssize_t vfd_egress_mirror_store(struct kobject *kobj,
+				       struct kobj_attribute *attr,
+				       const char *buff, size_t count)
+{
+	int data_new, data_old;
+	struct pci_dev *pdev;
+	int vf_id, ret = 0;
+
+	if (!vfd_ops->set_egress_mirror || !vfd_ops->get_egress_mirror)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_egress_mirror(pdev, vf_id, &data_old);
+	if (ret < 0)
+		return ret;
+
+	ret = __parse_egress_ingress_input(pdev, buff, "egress_mirror",
+					   &data_new, &data_old);
+	if (ret)
+		return ret;
+	if (data_new == vf_id) {
+		dev_err(&pdev->dev, "VF %d: Setting egress_mirror to itself is not allowed\n", vf_id);
+		return -EINVAL;
+	}
+
+	if (data_new != data_old)
+		ret = vfd_ops->set_egress_mirror(pdev, vf_id, data_new);
+
+	return ret ? ret : count;
+}
+
+/**
+ * vfd_ingress_mirror_show - handler for ingress_mirror show function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer for data
+ **/
+static ssize_t vfd_ingress_mirror_show(struct kobject *kobj,
+				       struct kobj_attribute *attr,
+				       char *buff)
+{
+	struct pci_dev *pdev;
+	int vf_id, ret = 0;
+	int data;
+
+	if (!vfd_ops->get_ingress_mirror)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_ingress_mirror(pdev, vf_id, &data);
+	if (ret < 0)
+		return ret;
+
+	if (data == VFD_INGRESS_MIRROR_OFF)
+		ret = scnprintf(buff, PAGE_SIZE, "off\n");
+	else
+		ret = scnprintf(buff, PAGE_SIZE, "%u\n", data);
+
+	return ret;
+}
+
+/**
+ * vfd_ingress_mirror_store - handler for ingress_mirror store function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer with input data
+ * @count:	size of buff
+ **/
+static ssize_t vfd_ingress_mirror_store(struct kobject *kobj,
+					struct kobj_attribute *attr,
+					const char *buff, size_t count)
+{
+	int data_new, data_old;
+	struct pci_dev *pdev;
+	int vf_id, ret = 0;
+
+	if (!vfd_ops->set_ingress_mirror || !vfd_ops->get_ingress_mirror)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_ingress_mirror(pdev, vf_id, &data_old);
+	if (ret < 0)
+		return ret;
+
+	ret = __parse_egress_ingress_input(pdev, buff, "ingress_mirror",
+					   &data_new, &data_old);
+	if (ret)
+		return ret;
+	if (data_new == vf_id) {
+		dev_err(&pdev->dev, "VF %d: Setting ingress_mirror to itself is not allowed\n", vf_id);
+		return -EINVAL;
+	}
+
+	if (data_new != data_old)
+		ret = vfd_ops->set_ingress_mirror(pdev, vf_id, data_new);
+
+	return ret ? ret : count;
+}
+
+/**
+ * vfd_mac_anti_spoof_show - handler for mac_anti_spoof show function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer for data
+ **/
+static ssize_t vfd_mac_anti_spoof_show(struct kobject *kobj,
+				       struct kobj_attribute *attr,
+				       char *buff)
+{
+	struct pci_dev *pdev;
+	int vf_id, ret = 0;
+	bool data;
+
+	if (!vfd_ops->get_mac_anti_spoof)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_mac_anti_spoof(pdev, vf_id, &data);
+	if (ret < 0)
+		return ret;
+
+	if (data)
+		ret = scnprintf(buff, PAGE_SIZE, "on\n");
+	else
+		ret = scnprintf(buff, PAGE_SIZE, "off\n");
+
+	return ret;
+}
+
+/**
+ * vfd_mac_anti_spoof_store - handler for mac_anti_spoof store function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer with input data
+ * @count:	size of buff
+ *
+ * On success return count, indicating that we used the whole buffer. On
+ * failure return a negative error condition.
+ **/
+static ssize_t vfd_mac_anti_spoof_store(struct kobject *kobj,
+					struct kobj_attribute *attr,
+					const char *buff, size_t count)
+{
+	struct pci_dev *pdev;
+	int vf_id, ret = 0;
+	bool data_new, data_old;
+
+	if (!vfd_ops->set_mac_anti_spoof || !vfd_ops->get_mac_anti_spoof)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_mac_anti_spoof(pdev, vf_id, &data_old);
+	if (ret < 0)
+		return ret;
+
+	ret = __parse_bool_data(pdev, buff, "mac_anti_spoof", &data_new);
+	if (ret)
+		return ret;
+
+	if (data_new != data_old)
+		ret = vfd_ops->set_mac_anti_spoof(pdev, vf_id, data_new);
+
+	return ret ? ret : count;
+}
+
+/**
+ * vfd_vlan_anti_spoof_show - handler for vlan_anti_spoof show function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer for data
+ **/
+static ssize_t vfd_vlan_anti_spoof_show(struct kobject *kobj,
+					struct kobj_attribute *attr,
+					char *buff)
+{
+	struct pci_dev *pdev;
+	int vf_id, ret = 0;
+	bool data;
+
+	if (!vfd_ops->get_vlan_anti_spoof)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_vlan_anti_spoof(pdev, vf_id, &data);
+	if (ret < 0)
+		return ret;
+
+	if (data)
+		ret = scnprintf(buff, PAGE_SIZE, "on\n");
+	else
+		ret = scnprintf(buff, PAGE_SIZE, "off\n");
+
+	return ret;
+}
+
+/**
+ * vfd_vlan_anti_spoof_store - handler for vlan_anti_spoof store function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer with input data
+ * @count:	size of buff
+ *
+ * On success return count, indicating that we used the whole buffer. On
+ * failure return a negative error condition.
+ **/
+static ssize_t vfd_vlan_anti_spoof_store(struct kobject *kobj,
+					 struct kobj_attribute *attr,
+					 const char *buff, size_t count)
+{
+	struct pci_dev *pdev;
+	int vf_id, ret = 0;
+	bool data_new, data_old;
+
+	if (!vfd_ops->set_vlan_anti_spoof || !vfd_ops->get_vlan_anti_spoof)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_vlan_anti_spoof(pdev, vf_id, &data_old);
+	if (ret < 0)
+		return ret;
+
+	ret = __parse_bool_data(pdev, buff, "vlan_anti_spoof", &data_new);
+	if (ret)
+		return ret;
+
+	if (data_new != data_old)
+		ret = vfd_ops->set_vlan_anti_spoof(pdev, vf_id, data_new);
+
+	return ret ? ret : count;
+}
+
+/**
+ * vfd_allow_untagged_show - handler for allow_untagged show function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer for data
+ **/
+static ssize_t vfd_allow_untagged_show(struct kobject *kobj,
+				       struct kobj_attribute *attr,
+				       char *buff)
+{
+	struct pci_dev *pdev;
+	int vf_id, ret = 0;
+	bool data;
+
+	if (!vfd_ops->get_allow_untagged)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_allow_untagged(pdev, vf_id, &data);
+	if (ret < 0)
+		return ret;
+
+	if (data)
+		ret = scnprintf(buff, PAGE_SIZE, "on\n");
+	else
+		ret = scnprintf(buff, PAGE_SIZE, "off\n");
+
+	return ret;
+}
+
+/**
+ * vfd_allow_untagged_store - handler for allow_untagged store function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer with input data
+ * @count:	size of buff
+ **/
+static ssize_t vfd_allow_untagged_store(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  const char *buff, size_t count)
+{
+	struct pci_dev *pdev;
+	int vf_id, ret = 0;
+	bool data_new, data_old;
+
+	if (!vfd_ops->set_allow_untagged || !vfd_ops->get_allow_untagged)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_allow_untagged(pdev, vf_id, &data_old);
+	if (ret < 0)
+		return ret;
+
+	ret = __parse_bool_data(pdev, buff, "allow_untagged", &data_new);
+	if (ret)
+		return ret;
+
+	if (data_new != data_old)
+		ret = vfd_ops->set_allow_untagged(pdev, vf_id, data_new);
+
+	return ret ? ret : count;
+}
+
+/**
+ * vfd_loopback_show - handler for loopback show function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer for data
+ **/
+static ssize_t vfd_loopback_show(struct kobject *kobj,
+				 struct kobj_attribute *attr, char *buff)
+{
+	struct pci_dev *pdev;
+	int vf_id, ret = 0;
+	bool data;
+
+	if (!vfd_ops->get_loopback)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_loopback(pdev, vf_id, &data);
+	if (ret < 0)
+		return ret;
+
+	if (data)
+		ret = scnprintf(buff, PAGE_SIZE, "on\n");
+	else
+		ret = scnprintf(buff, PAGE_SIZE, "off\n");
+
+	return ret;
+}
+
+/**
+ * vfd_loopback_store - handler for loopback store function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer with input data
+ * @count:	size of buff
+ **/
+static ssize_t vfd_loopback_store(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  const char *buff, size_t count)
+{
+	struct pci_dev *pdev;
+	int vf_id, ret = 0;
+	bool data_new, data_old;
+
+	if (!vfd_ops->set_loopback || !vfd_ops->get_loopback)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_loopback(pdev, vf_id, &data_old);
+	if (ret < 0)
+		return ret;
+
+	ret = __parse_bool_data(pdev, buff, "loopback", &data_new);
+	if (ret)
+		return ret;
+
+	if (data_new != data_old)
+		ret = vfd_ops->set_loopback(pdev, vf_id, data_new);
+
+	return ret ? ret : count;
+}
+
+/**
+ * vfd_mac_show - handler for mac show function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer for data
+ **/
+static ssize_t vfd_mac_show(struct kobject *kobj, struct kobj_attribute *attr,
+			    char *buff)
+{
+	u8 macaddr[ETH_ALEN];
+	struct pci_dev *pdev;
+	int vf_id, ret = 0;
+
+	if (!vfd_ops->get_mac)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_mac(pdev, vf_id, macaddr);
+	if (ret < 0)
+		return ret;
+
+	ret = scnprintf(buff, PAGE_SIZE, "%pM\n", macaddr);
+
+	return ret;
+}
+
+/**
+ * vfd_mac_store - handler for mac store function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer with input data
+ * @count:	size of buff
+ **/
+static ssize_t vfd_mac_store(struct kobject *kobj,
+			     struct kobj_attribute *attr,
+			     const char *buff, size_t count)
+{
+	u8 macaddr[ETH_ALEN];
+	u8 macaddr_old[ETH_ALEN];
+	struct pci_dev *pdev;
+	int vf_id, ret = 0;
+
+	if (!vfd_ops->set_mac || !vfd_ops->get_mac)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_mac(pdev, vf_id, macaddr_old);
+	if (ret < 0)
+		return ret;
+
+	ret = sscanf(buff, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
+		     &macaddr[0], &macaddr[1], &macaddr[2],
+		     &macaddr[3], &macaddr[4], &macaddr[5]);
+
+	if (ret != 6)
+		return -EINVAL;
+
+	if (!ether_addr_equal(macaddr, macaddr_old))
+		ret = vfd_ops->set_mac(pdev, vf_id, macaddr);
+
+	return ret ? ret : count;
+}
+
+/**
+ * vfd_mac_list_show - handler for mac_list show function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer for data
+ *
+ * Also frees the mac_list entries that were allocated by the driver's
+ * get_mac_list callback.
+ *
+ **/
+static ssize_t vfd_mac_list_show(struct kobject *kobj,
+				 struct kobj_attribute *attr, char *buff)
+{
+	unsigned int mac_num_allowed, mac_num_list, mac_num_count;
+	const char *overflow_msg = "... and more\n";
+	unsigned int mac_msg_len = 3*ETH_ALEN;
+	struct list_head *pos, *n;
+	struct pci_dev *pdev;
+	int vf_id, ret;
+	char *written;
+	LIST_HEAD(mac_list);
+
+	if (!vfd_ops->get_mac_list)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_mac_list(pdev, vf_id, &mac_list);
+	if (ret < 0)
+		goto err_free;
+
+	mac_num_list = 0;
+	mac_num_count = 0;
+	list_for_each_safe(pos, n, &mac_list)
+		mac_num_list++;
+
+	mac_num_allowed = (PAGE_SIZE - 1) / mac_msg_len;
+	if (mac_num_list > mac_num_allowed)
+		mac_num_allowed = (PAGE_SIZE - 1 - strlen(overflow_msg)) /
+				   mac_msg_len;
+
+	written = buff;
+	list_for_each_safe(pos, n, &mac_list) {
+		struct vfd_macaddr *mac = NULL;
+
+		mac_num_count++;
+		mac = list_entry(pos, struct vfd_macaddr, list);
+		if (mac_num_count > mac_num_allowed) {
+			ret += scnprintf(written, PAGE_SIZE - ret,
+					 "%s", overflow_msg);
+			goto err_free;
+		} else if (list_is_last(pos, &mac_list)) {
+			ret += scnprintf(written, PAGE_SIZE - ret,
+					 "%pM\n", mac->mac);
+		} else {
+			ret += scnprintf(written, PAGE_SIZE - ret,
+					 "%pM,", mac->mac);
+		}
+		written += mac_msg_len;
+	}
+
+err_free:
+	list_for_each_safe(pos, n, &mac_list) {
+		struct vfd_macaddr *mac = NULL;
+
+		mac = list_entry(pos, struct vfd_macaddr, list);
+		list_del(pos);
+		kfree(mac);
+	}
+	return ret;
+}
+
+/**
+ * vfd_mac_list_store - handler for mac_list store function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer with input data
+ * @count:	size of buff
+ *
+ * Get input mac list into the linked list and depending on "add" or "rem" command
+ * pass the input mac list to the driver to either add or remove macs to the list.
+ *
+ * The mac_list entries allocated here are freed again before returning.
+ *
+ **/
+static ssize_t vfd_mac_list_store(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  const char *buff, size_t count)
+{
+	struct list_head *pos, *n;
+	struct pci_dev *pdev;
+	u8 macaddr[ETH_ALEN];
+	int vf_id, ret;
+	size_t shift;
+	bool add;
+	LIST_HEAD(mac_list_inp);
+
+	if (!vfd_ops->add_macs_to_list || !vfd_ops->rem_macs_from_list)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	if (strstr(buff, "add")) {
+		shift = sizeof("add");
+		add = true;
+	} else if (strstr(buff, "rem")) {
+		shift = sizeof("rem");
+		add = false;
+	} else {
+		dev_err(&pdev->dev, "Invalid input string");
+		ret = -EINVAL;
+		goto err_free;
+	}
+
+	/* Get input data */
+	for (;;) {
+		struct vfd_macaddr *mac_new;
+
+		if (*(buff + shift) == ' ' || *(buff + shift) == ',') {
+			shift++;
+			continue;
+		}
+
+		ret = sscanf(buff + shift,
+			     "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
+			     &macaddr[0], &macaddr[1], &macaddr[2],
+			     &macaddr[3], &macaddr[4], &macaddr[5]);
+
+		if (ret != 6)
+			break;
+
+		if (!is_valid_ether_addr(macaddr)) {
+			shift += 3*ETH_ALEN;
+			continue;
+		}
+
+		mac_new = kmalloc(sizeof(struct vfd_macaddr), GFP_KERNEL);
+		if (!mac_new) {
+			ret = -ENOMEM;
+			goto err_free;
+		}
+
+		ether_addr_copy(mac_new->mac, macaddr);
+		list_add(&mac_new->list, &mac_list_inp);
+
+		shift += 3*ETH_ALEN;
+	}
+
+	if (add)
+		ret = vfd_ops->add_macs_to_list(pdev, vf_id, &mac_list_inp);
+	else
+		ret = vfd_ops->rem_macs_from_list(pdev, vf_id, &mac_list_inp);
+
+err_free:
+	list_for_each_safe(pos, n, &mac_list_inp) {
+		struct vfd_macaddr *mac = NULL;
+
+		mac = list_entry(pos, struct vfd_macaddr, list);
+		list_del(pos);
+		kfree(mac);
+	}
+	return ret ? ret : count;
+}
+
+/**
+ * vfd_promisc_show - handler for promisc show function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer for data
+ **/
+static ssize_t vfd_promisc_show(struct kobject *kobj,
+				struct kobj_attribute *attr, char *buff)
+{
+	struct pci_dev *pdev;
+	int vf_id, ret = 0;
+	u8 data;
+
+	if (!vfd_ops->get_promisc)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_promisc(pdev, vf_id, &data);
+	if (ret < 0)
+		return ret;
+
+	if (data == VFD_PROMISC_UNICAST)
+		ret = scnprintf(buff, PAGE_SIZE, "ucast\n");
+	else if (data == VFD_PROMISC_MULTICAST)
+		ret = scnprintf(buff, PAGE_SIZE, "mcast\n");
+	else if (data == (VFD_PROMISC_UNICAST | VFD_PROMISC_MULTICAST))
+		ret = scnprintf(buff, PAGE_SIZE, "ucast, mcast\n");
+	else if (data == VFD_PROMISC_OFF)
+		ret = scnprintf(buff, PAGE_SIZE, "off\n");
+
+	return ret;
+}
+
+/**
+ * vfd_promisc_store - handler for promisc store function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer with input data
+ * @count:	size of buff
+ **/
+static ssize_t vfd_promisc_store(struct kobject *kobj,
+				 struct kobj_attribute *attr,
+				 const char *buff, size_t count)
+{
+	u8 data_new, data_old;
+	struct pci_dev *pdev;
+	const char *subcmd;
+	const char *cmd;
+	int vf_id, ret;
+
+	if (!vfd_ops->get_promisc || !vfd_ops->set_promisc)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_promisc(pdev, vf_id, &data_old);
+	if (ret < 0)
+		return ret;
+
+	ret = __parse_promisc_input(buff, count, &cmd, &subcmd);
+	if (ret)
+		goto promisc_err;
+
+	if (strncmp(cmd, "add", strlen("add")) == 0) {
+		if (strncmp(subcmd, "ucast", strlen("ucast")) == 0)
+			data_new = data_old | VFD_PROMISC_UNICAST;
+		else if (strncmp(subcmd, "mcast", strlen("mcast")) == 0)
+			data_new = data_old | VFD_PROMISC_MULTICAST;
+		else
+			goto promisc_err;
+	} else if (strncmp(cmd, "rem", strlen("rem")) == 0) {
+		if (strncmp(subcmd, "ucast", strlen("ucast")) == 0)
+			data_new = data_old & ~VFD_PROMISC_UNICAST;
+		else if (strncmp(subcmd, "mcast", strlen("mcast")) == 0)
+			data_new = data_old & ~VFD_PROMISC_MULTICAST;
+		else
+			goto promisc_err;
+	} else {
+		goto promisc_err;
+	}
+
+	if (data_new != data_old)
+		ret = vfd_ops->set_promisc(pdev, vf_id, data_new);
+
+	return ret ? ret : count;
+
+promisc_err:
+	dev_err(&pdev->dev, "Invalid input string");
+	return -EINVAL;
+}
+
+/**
+ * vfd_vlan_strip_show - handler for vlan_strip show function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer for data
+ **/
+static ssize_t vfd_vlan_strip_show(struct kobject *kobj,
+				   struct kobj_attribute *attr, char *buff)
+{
+	struct pci_dev *pdev;
+	int vf_id, ret = 0;
+	bool data;
+
+	if (!vfd_ops->get_vlan_strip)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_vlan_strip(pdev, vf_id, &data);
+	if (ret < 0)
+		return ret;
+
+	if (data)
+		ret = scnprintf(buff, PAGE_SIZE, "on\n");
+	else
+		ret = scnprintf(buff, PAGE_SIZE, "off\n");
+
+	return ret;
+}
+
+/**
+ * vfd_vlan_strip_store - handler for vlan_strip store function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer with input data
+ * @count:	size of buff
+ **/
+static ssize_t vfd_vlan_strip_store(struct kobject *kobj,
+				    struct kobj_attribute *attr,
+				    const char *buff, size_t count)
+{
+	struct pci_dev *pdev;
+	int vf_id, ret = 0;
+	bool data_new, data_old;
+
+	if (!vfd_ops->set_vlan_strip || !vfd_ops->get_vlan_strip)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_vlan_strip(pdev, vf_id, &data_old);
+	if (ret < 0)
+		return ret;
+
+	ret = __parse_bool_data(pdev, buff, "vlan_strip", &data_new);
+	if (ret)
+		return ret;
+
+	if (data_new != data_old)
+		ret = vfd_ops->set_vlan_strip(pdev, vf_id, data_new);
+
+	return ret ? ret : count;
+}
+
+/**
+ * vfd_link_state_show - handler for link_state show function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer for data
+ **/
+static ssize_t vfd_link_state_show(struct kobject *kobj,
+				   struct kobj_attribute *attr, char *buff)
+{
+	enum vfd_link_speed link_speed;
+	struct pci_dev *pdev;
+	int vf_id, ret = 0;
+	bool enabled;
+
+	if (!vfd_ops->get_link_state)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_link_state(pdev, vf_id, &enabled, &link_speed);
+	if (ret < 0)
+		return ret;
+
+	if (enabled) {
+		const char *speed_str;
+
+		switch (link_speed) {
+		case VFD_LINK_SPEED_100MB:
+			speed_str = "100 Mbps";
+			break;
+		case VFD_LINK_SPEED_1GB:
+			speed_str = "1 Gbps";
+			break;
+		case VFD_LINK_SPEED_2_5GB:
+			speed_str = "2.5 Gbps";
+			break;
+		case VFD_LINK_SPEED_5GB:
+			speed_str = "5 Gbps";
+			break;
+		case VFD_LINK_SPEED_10GB:
+			speed_str = "10 Gbps";
+			break;
+		case VFD_LINK_SPEED_40GB:
+			speed_str = "40 Gbps";
+			break;
+		case VFD_LINK_SPEED_20GB:
+			speed_str = "20 Gbps";
+			break;
+		case VFD_LINK_SPEED_25GB:
+			speed_str = "25 Gbps";
+			break;
+		case VFD_LINK_SPEED_UNKNOWN:
+			speed_str = "unknown speed";
+			break;
+		default:
+			dev_err(&pdev->dev, "Link speed is not supported");
+			return -EOPNOTSUPP;
+		}
+
+		ret = scnprintf(buff, PAGE_SIZE, "%s, %s\n", "up", speed_str);
+	} else {
+		ret = scnprintf(buff, PAGE_SIZE, "down\n");
+	}
+
+	return ret;
+}
+
+/**
+ * vfd_link_state_store - handler for link_state store function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer with input data
+ * @count:	size of buff
+ **/
+static ssize_t vfd_link_state_store(struct kobject *kobj,
+				      struct kobj_attribute *attr,
+				      const char *buff, size_t count)
+{
+	struct pci_dev *pdev;
+	int vf_id, ret = 0;
+	u8 data;
+
+	if (!vfd_ops->set_link_state)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	if (sysfs_streq("enable", buff)) {
+		data = VFD_LINKSTATE_ON;
+	} else if (sysfs_streq("disable", buff)) {
+		data = VFD_LINKSTATE_OFF;
+	} else if (sysfs_streq("auto", buff)) {
+		data = VFD_LINKSTATE_AUTO;
+	} else {
+		dev_err(&pdev->dev, "Invalid input string");
+		return -EINVAL;
+	}
+
+	ret = vfd_ops->set_link_state(pdev, vf_id, data);
+
+	return ret ? ret : count;
+}
+
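+/* Illustrative usage (hypothetical device path). The store handler accepts
+ * exactly the strings matched by sysfs_streq() above; the show handler
+ * prints either "down" or "up, <speed>":
+ *
+ *	echo auto > /sys/bus/pci/devices/0000:01:00.0/sriov/0/link_state
+ *	cat /sys/bus/pci/devices/0000:01:00.0/sriov/0/link_state
+ */
+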
+/**
+ * vfd_enable_show - handler for VF enable/disable show function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer for data
+ **/
+static ssize_t vfd_enable_show(struct kobject *kobj,
+			       struct kobj_attribute *attr,
+			       char *buff)
+{
+	struct pci_dev *pdev;
+	int vf_id, ret;
+	bool data;
+
+	if (!vfd_ops->get_vf_enable)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_vf_enable(pdev, vf_id, &data);
+	if (ret < 0)
+		return ret;
+
+	if (data)
+		ret = scnprintf(buff, PAGE_SIZE, "on\n");
+	else
+		ret = scnprintf(buff, PAGE_SIZE, "off\n");
+
+	return ret;
+}
+
+/**
+ * vfd_enable_store - handler for VF enable/disable store function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer with input data
+ * @count:	size of buff
+ *
+ * On success return count, indicating that we used the whole buffer. On
+ * failure return a negative error condition.
+ **/
+static ssize_t vfd_enable_store(struct kobject *kobj,
+				struct kobj_attribute *attr,
+				const char *buff, size_t count)
+{
+	bool data_new, data_old;
+	struct pci_dev *pdev;
+	int vf_id, ret;
+
+	if (!vfd_ops->set_vf_enable || !vfd_ops->get_vf_enable)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_vf_enable(pdev, vf_id, &data_old);
+	if (ret < 0)
+		return ret;
+
+	ret = __parse_bool_data(pdev, buff, "enable", &data_new);
+	if (ret)
+		return ret;
+
+	if (data_new != data_old)
+		ret = vfd_ops->set_vf_enable(pdev, vf_id, data_new);
+
+	return ret ? ret : count;
+}
+
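+/* Illustrative usage (hypothetical device path; same assumed "on"/"off"
+ * tokens as for vlan_strip above):
+ *
+ *	echo off > /sys/bus/pci/devices/0000:01:00.0/sriov/0/enable
+ *	cat /sys/bus/pci/devices/0000:01:00.0/sriov/0/enable
+ */
+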
+/**
+ * vfd_max_tx_rate_show - handler for max_tx_rate show function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer for data
+ **/
+static ssize_t vfd_max_tx_rate_show(struct kobject *kobj,
+				    struct kobj_attribute *attr, char *buff)
+{
+	unsigned int max_tx_rate;
+	struct pci_dev *pdev;
+	int vf_id, ret;
+
+	if (!vfd_ops->get_max_tx_rate)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_max_tx_rate(pdev, vf_id, &max_tx_rate);
+	if (ret < 0)
+		return ret;
+
+	ret = scnprintf(buff, PAGE_SIZE, "%u\n", max_tx_rate);
+	return ret;
+}
+
+/**
+ * vfd_max_tx_rate_store - handler for max_tx_rate store function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer with input data
+ * @count:	size of buff
+ **/
+static ssize_t vfd_max_tx_rate_store(struct kobject *kobj,
+				     struct kobj_attribute *attr,
+				     const char *buff, size_t count)
+{
+	unsigned int max_tx_rate;
+	struct pci_dev *pdev;
+	int vf_id, ret;
+
+	if (!vfd_ops->set_max_tx_rate)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = kstrtouint(buff, 10, &max_tx_rate);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"Invalid argument, not a decimal number: %s", buff);
+		return ret;
+	}
+
+	ret = vfd_ops->set_max_tx_rate(pdev, vf_id, &max_tx_rate);
+
+	return ret ? ret : count;
+}
+
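+/* Illustrative usage (hypothetical device path). The value is parsed as an
+ * unsigned decimal; the unit is defined by the underlying vfd_ops
+ * implementation:
+ *
+ *	echo 1000 > /sys/bus/pci/devices/0000:01:00.0/sriov/0/max_tx_rate
+ */
+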
+/**
+ * vfd_min_tx_rate_show - handler for min_tx_rate show function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer for data
+ **/
+static ssize_t vfd_min_tx_rate_show(struct kobject *kobj,
+				    struct kobj_attribute *attr, char *buff)
+{
+	if (!vfd_ops->get_min_tx_rate)
+		return -EOPNOTSUPP;
+
+	return vfd_ops->get_min_tx_rate(kobj, attr, buff);
+}
+
+/**
+ * vfd_min_tx_rate_store - handler for min_tx_rate store function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer with input data
+ * @count:	size of buff
+ **/
+static ssize_t vfd_min_tx_rate_store(struct kobject *kobj,
+				     struct kobj_attribute *attr,
+				     const char *buff, size_t count)
+{
+	if (!vfd_ops->set_min_tx_rate)
+		return -EOPNOTSUPP;
+
+	return vfd_ops->set_min_tx_rate(kobj, attr, buff, count);
+}
+
+/**
+ * vfd_spoofcheck_show - handler for spoofcheck show function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer for data
+ **/
+static ssize_t vfd_spoofcheck_show(struct kobject *kobj,
+				   struct kobj_attribute *attr, char *buff)
+{
+	if (!vfd_ops->get_spoofcheck)
+		return -EOPNOTSUPP;
+
+	return vfd_ops->get_spoofcheck(kobj, attr, buff);
+}
+
+/**
+ * vfd_spoofcheck_store - handler for spoofcheck store function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer with input data
+ * @count:	size of buff
+ **/
+static ssize_t vfd_spoofcheck_store(struct kobject *kobj,
+				    struct kobj_attribute *attr,
+				    const char *buff, size_t count)
+{
+	if (!vfd_ops->set_spoofcheck)
+		return -EOPNOTSUPP;
+
+	return vfd_ops->set_spoofcheck(kobj, attr, buff, count);
+}
+
+/**
+ * vfd_trust_show - handler for trust show function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer for data
+ **/
+static ssize_t vfd_trust_show(struct kobject *kobj,
+			      struct kobj_attribute *attr, char *buff)
+{
+	struct pci_dev *pdev;
+	int vf_id, ret;
+	bool data;
+
+	if (!vfd_ops->get_trust_state)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_trust_state(pdev, vf_id, &data);
+	if (ret < 0)
+		return ret;
+
+	if (data)
+		ret = scnprintf(buff, PAGE_SIZE, "on\n");
+	else
+		ret = scnprintf(buff, PAGE_SIZE, "off\n");
+
+	return ret;
+}
+
+/**
+ * vfd_trust_store - handler for trust store function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer with input data
+ * @count:	size of buff
+ **/
+static ssize_t vfd_trust_store(struct kobject *kobj,
+			       struct kobj_attribute *attr,
+			       const char *buff, size_t count)
+{
+	bool data_new, data_old;
+	struct pci_dev *pdev;
+	int vf_id, ret;
+
+	if (!vfd_ops->set_trust_state || !vfd_ops->get_trust_state)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_trust_state(pdev, vf_id, &data_old);
+	if (ret < 0)
+		return ret;
+
+	ret = __parse_bool_data(pdev, buff, "trust", &data_new);
+	if (ret)
+		return ret;
+
+	if (data_new != data_old)
+		ret = vfd_ops->set_trust_state(pdev, vf_id, data_new);
+
+	return ret ? ret : count;
+}
+
+/**
+ * vfd_reset_stats_store - handler for reset stats store function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer with input data
+ * @count:	size of buff
+ **/
+static ssize_t vfd_reset_stats_store(struct kobject *kobj,
+				     struct kobj_attribute *attr,
+				     const char *buff, size_t count)
+{
+	int vf_id, reset, ret;
+	struct pci_dev *pdev;
+
+	if (!vfd_ops->reset_stats)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+	ret = kstrtoint(buff, 10, &reset);
+	if (ret) {
+		dev_err(&pdev->dev, "Invalid input\n");
+		return ret;
+	}
+
+	if (reset != 1)
+		return -EINVAL;
+
+	ret = vfd_ops->reset_stats(pdev, vf_id);
+
+	return ret ? ret : count;
+}
+
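+/* Illustrative usage (hypothetical device path). Only the value 1 is
+ * accepted; anything else fails with -EINVAL:
+ *
+ *	echo 1 > /sys/bus/pci/devices/0000:01:00.0/sriov/0/reset_stats
+ */
+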
+/**
+ * vfd_rx_bytes_show - handler for rx_bytes show function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer for data
+ **/
+static ssize_t vfd_rx_bytes_show(struct kobject *kobj,
+				 struct kobj_attribute *attr, char *buff)
+{
+	struct pci_dev *pdev;
+	int vf_id, ret = 0;
+	u64 data;
+
+	if (!vfd_ops->get_rx_bytes)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_rx_bytes(pdev, vf_id, &data);
+	if (ret < 0)
+		return ret;
+
+	ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data);
+
+	return ret;
+}
+
+/**
+ * vfd_rx_dropped_show - handler for rx_dropped show function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer for data
+ **/
+static ssize_t vfd_rx_dropped_show(struct kobject *kobj,
+				   struct kobj_attribute *attr, char *buff)
+{
+	struct pci_dev *pdev;
+	int vf_id, ret = 0;
+	u64 data;
+
+	if (!vfd_ops->get_rx_dropped)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_rx_dropped(pdev, vf_id, &data);
+	if (ret < 0)
+		return ret;
+
+	ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data);
+
+	return ret;
+}
+
+/**
+ * vfd_rx_packets_show - handler for rx_packets show function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer for data
+ **/
+static ssize_t vfd_rx_packets_show(struct kobject *kobj,
+				   struct kobj_attribute *attr, char *buff)
+{
+	struct pci_dev *pdev;
+	int vf_id, ret = 0;
+	u64 data;
+
+	if (!vfd_ops->get_rx_packets)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_rx_packets(pdev, vf_id, &data);
+	if (ret < 0)
+		return ret;
+
+	ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data);
+
+	return ret;
+}
+
+/**
+ * vfd_tx_bytes_show - handler for tx_bytes show function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer for data
+ **/
+static ssize_t vfd_tx_bytes_show(struct kobject *kobj,
+				 struct kobj_attribute *attr, char *buff)
+{
+	struct pci_dev *pdev;
+	int vf_id, ret = 0;
+	u64 data;
+
+	if (!vfd_ops->get_tx_bytes)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_tx_bytes(pdev, vf_id, &data);
+	if (ret < 0)
+		return ret;
+
+	ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data);
+
+	return ret;
+}
+
+/**
+ * vfd_tx_dropped_show - handler for tx_dropped show function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer for data
+ **/
+static ssize_t vfd_tx_dropped_show(struct kobject *kobj,
+				   struct kobj_attribute *attr, char *buff)
+{
+	struct pci_dev *pdev;
+	int vf_id, ret = 0;
+	u64 data;
+
+	if (!vfd_ops->get_tx_dropped)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_tx_dropped(pdev, vf_id, &data);
+	if (ret < 0)
+		return ret;
+
+	ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data);
+
+	return ret;
+}
+
+/**
+ * vfd_tx_packets_show - handler for tx_packets show function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer for data
+ **/
+static ssize_t vfd_tx_packets_show(struct kobject *kobj,
+				   struct kobj_attribute *attr, char *buff)
+{
+	struct pci_dev *pdev;
+	int vf_id, ret = 0;
+	u64 data;
+
+	if (!vfd_ops->get_tx_packets)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_tx_packets(pdev, vf_id, &data);
+	if (ret < 0)
+		return ret;
+
+	ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data);
+
+	return ret;
+}
+
+/**
+ * vfd_tx_spoofed_show - handler for tx_spoofed show function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer for data
+ **/
+static ssize_t vfd_tx_spoofed_show(struct kobject *kobj,
+				   struct kobj_attribute *attr, char *buff)
+{
+	struct pci_dev *pdev;
+	int vf_id, ret = 0;
+	u64 data;
+
+	if (!vfd_ops->get_tx_spoofed)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_tx_spoofed(pdev, vf_id, &data);
+	if (ret < 0)
+		return ret;
+
+	ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data);
+
+	return ret;
+}
+
+/**
+ * vfd_tx_errors_show - handler for tx_errors show function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer for data
+ **/
+static ssize_t vfd_tx_errors_show(struct kobject *kobj,
+				  struct kobj_attribute *attr, char *buff)
+{
+	struct pci_dev *pdev;
+	int vf_id, ret = 0;
+	u64 data;
+
+	if (!vfd_ops->get_tx_errors)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_tx_errors(pdev, vf_id, &data);
+	if (ret < 0)
+		return ret;
+
+	ret = scnprintf(buff, PAGE_SIZE, "%llu\n", data);
+
+	return ret;
+}
+
+/**
+ * qos_share_show - handler for the bw_share show function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer for data
+ **/
+static ssize_t qos_share_show(struct kobject *kobj,
+			      struct kobj_attribute *attr, char *buff)
+{
+	struct pci_dev *pdev;
+	int vf_id, ret;
+	u8 data = 0;
+
+	if (!vfd_ops->get_vf_bw_share)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_vf_bw_share(pdev, vf_id, &data);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "No bw share applied for VF %d\n", vf_id);
+		return ret;
+	}
+
+	ret = scnprintf(buff, PAGE_SIZE, "%u\n", data);
+
+	return ret;
+}
+
+/**
+ * qos_share_store - handler for the bw_share store function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer with input data
+ * @count:	size of buff
+ **/
+static ssize_t qos_share_store(struct kobject *kobj,
+			       struct kobj_attribute *attr,
+			       const char *buff, size_t count)
+{
+	struct pci_dev *pdev;
+	int vf_id, ret;
+	u8 bw_share;
+
+	if (!vfd_ops->set_vf_bw_share)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	/* parse the bw_share */
+	ret = kstrtou8(buff, 10, &bw_share);
+	if (ret) {
+		dev_err(&pdev->dev, "Invalid input\n");
+		return ret;
+	}
+
+	/* check that the BW is between 1 and 100 */
+	if (bw_share < 1 || bw_share > 100) {
+		dev_err(&pdev->dev, "BW share has to be between 1-100\n");
+		return -EINVAL;
+	}
+	ret = vfd_ops->set_vf_bw_share(pdev, vf_id, bw_share);
+	return ret ? ret : count;
+}
+
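+/* Illustrative usage (hypothetical device path). The share is parsed as an
+ * unsigned decimal and must lie in the range 1-100:
+ *
+ *	echo 50 > /sys/bus/pci/devices/0000:01:00.0/sriov/0/qos/share
+ */
+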
+/**
+ * pf_qos_apply_store - handler for pf qos apply store function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer with input data
+ * @count:	size of buff
+ **/
+static ssize_t pf_qos_apply_store(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  const char *buff, size_t count)
+{
+	int ret, apply;
+	struct pci_dev *pdev;
+
+	if (!vfd_ops->set_pf_qos_apply)
+		return -EOPNOTSUPP;
+
+	ret = __get_pf_pdev(kobj, &pdev);
+	if (ret)
+		return ret;
+
+	ret = kstrtoint(buff, 10, &apply);
+	if (ret) {
+		dev_err(&pdev->dev, "Invalid input\n");
+		return ret;
+	}
+
+	if (apply != 1)
+		return -EINVAL;
+
+	ret = vfd_ops->set_pf_qos_apply(pdev);
+
+	return ret ? ret : count;
+}
+
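+/* Illustrative usage (hypothetical device path). Writing 1 to the PF-level
+ * node hands control to the driver's set_pf_qos_apply() callback, which is
+ * expected to commit the per-VF QoS configuration:
+ *
+ *	echo 1 > /sys/bus/pci/devices/0000:01:00.0/sriov/qos/apply
+ */
+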
+/**
+ * pf_ingress_mirror_show - handler for PF ingress mirror show function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer for data
+ **/
+static ssize_t pf_ingress_mirror_show(struct kobject *kobj,
+				      struct kobj_attribute *attr, char *buff)
+{
+	struct pci_dev *pdev;
+	int ret, data;
+
+	if (!vfd_ops->get_pf_ingress_mirror)
+		return -EOPNOTSUPP;
+
+	ret = __get_pf_pdev(kobj, &pdev);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_pf_ingress_mirror(pdev, &data);
+	if (ret < 0)
+		return ret;
+
+	if (data == VFD_INGRESS_MIRROR_OFF)
+		ret = scnprintf(buff, PAGE_SIZE, "off\n");
+	else
+		ret = scnprintf(buff, PAGE_SIZE, "%u\n", data);
+
+	return ret;
+}
+
+/**
+ * pf_ingress_mirror_store - handler for pf ingress mirror store function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer with input data
+ * @count:	size of buff
+ **/
+static ssize_t pf_ingress_mirror_store(struct kobject *kobj,
+				       struct kobj_attribute *attr,
+				       const char *buff, size_t count)
+{
+	int data_new, data_old;
+	struct pci_dev *pdev;
+	int ret;
+
+	if (!vfd_ops->set_pf_ingress_mirror || !vfd_ops->get_pf_ingress_mirror)
+		return -EOPNOTSUPP;
+
+	ret = __get_pf_pdev(kobj, &pdev);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_pf_ingress_mirror(pdev, &data_old);
+	if (ret < 0)
+		return ret;
+
+	ret = __parse_egress_ingress_input(pdev, buff, "ingress_mirror",
+					   &data_new, &data_old);
+	if (ret)
+		return ret;
+
+	if (data_new != data_old)
+		ret = vfd_ops->set_pf_ingress_mirror(pdev, data_new);
+
+	return ret ? ret : count;
+}
+
+/**
+ * pf_egress_mirror_show - handler for PF egress mirror show function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer for data
+ **/
+static ssize_t pf_egress_mirror_show(struct kobject *kobj,
+				     struct kobj_attribute *attr, char *buff)
+{
+	struct pci_dev *pdev;
+	int ret, data;
+
+	if (!vfd_ops->get_pf_egress_mirror)
+		return -EOPNOTSUPP;
+
+	ret = __get_pf_pdev(kobj, &pdev);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_pf_egress_mirror(pdev, &data);
+	if (ret < 0)
+		return ret;
+
+	if (data == VFD_EGRESS_MIRROR_OFF)
+		ret = scnprintf(buff, PAGE_SIZE, "off\n");
+	else
+		ret = scnprintf(buff, PAGE_SIZE, "%u\n", data);
+
+	return ret;
+}
+
+/**
+ * pf_egress_mirror_store - handler for pf egress mirror store function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer with input data
+ * @count:	size of buff
+ **/
+static ssize_t pf_egress_mirror_store(struct kobject *kobj,
+				      struct kobj_attribute *attr,
+				      const char *buff, size_t count)
+{
+	int data_new, data_old;
+	struct pci_dev *pdev;
+	int ret;
+
+	if (!vfd_ops->set_pf_egress_mirror || !vfd_ops->get_pf_egress_mirror)
+		return -EOPNOTSUPP;
+
+	ret = __get_pf_pdev(kobj, &pdev);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_pf_egress_mirror(pdev, &data_old);
+	if (ret < 0)
+		return ret;
+
+	ret = __parse_egress_ingress_input(pdev, buff, "egress_mirror",
+					   &data_new, &data_old);
+	if (ret)
+		return ret;
+
+	if (data_new != data_old)
+		ret = vfd_ops->set_pf_egress_mirror(pdev, data_new);
+
+	return ret ? ret : count;
+}
+
+/**
+ * pf_tpid_show - handler for pf tpid show function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer for data
+ **/
+static ssize_t pf_tpid_show(struct kobject *kobj,
+			    struct kobj_attribute *attr, char *buff)
+{
+	struct pci_dev *pdev;
+	u16 data;
+	int ret;
+
+	if (!vfd_ops->get_pf_tpid)
+		return -EOPNOTSUPP;
+
+	ret = __get_pf_pdev(kobj, &pdev);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_pf_tpid(pdev, &data);
+	if (ret < 0)
+		return ret;
+
+	ret = scnprintf(buff, PAGE_SIZE, "%x\n", data);
+
+	return ret;
+}
+
+/**
+ * pf_tpid_store - handler for pf tpid store function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer with input data
+ * @count:	size of buff
+ **/
+static ssize_t pf_tpid_store(struct kobject *kobj,
+			     struct kobj_attribute *attr,
+			     const char *buff, size_t count)
+{
+	struct pci_dev *pdev;
+	u16 data;
+	int ret;
+
+	if (!vfd_ops->set_pf_tpid)
+		return -EOPNOTSUPP;
+
+	ret = __get_pf_pdev(kobj, &pdev);
+	if (ret)
+		return ret;
+
+	ret = kstrtou16(buff, 16, &data);
+	if (ret) {
+		dev_err(&pdev->dev, "Invalid input\n");
+		return ret;
+	}
+
+	ret = vfd_ops->set_pf_tpid(pdev, data);
+
+	return ret ? ret : count;
+}
+
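+/* Illustrative usage (hypothetical device path). The TPID is parsed as a
+ * hexadecimal value, so e.g. the 802.1ad TPID would be written as:
+ *
+ *	echo 88a8 > /sys/bus/pci/devices/0000:01:00.0/sriov/tpid
+ */
+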
+/**
+ * vfd_num_queues_show - handler for num_queues show function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer for data
+ **/
+static ssize_t vfd_num_queues_show(struct kobject *kobj,
+				   struct kobj_attribute *attr, char *buff)
+{
+	struct pci_dev *pdev;
+	int vf_id, ret;
+	int data;
+
+	if (!vfd_ops->get_num_queues)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_num_queues(pdev, vf_id, &data);
+	if (ret)
+		return ret;
+
+	ret = scnprintf(buff, PAGE_SIZE, "%d\n", data);
+
+	return ret;
+}
+
+/**
+ * vfd_num_queues_store - handler for num_queues store function
+ * @kobj:	kobject being called
+ * @attr:	struct kobj_attribute
+ * @buff:	buffer with input data
+ * @count:	size of buff
+ **/
+static ssize_t vfd_num_queues_store(struct kobject *kobj,
+				    struct kobj_attribute *attr,
+				    const char *buff, size_t count)
+{
+	int data_new, data_old;
+	struct pci_dev *pdev;
+	int vf_id, ret;
+
+	if (!vfd_ops->set_num_queues)
+		return -EOPNOTSUPP;
+
+	ret = __get_pdev_and_vfid(kobj, &pdev, &vf_id);
+	if (ret)
+		return ret;
+
+	ret = vfd_ops->get_num_queues(pdev, vf_id, &data_old);
+	if (ret)
+		return ret;
+
+	ret = kstrtoint(buff, 10, &data_new);
+	if (ret) {
+		dev_err(&pdev->dev, "Invalid input\n");
+		return ret;
+	}
+
+	if (data_new < 1) {
+		dev_err(&pdev->dev, "VF queue count must be at least 1\n");
+		return -EINVAL;
+	}
+
+	if (data_new != data_old)
+		ret = vfd_ops->set_num_queues(pdev, vf_id, data_new);
+
+	return ret ? ret : count;
+}
+
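+/* Illustrative usage (hypothetical device path). The count is parsed as a
+ * signed decimal and must be at least 1:
+ *
+ *	echo 4 > /sys/bus/pci/devices/0000:01:00.0/sriov/0/num_queues
+ */
+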
+static struct kobj_attribute trunk_attribute =
+	__ATTR(trunk, 0644, vfd_trunk_show, vfd_trunk_store);
+static struct kobj_attribute vlan_mirror_attribute =
+	__ATTR(vlan_mirror, 0644, vfd_vlan_mirror_show, vfd_vlan_mirror_store);
+static struct kobj_attribute egress_mirror_attribute =
+	__ATTR(egress_mirror, 0644,
+	       vfd_egress_mirror_show, vfd_egress_mirror_store);
+static struct kobj_attribute ingress_mirror_attribute =
+	__ATTR(ingress_mirror, 0644,
+	       vfd_ingress_mirror_show, vfd_ingress_mirror_store);
+static struct kobj_attribute mac_anti_spoof_attribute =
+	__ATTR(mac_anti_spoof, 0644,
+	       vfd_mac_anti_spoof_show, vfd_mac_anti_spoof_store);
+static struct kobj_attribute vlan_anti_spoof_attribute =
+	__ATTR(vlan_anti_spoof, 0644,
+	       vfd_vlan_anti_spoof_show, vfd_vlan_anti_spoof_store);
+static struct kobj_attribute allow_untagged_attribute =
+	__ATTR(allow_untagged, 0644,
+	       vfd_allow_untagged_show, vfd_allow_untagged_store);
+static struct kobj_attribute loopback_attribute =
+	__ATTR(loopback, 0644, vfd_loopback_show, vfd_loopback_store);
+static struct kobj_attribute mac_attribute =
+	__ATTR(mac, 0644, vfd_mac_show, vfd_mac_store);
+static struct kobj_attribute mac_list_attribute =
+	__ATTR(mac_list, 0644, vfd_mac_list_show, vfd_mac_list_store);
+static struct kobj_attribute promisc_attribute =
+	__ATTR(promisc, 0644, vfd_promisc_show, vfd_promisc_store);
+static struct kobj_attribute vlan_strip_attribute =
+	__ATTR(vlan_strip, 0644, vfd_vlan_strip_show, vfd_vlan_strip_store);
+static struct kobj_attribute link_state_attribute =
+	__ATTR(link_state, 0644, vfd_link_state_show, vfd_link_state_store);
+static struct kobj_attribute max_tx_rate_attribute =
+	__ATTR(max_tx_rate, 0644, vfd_max_tx_rate_show, vfd_max_tx_rate_store);
+static struct kobj_attribute min_tx_rate_attribute =
+	__ATTR(min_tx_rate, 0644, vfd_min_tx_rate_show, vfd_min_tx_rate_store);
+static struct kobj_attribute spoofcheck_attribute =
+	__ATTR(spoofcheck, 0644, vfd_spoofcheck_show, vfd_spoofcheck_store);
+static struct kobj_attribute trust_attribute =
+	__ATTR(trust, 0644, vfd_trust_show, vfd_trust_store);
+static struct kobj_attribute reset_stats_attribute =
+	__ATTR(reset_stats, 0200, NULL, vfd_reset_stats_store);
+static struct kobj_attribute enable_attribute =
+	__ATTR(enable, 0644, vfd_enable_show, vfd_enable_store);
+static struct kobj_attribute num_queues_attribute =
+	__ATTR(num_queues, 0644, vfd_num_queues_show, vfd_num_queues_store);
+
+static struct attribute *s_attrs[] = {
+	&trunk_attribute.attr,
+	&vlan_mirror_attribute.attr,
+	&egress_mirror_attribute.attr,
+	&ingress_mirror_attribute.attr,
+	&mac_anti_spoof_attribute.attr,
+	&vlan_anti_spoof_attribute.attr,
+	&allow_untagged_attribute.attr,
+	&loopback_attribute.attr,
+	&mac_attribute.attr,
+	&mac_list_attribute.attr,
+	&promisc_attribute.attr,
+	&vlan_strip_attribute.attr,
+	&link_state_attribute.attr,
+	&max_tx_rate_attribute.attr,
+	&min_tx_rate_attribute.attr,
+	&spoofcheck_attribute.attr,
+	&trust_attribute.attr,
+	&reset_stats_attribute.attr,
+	&enable_attribute.attr,
+	&num_queues_attribute.attr,
+	NULL,
+};
+
+static struct attribute_group vfd_group = {
+	.attrs = s_attrs,
+};
+
+static struct kobj_attribute rx_bytes_attribute =
+	__ATTR(rx_bytes, 0444, vfd_rx_bytes_show, NULL);
+static struct kobj_attribute rx_dropped_attribute =
+	__ATTR(rx_dropped, 0444, vfd_rx_dropped_show, NULL);
+static struct kobj_attribute rx_packets_attribute =
+	__ATTR(rx_packets, 0444, vfd_rx_packets_show, NULL);
+static struct kobj_attribute tx_bytes_attribute =
+	__ATTR(tx_bytes, 0444, vfd_tx_bytes_show, NULL);
+static struct kobj_attribute tx_dropped_attribute =
+	__ATTR(tx_dropped, 0444, vfd_tx_dropped_show, NULL);
+static struct kobj_attribute tx_packets_attribute =
+	__ATTR(tx_packets, 0444, vfd_tx_packets_show, NULL);
+static struct kobj_attribute tx_spoofed_attribute =
+	__ATTR(tx_spoofed, 0444, vfd_tx_spoofed_show, NULL);
+static struct kobj_attribute tx_errors_attribute =
+	__ATTR(tx_errors, 0444, vfd_tx_errors_show, NULL);
+
+static struct attribute *stats_attrs[] = {
+	&rx_bytes_attribute.attr,
+	&rx_dropped_attribute.attr,
+	&rx_packets_attribute.attr,
+	&tx_bytes_attribute.attr,
+	&tx_dropped_attribute.attr,
+	&tx_packets_attribute.attr,
+	&tx_spoofed_attribute.attr,
+	&tx_errors_attribute.attr,
+	NULL,
+};
+
+static struct attribute_group stats_group = {
+	.name = "stats",
+	.attrs = stats_attrs,
+};
+
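+/* Together, the attribute groups above produce a per-VF sysfs layout of the
+ * form (BDF and VF index are examples):
+ *
+ *	/sys/bus/pci/devices/<BDF>/sriov/<vf>/{mac,trunk,link_state,...}
+ *	/sys/bus/pci/devices/<BDF>/sriov/<vf>/stats/{rx_bytes,...,tx_errors}
+ */
+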
+static struct kobj_attribute share_attribute =
+	__ATTR(share, 0644, qos_share_show, qos_share_store);
+
+static struct attribute *qos_attrs[] = {
+	&share_attribute.attr,
+	NULL,
+};
+
+static struct attribute_group qos_group = {
+	.name = "qos",
+	.attrs = qos_attrs,
+};
+
+static struct kobj_attribute apply_attribute =
+	__ATTR(apply, 0200, NULL, pf_qos_apply_store);
+
+static struct attribute *pf_qos_attrs[] = {
+	&apply_attribute.attr,
+	NULL,
+};
+
+static struct attribute_group pf_qos_group = {
+	.name = "qos",
+	.attrs = pf_qos_attrs,
+};
+
+static struct kobj_attribute pf_ingress_mirror_attribute =
+	__ATTR(ingress_mirror, 0644,
+	       pf_ingress_mirror_show, pf_ingress_mirror_store);
+static struct kobj_attribute pf_egress_mirror_attribute =
+	__ATTR(egress_mirror, 0644,
+	       pf_egress_mirror_show, pf_egress_mirror_store);
+static struct kobj_attribute pf_tpid_attribute =
+	__ATTR(tpid, 0644, pf_tpid_show, pf_tpid_store);
+
+static struct attribute *pf_attrs[] = {
+	&pf_ingress_mirror_attribute.attr,
+	&pf_egress_mirror_attribute.attr,
+	&pf_tpid_attribute.attr,
+	NULL,
+};
+
+static struct attribute_group pf_attr_group = {
+	.attrs = pf_attrs,
+};
+
+/**
+ * create_vfs_sysfs - create sysfs hierarchy for VF
+ * @pdev:	PCI device information struct
+ * @vfd_obj:	VF-d kobjects information struct
+ *
+ * Creates a kobject for Virtual Function and assigns attributes to it.
+ **/
+static int create_vfs_sysfs(struct pci_dev *pdev, struct vfd_objects *vfd_obj)
+{
+	struct kobject *vf_kobj;
+	char kname[4];
+	int ret, i;
+
+	for (i = 0; i < vfd_obj->num_vfs; i++) {
+		int length = snprintf(kname, sizeof(kname), "%d", i);
+
+		if (length >= sizeof(kname)) {
+			dev_err(&pdev->dev,
+				"cannot request %d VFs, try again with a smaller number of VFs\n",
+				i);
+			--i;
+			ret = -EINVAL;
+			goto err_vfs_sysfs;
+		}
+
+		vf_kobj = kobject_create_and_add(kname, vfd_obj->sriov_kobj);
+		if (!vf_kobj) {
+			dev_err(&pdev->dev,
+				"failed to create VF kobj: %s\n", kname);
+			i--;
+			ret = -ENOMEM;
+			goto err_vfs_sysfs;
+		}
+		dev_info(&pdev->dev, "created VF %s sysfs\n", vf_kobj->name);
+		vfd_obj->vf_kobj[i] = vf_kobj;
+
+		/* create VF sys attr */
+		ret = sysfs_create_group(vfd_obj->vf_kobj[i], &vfd_group);
+		if (ret) {
+			dev_err(&pdev->dev, "failed to create VF sys attribute: %d\n", i);
+			goto err_vfs_sysfs;
+		}
+
+		/* create VF stats sys attr */
+		ret = sysfs_create_group(vfd_obj->vf_kobj[i], &stats_group);
+		if (ret) {
+			dev_err(&pdev->dev, "failed to create VF stats attribute: %d\n", i);
+			goto err_vfs_sysfs;
+		}
+
+		/* create VF qos sys attr */
+		ret = sysfs_create_group(vfd_obj->vf_kobj[i], &qos_group);
+		if (ret) {
+			dev_err(&pdev->dev, "failed to create VF qos attribute: %d\n", i);
+			goto err_vfs_sysfs;
+		}
+	}
+
+	return 0;
+
+err_vfs_sysfs:
+	for (; i >= 0; i--)
+		kobject_put(vfd_obj->vf_kobj[i]);
+	return ret;
+}
+
+/**
+ * create_vfd_sysfs - create sysfs hierarchy used by VF-d
+ * @pdev:		PCI device information struct
+ * @num_alloc_vfs:	number of VFs to allocate
+ *
+ * Returns NULL if the kobjects could not be created.
+ **/
+struct vfd_objects *create_vfd_sysfs(struct pci_dev *pdev, int num_alloc_vfs)
+{
+	struct vfd_objects *vfd_obj;
+	int ret;
+
+	vfd_obj = kzalloc(sizeof(*vfd_obj) +
+			  sizeof(struct kobject *) * num_alloc_vfs, GFP_KERNEL);
+	if (!vfd_obj)
+		return NULL;
+
+	vfd_obj->num_vfs = num_alloc_vfs;
+
+	vfd_obj->sriov_kobj = kobject_create_and_add("sriov", &pdev->dev.kobj);
+	if (!vfd_obj->sriov_kobj)
+		goto err_sysfs;
+
+	dev_info(&pdev->dev, "created %s sysfs\n", vfd_obj->sriov_kobj->name);
+
+	ret = create_vfs_sysfs(pdev, vfd_obj);
+	if (ret)
+		goto err_sysfs;
+
+	/* create PF qos sys attr */
+	ret = sysfs_create_group(vfd_obj->sriov_kobj, &pf_qos_group);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to create PF qos sys attribute\n");
+		goto err_sysfs;
+	}
+
+	/* create PF attrs */
+	ret = sysfs_create_group(vfd_obj->sriov_kobj, &pf_attr_group);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to create PF attr sys attribute\n");
+		goto err_sysfs;
+	}
+	return vfd_obj;
+
+err_sysfs:
+	kobject_put(vfd_obj->sriov_kobj);
+	kfree(vfd_obj);
+	return NULL;
+}
+
+/**
+ * destroy_vfd_sysfs - destroy sysfs hierarchy used by VF-d
+ * @pdev:	PCI device information struct
+ * @vfd_obj:	VF-d kobjects information struct
+ **/
+void destroy_vfd_sysfs(struct pci_dev *pdev, struct vfd_objects *vfd_obj)
+{
+	int i;
+
+	for (i = 0; i < vfd_obj->num_vfs; i++) {
+		dev_info(&pdev->dev, "deleting VF %s sysfs\n",
+			 vfd_obj->vf_kobj[i]->name);
+		kobject_put(vfd_obj->vf_kobj[i]);
+	}
+
+	dev_info(&pdev->dev, "deleting %s sysfs\n", vfd_obj->sriov_kobj->name);
+	kobject_put(vfd_obj->sriov_kobj);
+	kfree(vfd_obj);
+}
diff --git a/drivers/net/ethernet/intel/i40e/kcompat_vfd.h b/drivers/net/ethernet/intel/i40e/kcompat_vfd.h
new file mode 100644
index 000000000..894cd2607
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/kcompat_vfd.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
+
+#ifndef _KCOMPAT_VFD_H_
+#define _KCOMPAT_VFD_H_
+
+#define VFD_PROMISC_OFF		0x00
+#define VFD_PROMISC_UNICAST	0x01
+#define VFD_PROMISC_MULTICAST	0x02
+
+#define VFD_LINKSTATE_OFF	0x00
+#define VFD_LINKSTATE_ON	0x01
+#define VFD_LINKSTATE_AUTO	0x02
+
+#define VFD_EGRESS_MIRROR_OFF	-1
+#define VFD_INGRESS_MIRROR_OFF	-1
+
+/**
+ * struct vfd_objects - VF-d kobjects information struct
+ * @num_vfs:	number of VFs allocated
+ * @sriov_kobj:	pointer to the top sriov kobject
+ * @vf_kobj:	array of pointer to each VF's kobjects
+ */
+struct vfd_objects {
+	int num_vfs;
+	struct kobject *sriov_kobj;
+	struct kobject *vf_kobj[0];
+};
+
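+/* vf_kobj is a C89-style flexible array member: callers allocate
+ * sizeof(struct vfd_objects) + num_vfs * sizeof(struct kobject *),
+ * as create_vfd_sysfs() does.
+ */
+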
+struct vfd_macaddr {
+	u8 mac[ETH_ALEN];
+	struct list_head list;
+};
+
+#define VFD_LINK_SPEED_2_5GB_SHIFT		0x0
+#define VFD_LINK_SPEED_100MB_SHIFT		0x1
+#define VFD_LINK_SPEED_1GB_SHIFT		0x2
+#define VFD_LINK_SPEED_10GB_SHIFT		0x3
+#define VFD_LINK_SPEED_40GB_SHIFT		0x4
+#define VFD_LINK_SPEED_20GB_SHIFT		0x5
+#define VFD_LINK_SPEED_25GB_SHIFT		0x6
+#define VFD_LINK_SPEED_5GB_SHIFT		0x7
+
+enum vfd_link_speed {
+	VFD_LINK_SPEED_UNKNOWN	= 0,
+	VFD_LINK_SPEED_100MB	= BIT(VFD_LINK_SPEED_100MB_SHIFT),
+	VFD_LINK_SPEED_1GB	= BIT(VFD_LINK_SPEED_1GB_SHIFT),
+	VFD_LINK_SPEED_2_5GB	= BIT(VFD_LINK_SPEED_2_5GB_SHIFT),
+	VFD_LINK_SPEED_5GB	= BIT(VFD_LINK_SPEED_5GB_SHIFT),
+	VFD_LINK_SPEED_10GB	= BIT(VFD_LINK_SPEED_10GB_SHIFT),
+	VFD_LINK_SPEED_40GB	= BIT(VFD_LINK_SPEED_40GB_SHIFT),
+	VFD_LINK_SPEED_20GB	= BIT(VFD_LINK_SPEED_20GB_SHIFT),
+	VFD_LINK_SPEED_25GB	= BIT(VFD_LINK_SPEED_25GB_SHIFT),
+};
+
+struct vfd_ops {
+	int (*get_trunk)(struct pci_dev *pdev, int vf_id, unsigned long *buff);
+	int (*set_trunk)(struct pci_dev *pdev, int vf_id,
+			 const unsigned long *buff);
+	int (*get_vlan_mirror)(struct pci_dev *pdev, int vf_id,
+			       unsigned long *buff);
+	int (*set_vlan_mirror)(struct pci_dev *pdev, int vf_id,
+			       const unsigned long *buff);
+	int (*get_egress_mirror)(struct pci_dev *pdev, int vf_id, int *data);
+	int (*set_egress_mirror)(struct pci_dev *pdev, int vf_id,
+				 const int data);
+	int (*get_ingress_mirror)(struct pci_dev *pdev, int vf_id, int *data);
+	int (*set_ingress_mirror)(struct pci_dev *pdev, int vf_id,
+				  const int data);
+	int (*get_mac_anti_spoof)(struct pci_dev *pdev, int vf_id, bool *data);
+	int (*set_mac_anti_spoof)(struct pci_dev *pdev, int vf_id,
+				  const bool data);
+	int (*get_vlan_anti_spoof)(struct pci_dev *pdev, int vf_id, bool *data);
+	int (*set_vlan_anti_spoof)(struct pci_dev *pdev, int vf_id,
+				   const bool data);
+	int (*get_allow_untagged)(struct pci_dev *pdev, int vf_id, bool *data);
+	int (*set_allow_untagged)(struct pci_dev *pdev, int vf_id,
+				  const bool data);
+	int (*get_loopback)(struct pci_dev *pdev, int vf_id, bool *data);
+	int (*set_loopback)(struct pci_dev *pdev, int vf_id, const bool data);
+	int (*get_mac)(struct pci_dev *pdev, int vf_id, u8 *macaddr);
+	int (*set_mac)(struct pci_dev *pdev, int vf_id, const u8 *macaddr);
+	int (*get_mac_list)(struct pci_dev *pdev, int vf_id,
+			    struct list_head *mac_list);
+	int (*add_macs_to_list)(struct pci_dev *pdev, int vf_id,
+				struct list_head *mac_list);
+	int (*rem_macs_from_list)(struct pci_dev *pdev, int vf_id,
+				  struct list_head *mac_list);
+	int (*get_promisc)(struct pci_dev *pdev, int vf_id, u8 *data);
+	int (*set_promisc)(struct pci_dev *pdev, int vf_id, const u8 data);
+	int (*get_vlan_strip)(struct pci_dev *pdev, int vf_id, bool *data);
+	int (*set_vlan_strip)(struct pci_dev *pdev, int vf_id, const bool data);
+	int (*get_link_state)(struct pci_dev *pdev, int vf_id, bool *enabled,
+			      enum vfd_link_speed *link_speed);
+	int (*set_link_state)(struct pci_dev *pdev, int vf_id, const u8 data);
+	int (*get_max_tx_rate)(struct pci_dev *pdev, int vf_id,
+			       unsigned int *max_tx_rate);
+	int (*set_max_tx_rate)(struct pci_dev *pdev, int vf_id,
+			       unsigned int *max_tx_rate);
+	int (*get_min_tx_rate)(struct kobject *,
+			       struct kobj_attribute *, char *);
+	int (*set_min_tx_rate)(struct kobject *, struct kobj_attribute *,
+			       const char *, size_t);
+	int (*get_spoofcheck)(struct kobject *,
+			      struct kobj_attribute *, char *);
+	int (*set_spoofcheck)(struct kobject *, struct kobj_attribute *,
+			      const char *, size_t);
+	int (*get_trust)(struct kobject *,
+			 struct kobj_attribute *, char *);
+	int (*set_trust)(struct kobject *, struct kobj_attribute *,
+			 const char *, size_t);
+	int (*get_vf_enable)(struct pci_dev *pdev, int vf_id, bool *data);
+	int (*set_vf_enable)(struct pci_dev *pdev, int vf_id, const bool data);
+	int (*get_rx_bytes)(struct pci_dev *pdev, int vf_id, u64 *data);
+	int (*get_rx_dropped)(struct pci_dev *pdev, int vf_id, u64 *data);
+	int (*get_rx_packets)(struct pci_dev *pdev, int vf_id, u64 *data);
+	int (*get_tx_bytes)(struct pci_dev *pdev, int vf_id, u64 *data);
+	int (*get_tx_dropped)(struct pci_dev *pdev, int vf_id, u64 *data);
+	int (*get_tx_packets)(struct pci_dev *pdev, int vf_id, u64 *data);
+	int (*get_tx_spoofed)(struct pci_dev *pdev, int vf_id, u64 *data);
+	int (*get_tx_errors)(struct pci_dev *pdev, int vf_id, u64 *data);
+	int (*reset_stats)(struct pci_dev *pdev, int vf_id);
+	int (*set_vf_bw_share)(struct pci_dev *pdev, int vf_id, u8 bw_share);
+	int (*get_vf_bw_share)(struct pci_dev *pdev, int vf_id, u8 *bw_share);
+	int (*set_pf_qos_apply)(struct pci_dev *pdev);
+	int (*get_pf_ingress_mirror)(struct pci_dev *pdev, int *data);
+	int (*set_pf_ingress_mirror)(struct pci_dev *pdev, const int data);
+	int (*get_pf_egress_mirror)(struct pci_dev *pdev, int *data);
+	int (*set_pf_egress_mirror)(struct pci_dev *pdev, const int data);
+	int (*get_pf_tpid)(struct pci_dev *pdev, u16 *data);
+	int (*set_pf_tpid)(struct pci_dev *pdev, const u16 data);
+	int (*get_num_queues)(struct pci_dev *pdev, int vf_id, int *num_queues);
+	int (*set_num_queues)(struct pci_dev *pdev, int vf_id,
+			      const int num_queues);
+	int (*get_trust_state)(struct pci_dev *pdev, int vf_id, bool *data);
+	int (*set_trust_state)(struct pci_dev *pdev, int vf_id, bool data);
+};
+
+extern const struct vfd_ops *vfd_ops;
+
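+/* A minimal registration sketch (hypothetical callback names): the driver
+ * fills in the callbacks it supports and points the global at it. The sysfs
+ * handlers treat a missing (NULL) callback as -EOPNOTSUPP, so partial
+ * implementations are fine:
+ *
+ *	static const struct vfd_ops my_vfd_ops = {
+ *		.get_vlan_strip	= my_get_vlan_strip,
+ *		.set_vlan_strip	= my_set_vlan_strip,
+ *	};
+ *	...
+ *	vfd_ops = &my_vfd_ops;
+ */
+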
+#endif /* _KCOMPAT_VFD_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/virtchnl.h b/drivers/net/ethernet/intel/i40e/virtchnl.h
new file mode 100644
index 000000000..c1b1ab3b7
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/virtchnl.h
@@ -0,0 +1,949 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2020 Intel Corporation. */
+
+#ifndef _VIRTCHNL_H_
+#define _VIRTCHNL_H_
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the drivers for all devices starting from our 40G product line
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * The Firmware copies the cookie fields when sending messages between the
+ * PF and VF, but uses all other fields internally. Due to this limitation,
+ * we must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have a maximum
+ * of three VSIs. All the queue indexes are relative to the VSI.  Each VF can
+ * have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value
+ * is of status_code type, defined in the shared type.h.
+ *
+ * In general, VF driver initialization should roughly follow the order of
+ * these opcodes. The VF driver must first validate the API version of the
+ * PF driver, then request a reset, then get resources, then configure
+ * queues and interrupts. After these operations are complete, the VF
+ * driver may start its queues, optionally add MAC and VLAN filters, and
+ * process traffic.
+ */
+
+/* START GENERIC DEFINES
+ * Need to ensure the following enums and defines hold the same meaning and
+ * value in current and future projects
+ */
+
+/* Error Codes */
+enum virtchnl_status_code {
+	VIRTCHNL_STATUS_SUCCESS				= 0,
+	VIRTCHNL_STATUS_ERR_PARAM			= -5,
+	VIRTCHNL_STATUS_ERR_NO_MEMORY			= -18,
+	VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH		= -38,
+	VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR		= -39,
+	VIRTCHNL_STATUS_ERR_INVALID_VF_ID		= -40,
+	VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR		= -53,
+	VIRTCHNL_STATUS_ERR_NOT_SUPPORTED		= -64,
+};
+
+/* Backward compatibility */
+#define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
+#define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED
+
+#define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT		0x0
+#define VIRTCHNL_LINK_SPEED_100MB_SHIFT		0x1
+#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT	0x2
+#define VIRTCHNL_LINK_SPEED_10GB_SHIFT		0x3
+#define VIRTCHNL_LINK_SPEED_40GB_SHIFT		0x4
+#define VIRTCHNL_LINK_SPEED_20GB_SHIFT		0x5
+#define VIRTCHNL_LINK_SPEED_25GB_SHIFT		0x6
+#define VIRTCHNL_LINK_SPEED_5GB_SHIFT		0x7
+
+enum virtchnl_link_speed {
+	VIRTCHNL_LINK_SPEED_UNKNOWN	= 0,
+	VIRTCHNL_LINK_SPEED_100MB	= BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
+	VIRTCHNL_LINK_SPEED_1GB		= BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
+	VIRTCHNL_LINK_SPEED_10GB	= BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
+	VIRTCHNL_LINK_SPEED_40GB	= BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
+	VIRTCHNL_LINK_SPEED_20GB	= BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
+	VIRTCHNL_LINK_SPEED_25GB	= BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
+	VIRTCHNL_LINK_SPEED_2_5GB	= BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT),
+	VIRTCHNL_LINK_SPEED_5GB		= BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT),
+};
+
+/* for hsplit_0 field of Rx HMC context */
+/* deprecated with AVF 1.0 */
+enum virtchnl_rx_hsplit {
+	VIRTCHNL_RX_HSPLIT_NO_SPLIT      = 0,
+	VIRTCHNL_RX_HSPLIT_SPLIT_L2      = 1,
+	VIRTCHNL_RX_HSPLIT_SPLIT_IP      = 2,
+	VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
+	VIRTCHNL_RX_HSPLIT_SPLIT_SCTP    = 8,
+};
+
+/* END GENERIC DEFINES */
+
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum virtchnl_ops {
+/* The PF sends status change events to VFs using
+ * the VIRTCHNL_OP_EVENT opcode.
+ * VFs send requests to the PF using the other ops.
+ * Use of "advanced opcode" features must be negotiated as part of capabilities
+ * exchange and are not considered part of base mode feature set.
+ */
+	VIRTCHNL_OP_UNKNOWN = 0,
+	VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+	VIRTCHNL_OP_RESET_VF = 2,
+	VIRTCHNL_OP_GET_VF_RESOURCES = 3,
+	VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
+	VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
+	VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
+	VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
+	VIRTCHNL_OP_ENABLE_QUEUES = 8,
+	VIRTCHNL_OP_DISABLE_QUEUES = 9,
+	VIRTCHNL_OP_ADD_ETH_ADDR = 10,
+	VIRTCHNL_OP_DEL_ETH_ADDR = 11,
+	VIRTCHNL_OP_ADD_VLAN = 12,
+	VIRTCHNL_OP_DEL_VLAN = 13,
+	VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
+	VIRTCHNL_OP_GET_STATS = 15,
+	VIRTCHNL_OP_RSVD = 16,
+	VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+	/* opcodes 18 and 19 are reserved */
+	VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
+	VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
+	VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
+	VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
+	VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
+	VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
+	VIRTCHNL_OP_SET_RSS_HENA = 26,
+	VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
+	VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
+	VIRTCHNL_OP_REQUEST_QUEUES = 29,
+	VIRTCHNL_OP_ENABLE_CHANNELS = 30,
+	VIRTCHNL_OP_DISABLE_CHANNELS = 31,
+	VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
+	VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
+	/* opcodes 34, 35, 36, 37 and 38 are reserved */
+};
+
+/* These macros are used to generate compilation errors if a structure/union
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure/union is not of the correct size, otherwise it creates an enum
+ * that is never used.
+ */
+#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
+	{ virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_assert_enum_##X \
+	{ virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
+
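+/* For example, the VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg) line below
+ * expands to an enum whose initializer is
+ * 20 / ((sizeof(struct virtchnl_msg) == 20) ? 1 : 0); if the size ever
+ * drifts from 20 bytes the divisor becomes 0 and compilation fails.
+ */
+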
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct virtchnl_msg {
+	u8 pad[8];			 /* AQ flags/opcode/len/retval fields */
+	enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
+	enum virtchnl_status_code v_retval;  /* ditto for desc->retval */
+	u32 vfid;			 /* used by PF when sending to VF */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
+
+/* Message descriptions and data structures. */
+
+/* VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define VIRTCHNL_VERSION_MAJOR		1
+#define VIRTCHNL_VERSION_MINOR		1
+#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS	0
+
+struct virtchnl_version_info {
+	u32 major;
+	u32 minor;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
+
+#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
+#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
+
+/* VIRTCHNL_OP_RESET_VF
+ * VF sends this request to PF with no parameters
+ * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
+ * until reset completion is indicated. The admin queue must be reinitialized
+ * after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
+/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
+ * vsi_type should always be 6 for backward compatibility. Add other fields
+ * as needed.
+ */
+enum virtchnl_vsi_type {
+	VIRTCHNL_VSI_TYPE_INVALID = 0,
+	VIRTCHNL_VSI_SRIOV = 6,
+};
+
+/* VIRTCHNL_OP_GET_VF_RESOURCES
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
+ * PF responds with an indirect message containing
+ * virtchnl_vf_resource and one or more
+ * virtchnl_vsi_resource structures.
+ */
+
+struct virtchnl_vsi_resource {
+	u16 vsi_id;
+	u16 num_queue_pairs;
+	enum virtchnl_vsi_type vsi_type;
+	u16 qset_handle;
+	u8 default_mac_addr[ETH_ALEN];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
+
+/* VF capability flags
+ * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
+ * TX/RX Checksum offloading and TSO for non-tunnelled packets.
+ */
+#define VIRTCHNL_VF_OFFLOAD_L2			0x00000001
+#define VIRTCHNL_VF_OFFLOAD_IWARP		0x00000002
+#define VIRTCHNL_VF_OFFLOAD_RSVD		0x00000004
+#define VIRTCHNL_VF_OFFLOAD_RSS_AQ		0x00000008
+#define VIRTCHNL_VF_OFFLOAD_RSS_REG		0x00000010
+#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR		0x00000020
+#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES		0x00000040
+#define VIRTCHNL_VF_OFFLOAD_CRC			0x00000080
+#define VIRTCHNL_VF_OFFLOAD_VLAN		0x00010000
+#define VIRTCHNL_VF_OFFLOAD_RX_POLLING		0x00020000
+#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2	0x00040000
+#define VIRTCHNL_VF_OFFLOAD_RSS_PF		0X00080000
+#define VIRTCHNL_VF_OFFLOAD_ENCAP		0X00100000
+#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM		0X00200000
+#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM	0X00400000
+#define VIRTCHNL_VF_OFFLOAD_ADQ			0X00800000
+#define VIRTCHNL_VF_OFFLOAD_ADQ_V2		0X01000000
+#define VIRTCHNL_VF_OFFLOAD_USO			0X02000000
+	/* 0X80000000 is reserved */
+
+/* Define below the capability flags that are not offloads */
+#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED		0x00000080
+#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
+			       VIRTCHNL_VF_OFFLOAD_VLAN | \
+			       VIRTCHNL_VF_OFFLOAD_RSS_PF)
+
+struct virtchnl_vf_resource {
+	u16 num_vsis;
+	u16 num_queue_pairs;
+	u16 max_vectors;
+	u16 max_mtu;
+
+	u32 vf_cap_flags;
+	u32 rss_key_size;
+	u32 rss_lut_size;
+
+	struct virtchnl_vsi_resource vsi_res[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
+
+/* VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct virtchnl_txq_info {
+	u16 vsi_id;
+	u16 queue_id;
+	u16 ring_len;		/* number of descriptors, multiple of 8 */
+	u16 headwb_enabled; /* deprecated with AVF 1.0 */
+	u64 dma_ring_addr;
+	u64 dma_headwb_addr; /* deprecated with AVF 1.0 */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
+
+/* VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code. The
+ * crc_disable flag disables CRC stripping on the VF. Setting
+ * the crc_disable flag to 1 will disable CRC stripping for each
+ * queue in the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC
+ * offload must have been set prior to sending this info or the PF
+ * will ignore the request. This flag should be set the same for
+ * all of the queues for a VF.
+ */
+
+/* Rx queue config info */
+struct virtchnl_rxq_info {
+	u16 vsi_id;
+	u16 queue_id;
+	u32 ring_len;		/* number of descriptors, multiple of 32 */
+	u16 hdr_size;
+	u16 splithdr_enabled; /* deprecated with AVF 1.0 */
+	u32 databuffer_size;
+	u32 max_pkt_size;
+	u8 crc_disable;
+	u8 pad1[3];
+	u64 dma_ring_addr;
+	enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
+	u32 pad2;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
+
+/* VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ * NOTE: The VF is not required to configure all queues in a single request.
+ * It may send multiple messages. PF drivers must correctly handle all VF
+ * requests.
+ */
+struct virtchnl_queue_pair_info {
+	/* NOTE: vsi_id and queue_id should be identical for both queues. */
+	struct virtchnl_txq_info txq;
+	struct virtchnl_rxq_info rxq;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);
+
+struct virtchnl_vsi_queue_config_info {
+	u16 vsi_id;
+	u16 num_queue_pairs;
+	u32 pad;
+	struct virtchnl_queue_pair_info qpair[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
+
+/* VIRTCHNL_OP_REQUEST_QUEUES
+ * VF sends this message to request the PF to allocate additional queues to
+ * this VF.  Each VF gets a guaranteed number of queues on init, but any
+ * additional queues must be negotiated.  This is a best effort request as it
+ * is possible the PF does not have enough queues left to support the request.
+ * If the PF cannot support the number requested it will respond with the
+ * maximum number it is able to support.  If the request is successful, PF will
+ * then reset the VF to institute required changes.
+ */
+
+/* VF resource request */
+struct virtchnl_vf_res_request {
+	u16 num_queue_pairs;
+};
+
+/* VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0. The VF may not request
+ * that vector 0 be used for traffic.
+ * PF configures interrupt mapping and returns status.
+ * NOTE: due to hardware requirements, all active queues (both TX and RX)
+ * should be mapped to interrupts, even if the driver intends to operate
+ * only in polling mode. In this case the interrupt may be disabled, but
+ * the ITR timer will still run to trigger writebacks.
+ */
+struct virtchnl_vector_map {
+	u16 vsi_id;
+	u16 vector_id;
+	u16 rxq_map;
+	u16 txq_map;
+	u16 rxitr_idx;
+	u16 txitr_idx;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
+
+struct virtchnl_irq_map_info {
+	u16 num_vectors;
+	struct virtchnl_vector_map vecmap[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
+
+/* VIRTCHNL_OP_ENABLE_QUEUES
+ * VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these message to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ * NOTE: The VF is not required to enable/disable all queues in a single
+ * request. It may send multiple messages.
+ * PF drivers must correctly handle all VF requests.
+ */
+struct virtchnl_queue_select {
+	u16 vsi_id;
+	u16 pad;
+	u32 rx_queues;
+	u32 tx_queues;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
+
+/* VIRTCHNL_OP_ADD_ETH_ADDR
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* VIRTCHNL_OP_DEL_ETH_ADDR
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct virtchnl_ether_addr {
+	u8 addr[ETH_ALEN];
+	u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
+
+struct virtchnl_ether_addr_list {
+	u16 vsi_id;
+	u16 num_elements;
+	struct virtchnl_ether_addr list[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
+
+/* VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct virtchnl_vlan_filter_list {
+	u16 vsi_id;
+	u16 num_elements;
+	u16 vlan_id[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
+
+/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct virtchnl_promisc_info {
+	u16 vsi_id;
+	u16 flags;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
+
+#define FLAG_VF_UNICAST_PROMISC	0x00000001
+#define FLAG_VF_MULTICAST_PROMISC	0x00000002
+
+/* VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct virtchnl_eth_stats in an external buffer.
+ */
+
+struct virtchnl_eth_stats {
+	u64 rx_bytes;			/* received bytes */
+	u64 rx_unicast;			/* received unicast pkts */
+	u64 rx_multicast;		/* received multicast pkts */
+	u64 rx_broadcast;		/* received broadcast pkts */
+	u64 rx_discards;
+	u64 rx_unknown_protocol;
+	u64 tx_bytes;			/* transmitted bytes */
+	u64 tx_unicast;			/* transmitted unicast pkts */
+	u64 tx_multicast;		/* transmitted multicast pkts */
+	u64 tx_broadcast;		/* transmitted broadcast pkts */
+	u64 tx_discards;
+	u64 tx_errors;
+};
+
+/* VIRTCHNL_OP_CONFIG_RSS_KEY
+ * VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the RSS fields in
+ * the VF resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct virtchnl_rss_key {
+	u16 vsi_id;
+	u16 key_len;
+	u8 key[1];         /* RSS hash key, packed bytes */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
+
+struct virtchnl_rss_lut {
+	u16 vsi_id;
+	u16 lut_entries;
+	u8 lut[1];        /* RSS lookup table */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
+
+/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ */
+struct virtchnl_rss_hena {
+	u64 hena;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
+
+/* This is used by the PF driver to enforce how many channels can be
+ * supported. When the ADQ_V2 capability is negotiated, 16 channels are
+ * allowed; otherwise the PF driver allows a maximum of 4 channels.
+ */
+#define VIRTCHNL_MAX_ADQ_CHANNELS 4
+#define VIRTCHNL_MAX_ADQ_V2_CHANNELS 16
+
+/* VIRTCHNL_OP_ENABLE_CHANNELS
+ * VIRTCHNL_OP_DISABLE_CHANNELS
+ * VF sends these messages to enable or disable channels based on
+ * the user specified queue count and queue offset for each traffic class.
+ * This struct encompasses all the information that the PF needs from
+ * VF to create a channel.
+ */
+struct virtchnl_channel_info {
+	u16 count; /* number of queues in a channel */
+	u16 offset; /* queues in a channel start from 'offset' */
+	u32 pad;
+	u64 max_tx_rate;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);
+
+struct virtchnl_tc_info {
+	u32	num_tc;
+	u32	pad;
+	struct	virtchnl_channel_info list[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info);
+
+/* VIRTCHNL_ADD_CLOUD_FILTER
+ * VIRTCHNL_DEL_CLOUD_FILTER
+ * VF sends these messages to add or delete a cloud filter based on the
+ * user specified match and action filters. These structures encompass
+ * all the information that the PF needs from the VF to add/delete a
+ * cloud filter.
+ */
+
+struct virtchnl_l4_spec {
+	u8	src_mac[ETH_ALEN];
+	u8	dst_mac[ETH_ALEN];
+	/* vlan_prio shares this 16 bit field with the VLAN ID: bits 11..0
+	 * carry the actual vlan_id and bits 14..12 carry vlan_prio. If
+	 * vlan_prio offload is added in the future, pass the priority in
+	 * bits 14..12 of the "vlan_id" field.
+	 */
+	__be16	vlan_id;
+	__be16	pad; /* reserved for future use */
+	__be32	src_ip[4];
+	__be32	dst_ip[4];
+	__be16	src_port;
+	__be16	dst_port;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);
+
+union virtchnl_flow_spec {
+	struct	virtchnl_l4_spec tcp_spec;
+	u8	buffer[128]; /* reserved for future use */
+};
+
+VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);
+
+enum virtchnl_action {
+	/* action types */
+	VIRTCHNL_ACTION_DROP = 0,
+	VIRTCHNL_ACTION_TC_REDIRECT,
+};
+
+enum virtchnl_flow_type {
+	/* flow types */
+	VIRTCHNL_TCP_V4_FLOW = 0,
+	VIRTCHNL_TCP_V6_FLOW,
+	VIRTCHNL_UDP_V4_FLOW,
+	VIRTCHNL_UDP_V6_FLOW,
+};
+
+struct virtchnl_filter {
+	union	virtchnl_flow_spec data;
+	union	virtchnl_flow_spec mask;
+	enum	virtchnl_flow_type flow_type;
+	enum	virtchnl_action action;
+	u32	action_meta;
+	u8	field_flags;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
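+
+/* Illustrative sketch (editorial addition, not part of the original patch):
+ * filling a cloud filter that redirects TCP/IPv4 traffic with a given
+ * destination port to traffic class 'tc'. The mask convention (all-ones
+ * bytes select matched fields) and the use of action_meta as the TC index
+ * follow the iavf/i40e drivers and are assumptions here; memset() and
+ * cpu_to_be16() come from the usual kernel headers.
+ */
+static inline void example_fill_tcp_dport_filter(struct virtchnl_filter *f,
+						 __be16 dst_port, u32 tc)
+{
+	memset(f, 0, sizeof(*f));
+	f->flow_type = VIRTCHNL_TCP_V4_FLOW;
+	f->data.tcp_spec.dst_port = dst_port;
+	f->mask.tcp_spec.dst_port = cpu_to_be16(0xFFFF);
+	f->action = VIRTCHNL_ACTION_TC_REDIRECT;
+	f->action_meta = tc;
+}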
+
+/* VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum virtchnl_event_codes {
+	VIRTCHNL_EVENT_UNKNOWN = 0,
+	VIRTCHNL_EVENT_LINK_CHANGE,
+	VIRTCHNL_EVENT_RESET_IMPENDING,
+	VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+
+#define PF_EVENT_SEVERITY_INFO		0
+#define PF_EVENT_SEVERITY_CERTAIN_DOOM	255
+
+struct virtchnl_pf_event {
+	enum virtchnl_event_codes event;
+	union {
+		/* If the PF driver does not support the new speed reporting
+		 * capabilities, use link_event; otherwise use link_event_adv
+		 * to get the speed and link information. Support for the new
+		 * speeds is indicated by the VIRTCHNL_VF_CAP_ADV_LINK_SPEED
+		 * flag in the vf_cap_flags field of the virtchnl_vf_resource
+		 * struct, which determines which link event struct to use
+		 * below.
+		 */
+		struct {
+			enum virtchnl_link_speed link_speed;
+			u8 link_status;
+		} link_event;
+		struct {
+			/* link_speed provided in Mbps */
+			u32 link_speed;
+			u8 link_status;
+		} link_event_adv;
+	} event_data;
+
+	int severity;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
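+
+/* Illustrative sketch (editorial addition, not part of the original patch):
+ * reading link status from an event regardless of which union member the
+ * PF filled in. 'adv_speed_cap' would be derived from the negotiated
+ * VIRTCHNL_VF_CAP_ADV_LINK_SPEED flag.
+ */
+static inline bool example_link_is_up(const struct virtchnl_pf_event *vpe,
+				      bool adv_speed_cap)
+{
+	if (vpe->event != VIRTCHNL_EVENT_LINK_CHANGE)
+		return false;
+	/* Only the speed encoding differs between the two union members
+	 * (enum virtchnl_link_speed vs. raw Mbps); link_status is a plain
+	 * boolean flag in both.
+	 */
+	return adv_speed_cap ? vpe->event_data.link_event_adv.link_status :
+			       vpe->event_data.link_event.link_status;
+}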
+
+/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
+ * VF uses this message to request that the PF map IWARP vectors to IWARP
+ * queues. The request originates from the VF IWARP driver through a client
+ * interface between the VF LAN and VF IWARP drivers.
+ * A vector can have both an AEQ and a CEQ attached to it. However, since
+ * there is a single AEQ per VF IWARP instance, most vectors will have an
+ * INVALID_IDX for the aeq and a valid idx for the ceq.
+ * Multiple CEQs are never attached to a single vector.
+ * The PF configures the interrupt mapping and returns status.
+ */
+struct virtchnl_iwarp_qv_info {
+	u32 v_idx; /* msix_vector */
+	u16 ceq_idx;
+	u16 aeq_idx;
+	u8 itr_idx;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info);
+
+struct virtchnl_iwarp_qvlist_info {
+	u32 num_vectors;
+	struct virtchnl_iwarp_qv_info qv_info[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info);
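+
+/* Illustrative sketch (editorial addition, not part of the original patch):
+ * message length for an IWARP vector map with num_vectors entries; the
+ * first entry is already counted by the qv_info[1] placeholder, which is
+ * why the validator below multiplies by (num_vectors - 1).
+ */
+static inline u16 example_iwarp_qvlist_msg_len(u32 num_vectors)
+{
+	return sizeof(struct virtchnl_iwarp_qvlist_info) +
+	       (num_vectors - 1) * sizeof(struct virtchnl_iwarp_qv_info);
+}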
+
+/* Since VF messages are limited to a u16 length, precalculate the maximum
+ * number of nested elements in each virtchnl structure that the virtual
+ * channel can handle in a single message.
+ */
+enum virtchnl_vector_limits {
+	VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX	=
+		((u16)(~0) - sizeof(struct virtchnl_vsi_queue_config_info)) /
+		sizeof(struct virtchnl_queue_pair_info),
+
+	VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX		=
+		((u16)(~0) - sizeof(struct virtchnl_irq_map_info)) /
+		sizeof(struct virtchnl_vector_map),
+
+	VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX	=
+		((u16)(~0) - sizeof(struct virtchnl_ether_addr_list)) /
+		sizeof(struct virtchnl_ether_addr),
+
+	VIRTCHNL_OP_ADD_DEL_VLAN_MAX		=
+		((u16)(~0) - sizeof(struct virtchnl_vlan_filter_list)) /
+		sizeof(u16),
+
+	VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP_MAX	=
+		((u16)(~0) - sizeof(struct virtchnl_iwarp_qvlist_info)) /
+		sizeof(struct virtchnl_iwarp_qv_info),
+
+	VIRTCHNL_OP_ENABLE_CHANNELS_MAX		=
+		((u16)(~0) - sizeof(struct virtchnl_tc_info)) /
+		sizeof(struct virtchnl_channel_info),
+};
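+
+/* Worked example (editorial addition, not part of the original patch): for
+ * VIRTCHNL_OP_CONFIG_VSI_QUEUES the limit evaluates, using the struct
+ * sizes checked earlier in this header (72-byte header, 64-byte queue
+ * pair), to (65535 - 72) / 64 = 1022 queue pairs. Any count above a
+ * limit cannot fit in one u16-sized message, so the validator below
+ * rejects it outright.
+ */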
+
+/* VF reset states - these are written into the VFGEN_RSTAT register on
+ * the VF:
+ * When the PF initiates a reset, it writes 0.
+ * When the reset is complete, it writes 1.
+ * When the PF detects that the VF has recovered, it writes 2.
+ * The VF checks this register periodically to determine if a reset has
+ * occurred, then polls it to know when the reset is complete.
+ * If either the PF or the VF reads the register while the hardware is in
+ * a reset state, it returns 0xDEADBEEF, which, when masked to the state
+ * bits, yields 3.
+ */
+enum virtchnl_vfr_states {
+	VIRTCHNL_VFR_INPROGRESS = 0,
+	VIRTCHNL_VFR_COMPLETED,
+	VIRTCHNL_VFR_VFACTIVE,
+};
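+
+/* Illustrative sketch (editorial addition, not part of the original patch):
+ * polling the reset state. read_vfgen_rstat() is a hypothetical register
+ * accessor; real drivers read VFGEN_RSTAT through their MMIO mapping. The
+ * '& 0x3' mask is what turns an in-reset read of 0xDEADBEEF into 3, as
+ * described above.
+ */
+extern u32 read_vfgen_rstat(void); /* hypothetical */
+
+static inline bool example_vf_reset_done(void)
+{
+	u32 rstat = read_vfgen_rstat() & 0x3;
+
+	return rstat == VIRTCHNL_VFR_COMPLETED ||
+	       rstat == VIRTCHNL_VFR_VFACTIVE;
+}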
+
+/**
+ * virtchnl_vc_validate_vf_msg
+ * @ver: Virtchnl version info
+ * @v_opcode: Opcode for the message
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Validate that the message length and format match the structure expected
+ * for each opcode. Returns 0 on success, or a VIRTCHNL_STATUS_ERR_* code
+ * on failure.
+ */
+static inline int
+virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
+			    u8 *msg, u16 msglen)
+{
+	bool err_msg_format = false;
+	int valid_len = 0;
+
+	/* Validate message length. */
+	switch (v_opcode) {
+	case VIRTCHNL_OP_VERSION:
+		valid_len = sizeof(struct virtchnl_version_info);
+		break;
+	case VIRTCHNL_OP_RESET_VF:
+		break;
+	case VIRTCHNL_OP_GET_VF_RESOURCES:
+		if (VF_IS_V11(ver))
+			valid_len = sizeof(u32);
+		break;
+	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
+		valid_len = sizeof(struct virtchnl_txq_info);
+		break;
+	case VIRTCHNL_OP_CONFIG_RX_QUEUE:
+		valid_len = sizeof(struct virtchnl_rxq_info);
+		break;
+	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+		valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
+		if (msglen >= valid_len) {
+			struct virtchnl_vsi_queue_config_info *vqc =
+			    (struct virtchnl_vsi_queue_config_info *)msg;
+
+			if (vqc->num_queue_pairs == 0 || vqc->num_queue_pairs >
+			    VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX) {
+				err_msg_format = true;
+				break;
+			}
+
+			valid_len += (vqc->num_queue_pairs *
+				      sizeof(struct
+					     virtchnl_queue_pair_info));
+		}
+		break;
+	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+		valid_len = sizeof(struct virtchnl_irq_map_info);
+		if (msglen >= valid_len) {
+			struct virtchnl_irq_map_info *vimi =
+			    (struct virtchnl_irq_map_info *)msg;
+
+			if (vimi->num_vectors == 0 || vimi->num_vectors >
+			    VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX) {
+				err_msg_format = true;
+				break;
+			}
+
+			valid_len += (vimi->num_vectors *
+				      sizeof(struct virtchnl_vector_map));
+		}
+		break;
+	case VIRTCHNL_OP_ENABLE_QUEUES:
+	case VIRTCHNL_OP_DISABLE_QUEUES:
+		valid_len = sizeof(struct virtchnl_queue_select);
+		break;
+	case VIRTCHNL_OP_ADD_ETH_ADDR:
+	case VIRTCHNL_OP_DEL_ETH_ADDR:
+		valid_len = sizeof(struct virtchnl_ether_addr_list);
+		if (msglen >= valid_len) {
+			struct virtchnl_ether_addr_list *veal =
+			    (struct virtchnl_ether_addr_list *)msg;
+
+			if (veal->num_elements == 0 || veal->num_elements >
+			    VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX) {
+				err_msg_format = true;
+				break;
+			}
+
+			valid_len += veal->num_elements *
+			    sizeof(struct virtchnl_ether_addr);
+		}
+		break;
+	case VIRTCHNL_OP_ADD_VLAN:
+	case VIRTCHNL_OP_DEL_VLAN:
+		valid_len = sizeof(struct virtchnl_vlan_filter_list);
+		if (msglen >= valid_len) {
+			struct virtchnl_vlan_filter_list *vfl =
+			    (struct virtchnl_vlan_filter_list *)msg;
+
+			if (vfl->num_elements == 0 || vfl->num_elements >
+			    VIRTCHNL_OP_ADD_DEL_VLAN_MAX) {
+				err_msg_format = true;
+				break;
+			}
+
+			valid_len += vfl->num_elements * sizeof(u16);
+		}
+		break;
+	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+		valid_len = sizeof(struct virtchnl_promisc_info);
+		break;
+	case VIRTCHNL_OP_GET_STATS:
+		valid_len = sizeof(struct virtchnl_queue_select);
+		break;
+	case VIRTCHNL_OP_IWARP:
+		/* These messages are opaque to us and will be validated in
+		 * the RDMA client code. We just need to check for nonzero
+		 * length. The firmware will enforce max length restrictions.
+		 */
+		if (msglen)
+			valid_len = msglen;
+		else
+			err_msg_format = true;
+		break;
+	case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
+		break;
+	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
+		valid_len = sizeof(struct virtchnl_iwarp_qvlist_info);
+		if (msglen >= valid_len) {
+			struct virtchnl_iwarp_qvlist_info *qv =
+				(struct virtchnl_iwarp_qvlist_info *)msg;
+
+			if (qv->num_vectors == 0 || qv->num_vectors >
+			    VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP_MAX) {
+				err_msg_format = true;
+				break;
+			}
+
+			valid_len += ((qv->num_vectors - 1) *
+				sizeof(struct virtchnl_iwarp_qv_info));
+		}
+		break;
+	case VIRTCHNL_OP_CONFIG_RSS_KEY:
+		valid_len = sizeof(struct virtchnl_rss_key);
+		if (msglen >= valid_len) {
+			struct virtchnl_rss_key *vrk =
+				(struct virtchnl_rss_key *)msg;
+
+			if (vrk->key_len == 0) {
+				/* zero length is allowed as input */
+				break;
+			}
+
+			valid_len += vrk->key_len - 1;
+		}
+		break;
+	case VIRTCHNL_OP_CONFIG_RSS_LUT:
+		valid_len = sizeof(struct virtchnl_rss_lut);
+		if (msglen >= valid_len) {
+			struct virtchnl_rss_lut *vrl =
+				(struct virtchnl_rss_lut *)msg;
+
+			if (vrl->lut_entries == 0) {
+				/* zero entries is allowed as input */
+				break;
+			}
+
+			valid_len += vrl->lut_entries - 1;
+		}
+		break;
+	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+		break;
+	case VIRTCHNL_OP_SET_RSS_HENA:
+		valid_len = sizeof(struct virtchnl_rss_hena);
+		break;
+	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+		break;
+	case VIRTCHNL_OP_REQUEST_QUEUES:
+		valid_len = sizeof(struct virtchnl_vf_res_request);
+		break;
+	case VIRTCHNL_OP_ENABLE_CHANNELS:
+		valid_len = sizeof(struct virtchnl_tc_info);
+		if (msglen >= valid_len) {
+			struct virtchnl_tc_info *vti =
+				(struct virtchnl_tc_info *)msg;
+
+			if (vti->num_tc == 0 || vti->num_tc >
+			    VIRTCHNL_OP_ENABLE_CHANNELS_MAX) {
+				err_msg_format = true;
+				break;
+			}
+
+			valid_len += (vti->num_tc - 1) *
+				     sizeof(struct virtchnl_channel_info);
+		}
+		break;
+	case VIRTCHNL_OP_DISABLE_CHANNELS:
+		break;
+	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
+	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
+		valid_len = sizeof(struct virtchnl_filter);
+		break;
+	/* These are always errors coming from the VF. */
+	case VIRTCHNL_OP_EVENT:
+	case VIRTCHNL_OP_UNKNOWN:
+	default:
+		return VIRTCHNL_STATUS_ERR_PARAM;
+	}
+	/* Final checks: reject format errors and length mismatches. */
+	if (err_msg_format || valid_len != msglen)
+		return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
+
+	return 0;
+}
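+
+/* Illustrative usage sketch (editorial addition, not part of the original
+ * patch): a PF receive path would validate each VF message before
+ * dispatching it. pf_handle_vf_msg() is a hypothetical dispatcher.
+ */
+extern int pf_handle_vf_msg(u32 v_opcode, u8 *msg, u16 msglen); /* hypothetical */
+
+static inline int example_pf_rx_msg(struct virtchnl_version_info *ver,
+				    u32 v_opcode, u8 *msg, u16 msglen)
+{
+	int err = virtchnl_vc_validate_vf_msg(ver, v_opcode, msg, msglen);
+
+	if (err)
+		return err; /* NAK the VF with the returned status */
+	return pf_handle_vf_msg(v_opcode, msg, msglen);
+}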
+#endif /* _VIRTCHNL_H_ */
-- 
2.20.1